From b6a7d49b6f1f7c494372fd1b9aab3982c9a299c7 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 16 May 2023 08:56:42 -0500 Subject: [PATCH 001/562] `traceback.format_exception(...)` usage that is compatible with Python 3.7 and 3.11 (#15599) * Usage that is compatible with Python 3.8 and 3.11 > Since Python 3.10, instead of passing value and tb, an exception object can be passed as the first argument. If value and tb are provided, the first argument is ignored in order to provide backwards compatibility. > > -- https://docs.python.org/3/library/traceback.html * Add changelog --- changelog.d/15599.bugfix | 1 + synapse/app/_base.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15599.bugfix diff --git a/changelog.d/15599.bugfix b/changelog.d/15599.bugfix new file mode 100644 index 0000000000..b58af8ad55 --- /dev/null +++ b/changelog.d/15599.bugfix @@ -0,0 +1 @@ +Print full error and stack-trace of any exception that occurs during startup/initialization. diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 4dfcf484fa..936b1b0430 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -214,7 +214,7 @@ def handle_startup_exception(e: Exception) -> NoReturn: # the reactor are written to the logs, followed by a summary to stderr. logger.exception("Exception during startup") - error_string = "".join(traceback.format_exception(e)) + error_string = "".join(traceback.format_exception(type(e), e, e.__traceback__)) indented_error_string = indent(error_string, " ") quit_with_error( From 736199b7638175c439fff10a1f8a2d7da96838e5 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 19 May 2023 16:13:44 +0000 Subject: [PATCH 002/562] Remove old R30 because R30v2 supercedes it (#10428) R30v2 has been out since 2021-07-19 (https://github.com/matrix-org/synapse/pull/10332) and we started collecting stats on 2021-08-16. Since it's been over a year now (almost 2 years), this is enough grace period for us to now rip it out. --- changelog.d/10428.removal | 1 + .../reporting_homeserver_usage_statistics.md | 5 - synapse/app/phone_stats_home.py | 4 - synapse/storage/databases/main/metrics.py | 83 ---------- tests/app/test_phone_stats_home.py | 154 ------------------ 5 files changed, 1 insertion(+), 246 deletions(-) create mode 100644 changelog.d/10428.removal diff --git a/changelog.d/10428.removal b/changelog.d/10428.removal new file mode 100644 index 0000000000..c056e89585 --- /dev/null +++ b/changelog.d/10428.removal @@ -0,0 +1 @@ +Remove the old version of the R30 (30-day retained users) phone-home metric. diff --git a/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md b/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md index 3a7ed7c806..60b758e33b 100644 --- a/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md +++ b/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md @@ -42,11 +42,6 @@ The following statistics are sent to the configured reporting endpoint: | `daily_e2ee_messages` | int | The number of (state) events with the type `m.room.encrypted` seen in the last 24 hours. | | `daily_sent_messages` | int | The number of (state) events sent by a local user with the type `m.room.message` seen in the last 24 hours. | | `daily_sent_e2ee_messages` | int | The number of (state) events sent by a local user with the type `m.room.encrypted` seen in the last 24 hours. 
| -| `r30_users_all` | int | The number of 30 day retained users, defined as users who have created their accounts more than 30 days ago, where they were last seen at most 30 days ago and where those two timestamps are over 30 days apart. Includes clients that do not fit into the below r30 client types. | -| `r30_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Android" in the user agent string. | -| `r30_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "iOS" in the user agent string. | -| `r30_users_electron` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Electron" in the user agent string. | -| `r30_users_web` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Mozilla" or "Gecko" in the user agent string. | | `r30v2_users_all` | int | The number of 30 day retained users, with a revised algorithm. Defined as users that appear more than once in the past 60 days, and have more than 30 days between the most and least recent appearances in the past 60 days. Includes clients that do not fit into the below r30 client types. | | `r30v2_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "android" (case-insensitive) in the user agent string. | | `r30v2_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "ios" (case-insensitive) in the user agent string. | diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 897dd3edac..09988670da 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -127,10 +127,6 @@ async def phone_stats_home( daily_sent_messages = await store.count_daily_sent_messages() stats["daily_sent_messages"] = daily_sent_messages - r30_results = await store.count_r30_users() - for name, count in r30_results.items(): - stats["r30_users_" + name] = count - r30v2_results = await store.count_r30v2_users() for name, count in r30v2_results.items(): stats["r30v2_users_" + name] = count diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 14294a0bb8..595e22982e 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -248,89 +248,6 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): (count,) = cast(Tuple[int], txn.fetchone()) return count - async def count_r30_users(self) -> Dict[str, int]: - """ - Counts the number of 30 day retained users, defined as:- - * Users who have created their accounts more than 30 days ago - * Where last seen at most 30 days ago - * Where account creation and last_seen are > 30 days apart - - Returns: - A mapping of counts globally as well as broken out by platform. 
- """ - - def _count_r30_users(txn: LoggingTransaction) -> Dict[str, int]: - thirty_days_in_secs = 86400 * 30 - now = int(self._clock.time()) - thirty_days_ago_in_secs = now - thirty_days_in_secs - - sql = """ - SELECT platform, COUNT(*) FROM ( - SELECT - users.name, platform, users.creation_ts * 1000, - MAX(uip.last_seen) - FROM users - INNER JOIN ( - SELECT - user_id, - last_seen, - CASE - WHEN user_agent LIKE '%%Android%%' THEN 'android' - WHEN user_agent LIKE '%%iOS%%' THEN 'ios' - WHEN user_agent LIKE '%%Electron%%' THEN 'electron' - WHEN user_agent LIKE '%%Mozilla%%' THEN 'web' - WHEN user_agent LIKE '%%Gecko%%' THEN 'web' - ELSE 'unknown' - END - AS platform - FROM user_ips - ) uip - ON users.name = uip.user_id - AND users.appservice_id is NULL - AND users.creation_ts < ? - AND uip.last_seen/1000 > ? - AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 - GROUP BY users.name, platform, users.creation_ts - ) u GROUP BY platform - """ - - results = {} - txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) - - for row in txn: - if row[0] == "unknown": - pass - results[row[0]] = row[1] - - sql = """ - SELECT COUNT(*) FROM ( - SELECT users.name, users.creation_ts * 1000, - MAX(uip.last_seen) - FROM users - INNER JOIN ( - SELECT - user_id, - last_seen - FROM user_ips - ) uip - ON users.name = uip.user_id - AND appservice_id is NULL - AND users.creation_ts < ? - AND uip.last_seen/1000 > ? - AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 - GROUP BY users.name, users.creation_ts - ) u - """ - - txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) - - (count,) = cast(Tuple[int], txn.fetchone()) - results["all"] = count - - return results - - return await self.db_pool.runInteraction("count_r30_users", _count_r30_users) - async def count_r30v2_users(self) -> Dict[str, int]: """ Counts the number of 30 day retained users, defined as users that: diff --git a/tests/app/test_phone_stats_home.py b/tests/app/test_phone_stats_home.py index a860eedbcf..9305b758d7 100644 --- a/tests/app/test_phone_stats_home.py +++ b/tests/app/test_phone_stats_home.py @@ -4,7 +4,6 @@ from synapse.rest.client import login, room from synapse.server import HomeServer from synapse.util import Clock -from tests import unittest from tests.server import ThreadedMemoryReactorClock from tests.unittest import HomeserverTestCase @@ -12,154 +11,6 @@ FIVE_MINUTES_IN_SECONDS = 300 ONE_DAY_IN_SECONDS = 86400 -class PhoneHomeTestCase(HomeserverTestCase): - servlets = [ - synapse.rest.admin.register_servlets_for_client_rest_resource, - room.register_servlets, - login.register_servlets, - ] - - # Override the retention time for the user_ips table because otherwise it - # gets pruned too aggressively for our R30 test. - @unittest.override_config({"user_ips_max_age": "365d"}) - def test_r30_minimum_usage(self) -> None: - """ - Tests the minimum amount of interaction necessary for the R30 metric - to consider a user 'retained'. - """ - - # Register a user, log it in, create a room and send a message - user_id = self.register_user("u1", "secret!") - access_token = self.login("u1", "secret!") - room_id = self.helper.create_room_as(room_creator=user_id, tok=access_token) - self.helper.send(room_id, "message", tok=access_token) - - # Check the R30 results do not count that user. 
- r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - # Advance 30 days (+ 1 second, because strict inequality causes issues if we are - # bang on 30 days later). - self.reactor.advance(30 * ONE_DAY_IN_SECONDS + 1) - - # (Make sure the user isn't somehow counted by this point.) - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - # Send a message (this counts as activity) - self.helper.send(room_id, "message2", tok=access_token) - - # We have to wait some time for _update_client_ips_batch to get - # called and update the user_ips table. - self.reactor.advance(2 * 60 * 60) - - # *Now* the user is counted. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "unknown": 1}) - - # Advance 29 days. The user has now not posted for 29 days. - self.reactor.advance(29 * ONE_DAY_IN_SECONDS) - - # The user is still counted. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "unknown": 1}) - - # Advance another day. The user has now not posted for 30 days. - self.reactor.advance(ONE_DAY_IN_SECONDS) - - # The user is now no longer counted in R30. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - def test_r30_minimum_usage_using_default_config(self) -> None: - """ - Tests the minimum amount of interaction necessary for the R30 metric - to consider a user 'retained'. - - N.B. This test does not override the `user_ips_max_age` config setting, - which defaults to 28 days. - """ - - # Register a user, log it in, create a room and send a message - user_id = self.register_user("u1", "secret!") - access_token = self.login("u1", "secret!") - room_id = self.helper.create_room_as(room_creator=user_id, tok=access_token) - self.helper.send(room_id, "message", tok=access_token) - - # Check the R30 results do not count that user. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - # Advance 30 days (+ 1 second, because strict inequality causes issues if we are - # bang on 30 days later). - self.reactor.advance(30 * ONE_DAY_IN_SECONDS + 1) - - # (Make sure the user isn't somehow counted by this point.) - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - # Send a message (this counts as activity) - self.helper.send(room_id, "message2", tok=access_token) - - # We have to wait some time for _update_client_ips_batch to get - # called and update the user_ips table. - self.reactor.advance(2 * 60 * 60) - - # *Now* the user is counted. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "unknown": 1}) - - # Advance 27 days. The user has now not posted for 27 days. - self.reactor.advance(27 * ONE_DAY_IN_SECONDS) - - # The user is still counted. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "unknown": 1}) - - # Advance another day. The user has now not posted for 28 days. - self.reactor.advance(ONE_DAY_IN_SECONDS) - - # The user is now no longer counted in R30. 
- # (This is because the user_ips table has been pruned, which by default - # only preserves the last 28 days of entries.) - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - def test_r30_user_must_be_retained_for_at_least_a_month(self) -> None: - """ - Tests that a newly-registered user must be retained for a whole month - before appearing in the R30 statistic, even if they post every day - during that time! - """ - # Register a user and send a message - user_id = self.register_user("u1", "secret!") - access_token = self.login("u1", "secret!") - room_id = self.helper.create_room_as(room_creator=user_id, tok=access_token) - self.helper.send(room_id, "message", tok=access_token) - - # Check the user does not contribute to R30 yet. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 0}) - - for _ in range(30): - # This loop posts a message every day for 30 days - self.reactor.advance(ONE_DAY_IN_SECONDS) - self.helper.send(room_id, "I'm still here", tok=access_token) - - # Notice that the user *still* does not contribute to R30! - r30_results = self.get_success( - self.hs.get_datastores().main.count_r30_users() - ) - self.assertEqual(r30_results, {"all": 0}) - - self.reactor.advance(ONE_DAY_IN_SECONDS) - self.helper.send(room_id, "Still here!", tok=access_token) - - # *Now* the user appears in R30. - r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "unknown": 1}) - - class PhoneHomeR30V2TestCase(HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, @@ -363,11 +214,6 @@ class PhoneHomeR30V2TestCase(HomeserverTestCase): r30_results, {"all": 0, "android": 0, "electron": 0, "ios": 0, "web": 0} ) - # Check that this is a situation where old R30 differs: - # old R30 DOES count this as 'retained'. - r30_results = self.get_success(store.count_r30_users()) - self.assertEqual(r30_results, {"all": 1, "ios": 1}) - # Now we want to check that the user will still be able to appear in # R30v2 as long as the user performs some other activity between # 30 and 60 days later. From ca3c07e833816e69bbaf0372e6cc79f52e6db88e Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 19 May 2023 11:18:45 -0500 Subject: [PATCH 003/562] Trace how many new events from the backfill response we need to process (#15633) You can kinda derive this information from how many `_process_pulled_event` spans there are but it would be nice to quickly glance. --- changelog.d/15633.misc | 1 + synapse/handlers/federation_event.py | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 changelog.d/15633.misc diff --git a/changelog.d/15633.misc b/changelog.d/15633.misc new file mode 100644 index 0000000000..4126a20602 --- /dev/null +++ b/changelog.d/15633.misc @@ -0,0 +1 @@ +Trace how many new events from the backfill response we need to process. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 06343d40e4..9a08618da5 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -890,6 +890,11 @@ class FederationEventHandler: # Continue on with the events that are new to us. new_events.append(event) + set_tag( + SynapseTags.RESULT_PREFIX + "new_events.length", + str(len(new_events)), + ) + # We want to sort these by depth so we process them and # tell clients about them in order. 
sorted_events = sorted(new_events, key=lambda x: x.depth) From 703a8f9c67cfe25b956dfdcca654818d52fa7ebd Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 19 May 2023 12:26:58 -0500 Subject: [PATCH 004/562] Instrument `state` and `state_group` storage related things (tracing) (#15610) Instrument `state` and `state_group` storage related things (tracing) so it's a little more clear where these database transactions are coming from as there is a lot of wires crossing in these functions. Part of `/messages` performance investigation: https://github.com/matrix-org/synapse/issues/13356 --- changelog.d/15610.misc | 1 + synapse/events/snapshot.py | 5 +++ synapse/state/__init__.py | 4 +++ synapse/storage/controllers/state.py | 33 +++++++++++++++++++ synapse/storage/databases/state/bg_updates.py | 5 +++ synapse/storage/databases/state/store.py | 15 +++++++++ 6 files changed, 63 insertions(+) create mode 100644 changelog.d/15610.misc diff --git a/changelog.d/15610.misc b/changelog.d/15610.misc new file mode 100644 index 0000000000..2eff30f6e3 --- /dev/null +++ b/changelog.d/15610.misc @@ -0,0 +1 @@ +Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing. diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index 9b4d692cf4..e7e8225b8e 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -19,6 +19,7 @@ from immutabledict import immutabledict from synapse.appservice import ApplicationService from synapse.events import EventBase +from synapse.logging.opentracing import tag_args, trace from synapse.types import JsonDict, StateMap if TYPE_CHECKING: @@ -242,6 +243,8 @@ class EventContext(UnpersistedEventContextBase): return self._state_group + @trace + @tag_args async def get_current_state_ids( self, state_filter: Optional["StateFilter"] = None ) -> Optional[StateMap[str]]: @@ -275,6 +278,8 @@ class EventContext(UnpersistedEventContextBase): return prev_state_ids + @trace + @tag_args async def get_prev_state_ids( self, state_filter: Optional["StateFilter"] = None ) -> StateMap[str]: diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 6031095249..9bc0c3b7b9 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -45,6 +45,7 @@ from synapse.events.snapshot import ( UnpersistedEventContextBase, ) from synapse.logging.context import ContextResourceUsage +from synapse.logging.opentracing import tag_args, trace from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet from synapse.state import v1, v2 from synapse.storage.databases.main.events_worker import EventRedactBehaviour @@ -270,6 +271,8 @@ class StateHandler: state = await entry.get_state(self._state_storage_controller, StateFilter.all()) return await self.store.get_joined_hosts(room_id, state, entry) + @trace + @tag_args async def calculate_context_info( self, event: EventBase, @@ -465,6 +468,7 @@ class StateHandler: return await unpersisted_context.persist(event) + @trace @measure_func() async def resolve_state_groups_for_events( self, room_id: str, event_ids: Collection[str], await_full_state: bool = True diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 9d7a8a792f..06a80869eb 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -67,6 +67,8 @@ class StateStorageController: """ self._partial_state_room_tracker.notify_un_partial_stated(room_id) + @trace + @tag_args async def get_state_group_delta( self, 
state_group: int ) -> Tuple[Optional[int], Optional[StateMap[str]]]: @@ -84,6 +86,8 @@ class StateStorageController: state_group_delta = await self.stores.state.get_state_group_delta(state_group) return state_group_delta.prev_group, state_group_delta.delta_ids + @trace + @tag_args async def get_state_groups_ids( self, _room_id: str, event_ids: Collection[str], await_full_state: bool = True ) -> Dict[int, MutableStateMap[str]]: @@ -114,6 +118,8 @@ class StateStorageController: return group_to_state + @trace + @tag_args async def get_state_ids_for_group( self, state_group: int, state_filter: Optional[StateFilter] = None ) -> StateMap[str]: @@ -130,6 +136,8 @@ class StateStorageController: return group_to_state[state_group] + @trace + @tag_args async def get_state_groups( self, room_id: str, event_ids: Collection[str] ) -> Dict[int, List[EventBase]]: @@ -165,6 +173,8 @@ class StateStorageController: for group, event_id_map in group_to_ids.items() } + @trace + @tag_args def _get_state_groups_from_groups( self, groups: List[int], state_filter: StateFilter ) -> Awaitable[Dict[int, StateMap[str]]]: @@ -183,6 +193,7 @@ class StateStorageController: return self.stores.state._get_state_groups_from_groups(groups, state_filter) @trace + @tag_args async def get_state_for_events( self, event_ids: Collection[str], state_filter: Optional[StateFilter] = None ) -> Dict[str, StateMap[EventBase]]: @@ -280,6 +291,8 @@ class StateStorageController: return {event: event_to_state[event] for event in event_ids} + @trace + @tag_args async def get_state_for_event( self, event_id: str, state_filter: Optional[StateFilter] = None ) -> StateMap[EventBase]: @@ -303,6 +316,7 @@ class StateStorageController: return state_map[event_id] @trace + @tag_args async def get_state_ids_for_event( self, event_id: str, @@ -333,6 +347,8 @@ class StateStorageController: ) return state_map[event_id] + @trace + @tag_args def get_state_for_groups( self, groups: Iterable[int], state_filter: Optional[StateFilter] = None ) -> Awaitable[Dict[int, MutableStateMap[str]]]: @@ -402,6 +418,8 @@ class StateStorageController: event_id, room_id, prev_group, delta_ids, current_state_ids ) + @trace + @tag_args @cancellable async def get_current_state_ids( self, @@ -442,6 +460,8 @@ class StateStorageController: room_id, on_invalidate=on_invalidate ) + @trace + @tag_args async def get_canonical_alias_for_room(self, room_id: str) -> Optional[str]: """Get canonical alias for room, if any @@ -466,6 +486,8 @@ class StateStorageController: return event.content.get("canonical_alias") + @trace + @tag_args async def get_current_state_deltas( self, prev_stream_id: int, max_stream_id: int ) -> Tuple[int, List[Dict[str, Any]]]: @@ -500,6 +522,7 @@ class StateStorageController: ) @trace + @tag_args async def get_current_state( self, room_id: str, state_filter: Optional[StateFilter] = None ) -> StateMap[EventBase]: @@ -516,6 +539,8 @@ class StateStorageController: return state_map + @trace + @tag_args async def get_current_state_event( self, room_id: str, event_type: str, state_key: str ) -> Optional[EventBase]: @@ -527,6 +552,8 @@ class StateStorageController: ) return state_map.get(key) + @trace + @tag_args async def get_current_hosts_in_room(self, room_id: str) -> AbstractSet[str]: """Get current hosts in room based on current state. 
@@ -538,6 +565,8 @@ class StateStorageController: return await self.stores.main.get_current_hosts_in_room(room_id) + @trace + @tag_args async def get_current_hosts_in_room_ordered(self, room_id: str) -> List[str]: """Get current hosts in room based on current state. @@ -553,6 +582,8 @@ class StateStorageController: return await self.stores.main.get_current_hosts_in_room_ordered(room_id) + @trace + @tag_args async def get_current_hosts_in_room_or_partial_state_approximation( self, room_id: str ) -> Collection[str]: @@ -582,6 +613,8 @@ class StateStorageController: return hosts + @trace + @tag_args async def get_users_in_room_with_profiles( self, room_id: str ) -> Mapping[str, ProfileInfo]: diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 097dea5182..86eb1a8a08 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -15,6 +15,7 @@ import logging from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Tuple, Union +from synapse.logging.opentracing import tag_args, trace from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -40,6 +41,8 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): updates. """ + @trace + @tag_args def _count_state_group_hops_txn( self, txn: LoggingTransaction, state_group: int ) -> int: @@ -83,6 +86,8 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): return count + @trace + @tag_args def _get_state_groups_from_groups_txn( self, txn: LoggingTransaction, diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 29ff64e876..6984d11352 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -20,6 +20,7 @@ import attr from synapse.api.constants import EventTypes from synapse.events import EventBase from synapse.events.snapshot import UnpersistedEventContext, UnpersistedEventContextBase +from synapse.logging.opentracing import tag_args, trace from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -159,6 +160,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): "get_state_group_delta", _get_state_group_delta_txn ) + @trace + @tag_args @cancellable async def _get_state_groups_from_groups( self, groups: List[int], state_filter: StateFilter @@ -187,6 +190,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): return results + @trace + @tag_args def _get_state_for_group_using_cache( self, cache: DictionaryCache[int, StateKey, str], @@ -239,6 +244,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): return state_filter.filter_state(state_dict_ids), not missing_types + @trace + @tag_args @cancellable async def _get_state_for_groups( self, groups: Iterable[int], state_filter: Optional[StateFilter] = None @@ -305,6 +312,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): return state + @trace + @tag_args def _get_state_for_groups_using_cache( self, groups: Iterable[int], @@ -403,6 +412,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): fetched_keys=non_member_types, ) + @trace + @tag_args async def store_state_deltas_for_batched( self, events_and_context: List[Tuple[EventBase, UnpersistedEventContextBase]], @@ -520,6 +531,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): prev_group, ) + @trace + @tag_args async def store_state_group( 
self, event_id: str, @@ -772,6 +785,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): ((sg,) for sg in state_groups_to_delete), ) + @trace + @tag_args async def get_previous_state_groups( self, state_groups: Iterable[int] ) -> Dict[int, int]: From adae1cfc8ce7c9a67aa8e1f51c393222365cae36 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 10:37:50 +0100 Subject: [PATCH 005/562] Bump types-setuptools from 67.7.0.2 to 67.8.0.0 (#15639) * Bump types-setuptools from 67.7.0.2 to 67.8.0.0 Bumps [types-setuptools](https://github.com/python/typeshed) from 67.7.0.2 to 67.8.0.0. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-setuptools dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15639.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15639.misc diff --git a/changelog.d/15639.misc b/changelog.d/15639.misc new file mode 100644 index 0000000000..92230e206f --- /dev/null +++ b/changelog.d/15639.misc @@ -0,0 +1 @@ +Bump types-setuptools from 67.7.0.2 to 67.8.0.0. diff --git a/poetry.lock b/poetry.lock index 48a752986d..40af64a00d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3124,14 +3124,14 @@ types-urllib3 = "*" [[package]] name = "types-setuptools" -version = "67.7.0.2" +version = "67.8.0.0" description = "Typing stubs for setuptools" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-setuptools-67.7.0.2.tar.gz", hash = "sha256:155789e85e79d5682b0d341919d4beb6140408ae52bac922af25b54e36ab25c0"}, - {file = "types_setuptools-67.7.0.2-py3-none-any.whl", hash = "sha256:bd30f6dbe9b83f0a7e6e3eab6d2df748aa4f55700d54e9f077d3aa30cc019445"}, + {file = "types-setuptools-67.8.0.0.tar.gz", hash = "sha256:95c9ed61871d6c0e258433373a4e1753c0a7c3627a46f4d4058c7b5a08ab844f"}, + {file = "types_setuptools-67.8.0.0-py3-none-any.whl", hash = "sha256:6df73340d96b238a4188b7b7668814b37e8018168aef1eef94a3b1872e3f60ff"}, ] [[package]] From 8516001566362d4659c2ab498f83c90bd547106c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 10:38:01 +0100 Subject: [PATCH 006/562] Bump types-pillow from 9.5.0.2 to 9.5.0.4 (#15640) * Bump types-pillow from 9.5.0.2 to 9.5.0.4 Bumps [types-pillow](https://github.com/python/typeshed) from 9.5.0.2 to 9.5.0.4. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-pillow dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15640.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15640.misc diff --git a/changelog.d/15640.misc b/changelog.d/15640.misc new file mode 100644 index 0000000000..4c2a3dbc52 --- /dev/null +++ b/changelog.d/15640.misc @@ -0,0 +1 @@ +Bump types-pillow from 9.5.0.2 to 9.5.0.4. 
diff --git a/poetry.lock b/poetry.lock index 40af64a00d..56a89fe5b5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3058,14 +3058,14 @@ files = [ [[package]] name = "types-pillow" -version = "9.5.0.2" +version = "9.5.0.4" description = "Typing stubs for Pillow" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-Pillow-9.5.0.2.tar.gz", hash = "sha256:b3f9f621f259566c19c1deca21901017c8b1e3e200ed2e49e0a2d83c0a5175db"}, - {file = "types_Pillow-9.5.0.2-py3-none-any.whl", hash = "sha256:58fdebd0ffa2353ecccdd622adde23bce89da5c0c8b96c34f2d1eca7b7e42d0e"}, + {file = "types-Pillow-9.5.0.4.tar.gz", hash = "sha256:f1b6af47abd151847ee25911ffeba784899bc7dc7f9eba8ca6a5aac522b012ef"}, + {file = "types_Pillow-9.5.0.4-py3-none-any.whl", hash = "sha256:69427d9fa4320ff6e30f00fb9c0dd71185dc0a16de4757774220104759483466"}, ] [[package]] From 875015d512a80c2c72379340ead796fc9ca1c189 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 10:38:08 +0100 Subject: [PATCH 007/562] Bump sphinx from 6.1.3 to 6.2.1 (#15641) * Bump sphinx from 6.1.3 to 6.2.1 Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 6.1.3 to 6.2.1. - [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/master/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v6.1.3...v6.2.1) --- updated-dependencies: - dependency-name: sphinx dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15641.misc | 1 + poetry.lock | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15641.misc diff --git a/changelog.d/15641.misc b/changelog.d/15641.misc new file mode 100644 index 0000000000..a85d85c58e --- /dev/null +++ b/changelog.d/15641.misc @@ -0,0 +1 @@ +Bump sphinx from 6.1.3 to 6.2.1. 
diff --git a/poetry.lock b/poetry.lock index 56a89fe5b5..4c2f554afc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2565,21 +2565,21 @@ files = [ [[package]] name = "sphinx" -version = "6.1.3" +version = "6.2.1" description = "Python documentation generator" category = "dev" optional = false python-versions = ">=3.8" files = [ - {file = "Sphinx-6.1.3.tar.gz", hash = "sha256:0dac3b698538ffef41716cf97ba26c1c7788dba73ce6f150c1ff5b4720786dd2"}, - {file = "sphinx-6.1.3-py3-none-any.whl", hash = "sha256:807d1cb3d6be87eb78a381c3e70ebd8d346b9a25f3753e9947e866b2786865fc"}, + {file = "Sphinx-6.2.1.tar.gz", hash = "sha256:6d56a34697bb749ffa0152feafc4b19836c755d90a7c59b72bc7dfd371b9cc6b"}, + {file = "sphinx-6.2.1-py3-none-any.whl", hash = "sha256:97787ff1fa3256a3eef9eda523a63dbf299f7b47e053cfcf684a1c2a8380c912"}, ] [package.dependencies] alabaster = ">=0.7,<0.8" babel = ">=2.9" colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} -docutils = ">=0.18,<0.20" +docutils = ">=0.18.1,<0.20" imagesize = ">=1.3" importlib-metadata = {version = ">=4.8", markers = "python_version < \"3.10\""} Jinja2 = ">=3.0" @@ -2597,7 +2597,7 @@ sphinxcontrib-serializinghtml = ">=1.1.5" [package.extras] docs = ["sphinxcontrib-websupport"] lint = ["docutils-stubs", "flake8 (>=3.5.0)", "flake8-simplify", "isort", "mypy (>=0.990)", "ruff", "sphinx-lint", "types-requests"] -test = ["cython", "html5lib", "pytest (>=4.6)"] +test = ["cython", "filelock", "html5lib", "pytest (>=4.6)"] [[package]] name = "sphinx-autodoc2" From a47b2065f066396f41a306076282e86750d06728 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 12:12:59 +0100 Subject: [PATCH 008/562] Bump furo from 2023.3.27 to 2023.5.20 (#15642) * Bump furo from 2023.3.27 to 2023.5.20 Bumps [furo](https://github.com/pradyunsg/furo) from 2023.3.27 to 2023.5.20. - [Release notes](https://github.com/pradyunsg/furo/releases) - [Changelog](https://github.com/pradyunsg/furo/blob/main/docs/changelog.md) - [Commits](https://github.com/pradyunsg/furo/compare/2023.03.27...2023.05.20) --- updated-dependencies: - dependency-name: furo dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15642.misc | 1 + poetry.lock | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15642.misc diff --git a/changelog.d/15642.misc b/changelog.d/15642.misc new file mode 100644 index 0000000000..5d6125140d --- /dev/null +++ b/changelog.d/15642.misc @@ -0,0 +1 @@ +Bump furo from 2023.3.27 to 2023.5.20. diff --git a/poetry.lock b/poetry.lock index 4c2f554afc..b756406714 100644 --- a/poetry.lock +++ b/poetry.lock @@ -580,20 +580,20 @@ dev = ["Sphinx", "coverage", "flake8", "lxml", "lxml-stubs", "memory-profiler", [[package]] name = "furo" -version = "2023.3.27" +version = "2023.5.20" description = "A clean customisable Sphinx documentation theme." 
category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "furo-2023.3.27-py3-none-any.whl", hash = "sha256:4ab2be254a2d5e52792d0ca793a12c35582dd09897228a6dd47885dabd5c9521"}, - {file = "furo-2023.3.27.tar.gz", hash = "sha256:b99e7867a5cc833b2b34d7230631dd6558c7a29f93071fdbb5709634bb33c5a5"}, + {file = "furo-2023.5.20-py3-none-any.whl", hash = "sha256:594a8436ddfe0c071f3a9e9a209c314a219d8341f3f1af33fdf7c69544fab9e6"}, + {file = "furo-2023.5.20.tar.gz", hash = "sha256:40e09fa17c6f4b22419d122e933089226dcdb59747b5b6c79363089827dea16f"}, ] [package.dependencies] beautifulsoup4 = "*" pygments = ">=2.7" -sphinx = ">=5.0,<7.0" +sphinx = ">=6.0,<8.0" sphinx-basic-ng = "*" [[package]] From cc53c96bf813045c08593d4996deb57915fbb0e5 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 22 May 2023 13:25:39 +0100 Subject: [PATCH 009/562] Limit the size of the `HomeServerConfig` cache in trial test runs (#15646) ...to try to control memory usage. `HomeServerConfig`s hold on to many Jinja2 objects, which come out to over 0.5 MiB per config. Over the course of a full test run, the cache grows to ~360 entries. Limit it to 8 entries. Part of #15622. Signed-off-by: Sean Quah --- changelog.d/15646.misc | 1 + tests/unittest.py | 23 +++++++---------------- 2 files changed, 8 insertions(+), 16 deletions(-) create mode 100644 changelog.d/15646.misc diff --git a/changelog.d/15646.misc b/changelog.d/15646.misc new file mode 100644 index 0000000000..872afe30b8 --- /dev/null +++ b/changelog.d/15646.misc @@ -0,0 +1 @@ +Limit the size of the `HomeServerConfig` cache in trial test runs. diff --git a/tests/unittest.py b/tests/unittest.py index 623c5a75a2..c73195b32b 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import functools import gc import hashlib import hmac @@ -150,7 +151,11 @@ def deepcopy_config(config: _TConfig) -> _TConfig: return new_config -_make_homeserver_config_obj_cache: Dict[str, Union[RootConfig, Config]] = {} +@functools.lru_cache(maxsize=8) +def _parse_config_dict(config: str) -> RootConfig: + config_obj = HomeServerConfig() + config_obj.parse_config_dict(json.loads(config), "", "") + return config_obj def make_homeserver_config_obj(config: Dict[str, Any]) -> RootConfig: @@ -164,21 +169,7 @@ def make_homeserver_config_obj(config: Dict[str, Any]) -> RootConfig: but it keeps a cache of `HomeServerConfig` instances and deepcopies them as needed, to avoid validating the whole configuration every time. 
""" - cache_key = json.dumps(config) - - if cache_key in _make_homeserver_config_obj_cache: - # Cache hit: reuse the existing instance - config_obj = _make_homeserver_config_obj_cache[cache_key] - else: - # Cache miss; create the actual instance - config_obj = HomeServerConfig() - config_obj.parse_config_dict(config, "", "") - - # Add to the cache - _make_homeserver_config_obj_cache[cache_key] = config_obj - - assert isinstance(config_obj, RootConfig) - + config_obj = _parse_config_dict(json.dumps(config, sort_keys=True)) return deepcopy_config(config_obj) From 201597fc86b2fb88f26b529a7cc5f077efe3dd9c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 15:39:19 +0100 Subject: [PATCH 010/562] Bump pygithub from 1.58.1 to 1.58.2 (#15643) * Bump pygithub from 1.58.1 to 1.58.2 Bumps [pygithub](https://github.com/pygithub/pygithub) from 1.58.1 to 1.58.2. - [Release notes](https://github.com/pygithub/pygithub/releases) - [Changelog](https://github.com/PyGithub/PyGithub/blob/v1.58.2/doc/changes.rst) - [Commits](https://github.com/pygithub/pygithub/compare/v1.58.1...v1.58.2) --- updated-dependencies: - dependency-name: pygithub dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15643.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15643.misc diff --git a/changelog.d/15643.misc b/changelog.d/15643.misc new file mode 100644 index 0000000000..5bd2e74071 --- /dev/null +++ b/changelog.d/15643.misc @@ -0,0 +1 @@ +Bump pygithub from 1.58.1 to 1.58.2. diff --git a/poetry.lock b/poetry.lock index b756406714..6f0374bb3f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1940,14 +1940,14 @@ email = ["email-validator (>=1.0.3)"] [[package]] name = "pygithub" -version = "1.58.1" +version = "1.58.2" description = "Use the full Github API v3" category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "PyGithub-1.58.1-py3-none-any.whl", hash = "sha256:4e7fe9c3ec30d5fde5b4fbb97f18821c9dbf372bf6df337fe66f6689a65e0a83"}, - {file = "PyGithub-1.58.1.tar.gz", hash = "sha256:7d528b4ad92bc13122129fafd444ce3d04c47d2d801f6446b6e6ee2d410235b3"}, + {file = "PyGithub-1.58.2-py3-none-any.whl", hash = "sha256:f435884af617c6debaa76cbc355372d1027445a56fbc39972a3b9ed4968badc8"}, + {file = "PyGithub-1.58.2.tar.gz", hash = "sha256:1e6b1b7afe31f75151fb81f7ab6b984a7188a852bdb123dbb9ae90023c3ce60f"}, ] [package.dependencies] From c5d1e6d414fa7b4074bc72ca3719c1341a1c5379 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 22 May 2023 11:31:22 -0400 Subject: [PATCH 011/562] Properly parse event_fields in filters (#15607) The event_fields property in filters should use the proper escape rules, namely backslashes can be escaped with an additional backslash. This adds tests (adapted from matrix-js-sdk) and implements the logic to properly split the event_fields strings. 
--- changelog.d/15607.bugfix | 1 + synapse/api/filtering.py | 15 +------- synapse/events/utils.py | 72 +++++++++++++++++++++++++++++-------- tests/api/test_filtering.py | 6 ---- tests/events/test_utils.py | 39 ++++++++++++++++++++ 5 files changed, 99 insertions(+), 34 deletions(-) create mode 100644 changelog.d/15607.bugfix diff --git a/changelog.d/15607.bugfix b/changelog.d/15607.bugfix new file mode 100644 index 0000000000..a2767adbe2 --- /dev/null +++ b/changelog.d/15607.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where filters with multiple backslashes were rejected. diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index de7c56bc0f..82aeef8d19 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -128,20 +128,7 @@ USER_FILTER_SCHEMA = { "account_data": {"$ref": "#/definitions/filter"}, "room": {"$ref": "#/definitions/room_filter"}, "event_format": {"type": "string", "enum": ["client", "federation"]}, - "event_fields": { - "type": "array", - "items": { - "type": "string", - # Don't allow '\\' in event field filters. This makes matching - # events a lot easier as we can then use a negative lookbehind - # assertion to split '\.' If we allowed \\ then it would - # incorrectly split '\\.' See synapse.events.utils.serialize_event - # - # Note that because this is a regular expression, we have to escape - # each backslash in the pattern. - "pattern": r"^((?!\\\\).)*$", - }, - }, + "event_fields": {"type": "array", "items": {"type": "string"}}, }, "additionalProperties": True, # Allow new fields for forward compatibility } diff --git a/synapse/events/utils.py b/synapse/events/utils.py index e6d040176b..e7b7b78b84 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -22,6 +22,7 @@ from typing import ( Iterable, List, Mapping, + Match, MutableMapping, Optional, Union, @@ -46,12 +47,10 @@ if TYPE_CHECKING: from synapse.handlers.relations import BundledAggregations -# Split strings on "." but not "\." This uses a negative lookbehind assertion for '\' -# (? None: sub_out_dict[key_to_move] = sub_dict[key_to_move] +def _escape_slash(m: Match[str]) -> str: + """ + Replacement function; replace a backslash-backslash or backslash-dot with the + second character. Leaves any other string alone. + """ + if m.group(1) in ("\\", "."): + return m.group(1) + return m.group(0) + + +def _split_field(field: str) -> List[str]: + """ + Splits strings on unescaped dots and removes escaping. + + Args: + field: A string representing a path to a field. + + Returns: + A list of nested fields to traverse. + """ + + # Convert the field and remove escaping: + # + # 1. "content.body.thing\.with\.dots" + # 2. ["content", "body", "thing\.with\.dots"] + # 3. ["content", "body", "thing.with.dots"] + + # Find all dots (and their preceding backslashes). If the dot is unescaped + # then emit a new field part. + result = [] + prev_start = 0 + for match in SPLIT_FIELD_REGEX.finditer(field): + # If the match is an *even* number of characters than the dot was escaped. + if len(match.group()) % 2 == 0: + continue + + # Add a new part (up to the dot, exclusive) after escaping. + result.append( + ESCAPE_SEQUENCE_PATTERN.sub( + _escape_slash, field[prev_start : match.end() - 1] + ) + ) + prev_start = match.end() + + # Add any part of the field after the last unescaped dot. (Note that if the + # character is a dot this correctly adds a blank string.) 
+ result.append(re.sub(r"\\(.)", _escape_slash, field[prev_start:])) + + return result + + def only_fields(dictionary: JsonDict, fields: List[str]) -> JsonDict: """Return a new dict with only the fields in 'dictionary' which are present in 'fields'. @@ -260,7 +310,7 @@ def only_fields(dictionary: JsonDict, fields: List[str]) -> JsonDict: If there are no event fields specified then all fields are included. The entries may include '.' characters to indicate sub-fields. So ['content.body'] will include the 'body' field of the 'content' object. - A literal '.' character in a field name may be escaped using a '\'. + A literal '.' or '\' character in a field name may be escaped using a '\'. Args: dictionary: The dictionary to read from. @@ -275,13 +325,7 @@ def only_fields(dictionary: JsonDict, fields: List[str]) -> JsonDict: # for each field, convert it: # ["content.body.thing\.with\.dots"] => [["content", "body", "thing\.with\.dots"]] - split_fields = [SPLIT_FIELD_REGEX.split(f) for f in fields] - - # for each element of the output array of arrays: - # remove escaping so we can use the right key names. - split_fields[:] = [ - [f.replace(r"\.", r".") for f in field_array] for field_array in split_fields - ] + split_fields = [_split_field(f) for f in fields] output: JsonDict = {} for field_array in split_fields: diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index 222449baac..aa6af5ad7b 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -48,8 +48,6 @@ class FilteringTestCase(unittest.HomeserverTestCase): invalid_filters: List[JsonDict] = [ # `account_data` must be a dictionary {"account_data": "Hello World"}, - # `event_fields` entries must not contain backslashes - {"event_fields": [r"\\foo"]}, # `event_format` must be "client" or "federation" {"event_format": "other"}, # `not_rooms` must contain valid room IDs @@ -114,10 +112,6 @@ class FilteringTestCase(unittest.HomeserverTestCase): "event_format": "client", "event_fields": ["type", "content", "sender"], }, - # a single backslash should be permitted (though it is debatable whether - # it should be permitted before anything other than `.`, and what that - # actually means) - # # (note that event_fields is implemented in # synapse.events.utils.serialize_event, and so whether this actually works # is tested elsewhere. We just want to check that it is allowed through the diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index e40eac2eb0..c9a610db9a 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -16,6 +16,7 @@ import unittest as stdlib_unittest from typing import Any, List, Mapping, Optional import attr +from parameterized import parameterized from synapse.api.constants import EventContentFields from synapse.api.room_versions import RoomVersions @@ -23,6 +24,7 @@ from synapse.events import EventBase, make_event_from_dict from synapse.events.utils import ( PowerLevelsContent, SerializeEventConfig, + _split_field, copy_and_fixup_power_levels_contents, maybe_upsert_event_field, prune_event, @@ -794,3 +796,40 @@ class CopyPowerLevelsContentTestCase(stdlib_unittest.TestCase): def test_invalid_nesting_raises_type_error(self) -> None: with self.assertRaises(TypeError): copy_and_fixup_power_levels_contents({"a": {"b": {"c": 1}}}) # type: ignore[dict-item] + + +class SplitFieldTestCase(stdlib_unittest.TestCase): + @parameterized.expand( + [ + # A field with no dots. + ["m", ["m"]], + # Simple dotted fields. 
+ ["m.foo", ["m", "foo"]], + ["m.foo.bar", ["m", "foo", "bar"]], + # Backslash is used as an escape character. + [r"m\.foo", ["m.foo"]], + [r"m\\.foo", ["m\\", "foo"]], + [r"m\\\.foo", [r"m\.foo"]], + [r"m\\\\.foo", ["m\\\\", "foo"]], + [r"m\foo", [r"m\foo"]], + [r"m\\foo", [r"m\foo"]], + [r"m\\\foo", [r"m\\foo"]], + [r"m\\\\foo", [r"m\\foo"]], + # Ensure that escapes at the end don't cause issues. + ["m.foo\\", ["m", "foo\\"]], + ["m.foo\\", ["m", "foo\\"]], + [r"m.foo\.", ["m", "foo."]], + [r"m.foo\\.", ["m", "foo\\", ""]], + [r"m.foo\\\.", ["m", r"foo\."]], + # Empty parts (corresponding to properties which are an empty string) are allowed. + [".m", ["", "m"]], + ["..m", ["", "", "m"]], + ["m.", ["m", ""]], + ["m..", ["m", "", ""]], + ["m..foo", ["m", "", "foo"]], + # Invalid escape sequences. + [r"\m", [r"\m"]], + ] + ) + def test_split_field(self, input: str, expected: str) -> None: + self.assertEqual(_split_field(input), expected) From 737f7ddf5873a28d4334dc7f6b25edbaaaf934c7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 22 May 2023 18:58:58 +0100 Subject: [PATCH 012/562] Remove outdated comment in log config (#15648) --- changelog.d/15648.doc | 1 + docs/sample_log_config.yaml | 4 +--- synapse/config/logger.py | 4 +--- 3 files changed, 3 insertions(+), 6 deletions(-) create mode 100644 changelog.d/15648.doc diff --git a/changelog.d/15648.doc b/changelog.d/15648.doc new file mode 100644 index 0000000000..70f65ebbff --- /dev/null +++ b/changelog.d/15648.doc @@ -0,0 +1 @@ +Remove outdated comment from the generated and sample homeserver log configs. \ No newline at end of file diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml index 6339160d00..ae0318122e 100644 --- a/docs/sample_log_config.yaml +++ b/docs/sample_log_config.yaml @@ -68,9 +68,7 @@ root: # Write logs to the `buffer` handler, which will buffer them together in memory, # then write them to a file. # - # Replace "buffer" with "console" to log to stderr instead. (Note that you'll - # also need to update the configuration for the `twisted` logger above, in - # this case.) + # Replace "buffer" with "console" to log to stderr instead. # handlers: [buffer] diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 56db875b25..1e080133dc 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -117,9 +117,7 @@ root: # Write logs to the `buffer` handler, which will buffer them together in memory, # then write them to a file. # - # Replace "buffer" with "console" to log to stderr instead. (Note that you'll - # also need to update the configuration for the `twisted` logger above, in - # this case.) + # Replace "buffer" with "console" to log to stderr instead. # handlers: [buffer] From 1903c7e5edccc86f6d28aed33dc2995b43d941b7 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 22 May 2023 13:49:01 -0500 Subject: [PATCH 013/562] Remove duplicate timestamp from test logs (`_trial_temp/test.log`) (#15636) Fix https://github.com/matrix-org/synapse/issues/15618 ### Before ``` 2023-05-17 22:51:36-0500 [-] 2023-05-17 22:51:36,889 - synapse.server - 338 - INFO - sentinel - Finished setting up. ``` ### After ``` 2023-05-19 18:16:20-0500 [-] synapse.server - 338 - INFO - sentinel - Finished setting up. 
``` ### Dev notes The `Twisted.Logger` controls the `2023-05-19 18:16:20-0500 [-]` prefix, see : [`twisted/twisted` -> `src/twisted/logger/_format.py#L362-L374`](https://github.com/twisted/twisted/blob/34b161e66bc7c9f9efbb95e82c770a863933e498/src/twisted/logger/_format.py#L362-L374) And we delegate our logs to the Twisted Logger for the tests which puts it in `_trial_temp/test.log` --- changelog.d/15636.misc | 1 + tests/test_utils/logging_setup.py | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15636.misc diff --git a/changelog.d/15636.misc b/changelog.d/15636.misc new file mode 100644 index 0000000000..82329c5e43 --- /dev/null +++ b/changelog.d/15636.misc @@ -0,0 +1 @@ +Remove duplicate timestamp from test logs (`_trial_temp/test.log`). diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py index b522163a34..c37f205ed0 100644 --- a/tests/test_utils/logging_setup.py +++ b/tests/test_utils/logging_setup.py @@ -40,10 +40,9 @@ def setup_logging() -> None: """ root_logger = logging.getLogger() - log_format = ( - "%(asctime)s - %(name)s - %(lineno)d - " - "%(levelname)s - %(request)s - %(message)s" - ) + # We exclude `%(asctime)s` from this format because the Twisted logger adds its own + # timestamp + log_format = "%(name)s - %(lineno)d - " "%(levelname)s - %(request)s - %(message)s" handler = ToTwistedHandler() formatter = logging.Formatter(log_format) From 11ff4884e70457431ec2f816001f3772ac68a522 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 23 May 2023 10:57:39 +0100 Subject: [PATCH 014/562] 1.84.0 --- CHANGES.md | 9 +++++++++ changelog.d/15599.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/15599.bugfix diff --git a/CHANGES.md b/CHANGES.md index 4877ba9d44..ca594a9532 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.84.0 (2023-05-23) +=========================== + +Bugfixes +-------- + +- Print full error and stack-trace of any exception that occurs during startup/initialization. ([\#15599](https://github.com/matrix-org/synapse/issues/15599)) + + Synapse 1.84.0rc1 (2023-05-16) ============================== diff --git a/changelog.d/15599.bugfix b/changelog.d/15599.bugfix deleted file mode 100644 index b58af8ad55..0000000000 --- a/changelog.d/15599.bugfix +++ /dev/null @@ -1 +0,0 @@ -Print full error and stack-trace of any exception that occurs during startup/initialization. diff --git a/debian/changelog b/debian/changelog index ad163add2b..51935e03b6 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.84.0) stable; urgency=medium + + * New Synapse release 1.84.0. + + -- Synapse Packaging team Tue, 23 May 2023 10:57:22 +0100 + matrix-synapse-py3 (1.84.0~rc1) stable; urgency=medium * New Synapse release 1.84.0rc1. 
diff --git a/pyproject.toml b/pyproject.toml index 86e1537a6d..9c77f9294a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.84.0rc1" +version = "1.84.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From ea6fcda98d56dd8f34712de5691e77c99fc5c0ae Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 23 May 2023 11:03:06 +0100 Subject: [PATCH 015/562] Tweak changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index ca594a9532..dc564d5479 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ Synapse 1.84.0 (2023-05-23) Bugfixes -------- -- Print full error and stack-trace of any exception that occurs during startup/initialization. ([\#15599](https://github.com/matrix-org/synapse/issues/15599)) +- Fix a bug introduced in Synapse 1.84.0rc1 where errors during startup were not reported correctly on Python < 3.10. ([\#15599](https://github.com/matrix-org/synapse/issues/15599)) Synapse 1.84.0rc1 (2023-05-16) From 5cae9158e67babe0553bc356802495a068222685 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 23 May 2023 11:13:38 +0100 Subject: [PATCH 016/562] Tweak changelog and upgrade notes --- CHANGES.md | 9 ++++++++- docs/upgrade.md | 23 +++++++++++++++++------ 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index dc564d5479..e9397158f1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,8 @@ Synapse 1.84.0 (2023-05-23) =========================== +The `worker_replication_*` configuration settings have been deprecated in favour of configuring the main process consistently with other instances in the `instance_map`. The deprecated settings will be removed in Synapse v1.88.0, but changing your configuration in advance is recommended. See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.84/docs/upgrade.md#upgrading-to-v1840) for more information. + Bugfixes -------- @@ -32,6 +34,12 @@ Bugfixes - Require at least poetry-core v1.1.0. ([\#15566](https://github.com/matrix-org/synapse/issues/15566), [\#15571](https://github.com/matrix-org/synapse/issues/15571)) +Deprecations and Removals +------------------------- + +- Remove need for `worker_replication_*` based settings in worker configuration yaml by placing this data directly on the `instance_map` instead. ([\#15491](https://github.com/matrix-org/synapse/issues/15491)) + + Updates to the Docker image --------------------------- @@ -51,7 +59,6 @@ Internal Changes - Use oEmbed to generate URL previews for YouTube Shorts. ([\#15025](https://github.com/matrix-org/synapse/issues/15025)) - Create new `Client` for use with HTTP Replication between workers. Contributed by Jason Little. ([\#15470](https://github.com/matrix-org/synapse/issues/15470)) -- Remove need for `worker_replication_*` based settings in worker configuration yaml by placing this data directly on the `instance_map` instead. ([\#15491](https://github.com/matrix-org/synapse/issues/15491)) - Bump pyicu from 2.10.2 to 2.11. ([\#15509](https://github.com/matrix-org/synapse/issues/15509)) - Remove references to supporting per-user flag for [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654). 
([\#15522](https://github.com/matrix-org/synapse/issues/15522)) - Don't use a trusted key server when running the demo scripts. ([\#15527](https://github.com/matrix-org/synapse/issues/15527)) diff --git a/docs/upgrade.md b/docs/upgrade.md index 0625de8afb..af999dd91f 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -92,15 +92,22 @@ process, for example: ## Deprecation of `worker_replication_*` configuration settings -When using workers, +When using workers, + * `worker_replication_host` * `worker_replication_http_port` * `worker_replication_http_tls` -can now be removed from individual worker YAML configuration ***if*** you add the main process to the `instance_map` in the shared YAML configuration, -using the name `main`. +should now be removed from individual worker YAML configurations and the main process should instead be added to the `instance_map` +in the shared YAML configuration, using the name `main`. + +The old `worker_replication_*` settings are now considered deprecated and are expected to be removed in Synapse v1.88.0. + + +### Example change + +#### Before: -### Before: Shared YAML ```yaml instance_map: @@ -109,6 +116,7 @@ instance_map: port: 5678 tls: false ``` + Worker YAML ```yaml worker_app: synapse.app.generic_worker @@ -130,7 +138,10 @@ worker_listeners: worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml ``` -### After: + + +#### After: + Shared YAML ```yaml instance_map: @@ -143,6 +154,7 @@ instance_map: port: 5678 tls: false ``` + Worker YAML ```yaml worker_app: synapse.app.generic_worker @@ -165,7 +177,6 @@ Notes: * `tls` is optional but mirrors the functionality of `worker_replication_http_tls` - # Upgrading to v1.81.0 ## Application service path & authentication deprecations From 03042e435b23c82a1c911e7ca4011a333e3ecb71 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 May 2023 07:28:51 -0400 Subject: [PATCH 017/562] Bump requests from 2.28.2 to 2.31.0 (#15651) --- changelog.d/15651.misc | 1 + poetry.lock | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15651.misc diff --git a/changelog.d/15651.misc b/changelog.d/15651.misc new file mode 100644 index 0000000000..4d7c0248b2 --- /dev/null +++ b/changelog.d/15651.misc @@ -0,0 +1 @@ +Bump requests from 2.28.2 to 2.31.0. diff --git a/poetry.lock b/poetry.lock index 6f0374bb3f..3f8bf7c304 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2251,21 +2251,21 @@ md = ["cmarkgfm (>=0.8.0)"] [[package]] name = "requests" -version = "2.28.2" +version = "2.31.0" description = "Python HTTP for Humans." 
category = "main" optional = false -python-versions = ">=3.7, <4" +python-versions = ">=3.7" files = [ - {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"}, - {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"}, + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, ] [package.dependencies] certifi = ">=2017.4.17" charset-normalizer = ">=2,<4" idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<1.27" +urllib3 = ">=1.21.1,<3" [package.extras] socks = ["PySocks (>=1.5.6,!=1.5.7)"] From 1df0221bda65cc90ee3a15d210b87e8065bc865f Mon Sep 17 00:00:00 2001 From: Jason Little Date: Tue, 23 May 2023 08:05:30 -0500 Subject: [PATCH 018/562] Use a custom scheme & the worker name for replication requests. (#15578) All the information needed is already in the `instance_map`, so use that instead of passing the hostname / IP & port manually for each replication request. This consolidates logic for future improvements of using e.g. UNIX sockets for workers. --- changelog.d/15578.misc | 1 + synapse/http/client.py | 1 + synapse/http/replicationagent.py | 34 ++++++++++++++++++++++++------- synapse/replication/http/_base.py | 18 ++++++---------- 4 files changed, 35 insertions(+), 19 deletions(-) create mode 100644 changelog.d/15578.misc diff --git a/changelog.d/15578.misc b/changelog.d/15578.misc new file mode 100644 index 0000000000..a54422239b --- /dev/null +++ b/changelog.d/15578.misc @@ -0,0 +1 @@ +Allow connecting to HTTP Replication Endpoints by using `worker_name` when constructing the request. diff --git a/synapse/http/client.py b/synapse/http/client.py index f1ab7a8bc9..09ea93e10d 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -835,6 +835,7 @@ class ReplicationClient(BaseHttpClient): self.agent: IAgent = ReplicationAgent( hs.get_reactor(), + hs.config.worker.instance_map, contextFactory=hs.get_http_client_context_factory(), pool=pool, ) diff --git a/synapse/http/replicationagent.py b/synapse/http/replicationagent.py index 5ecd08be0f..800f21873d 100644 --- a/synapse/http/replicationagent.py +++ b/synapse/http/replicationagent.py @@ -13,7 +13,7 @@ # limitations under the License. import logging -from typing import Optional +from typing import Dict, Optional from zope.interface import implementer @@ -32,6 +32,7 @@ from twisted.web.iweb import ( IResponse, ) +from synapse.config.workers import InstanceLocationConfig from synapse.types import ISynapseReactor logger = logging.getLogger(__name__) @@ -44,9 +45,11 @@ class ReplicationEndpointFactory: def __init__( self, reactor: ISynapseReactor, + instance_map: Dict[str, InstanceLocationConfig], context_factory: IPolicyForHTTPS, ) -> None: self.reactor = reactor + self.instance_map = instance_map self.context_factory = context_factory def endpointForURI(self, uri: URI) -> IStreamClientEndpoint: @@ -58,15 +61,29 @@ class ReplicationEndpointFactory: Returns: The correct client endpoint object """ - if uri.scheme in (b"http", b"https"): - endpoint = HostnameEndpoint(self.reactor, uri.host, uri.port) - if uri.scheme == b"https": + # The given URI has a special scheme and includes the worker name. The + # actual connection details are pulled from the instance map. 
+ worker_name = uri.netloc.decode("utf-8") + scheme = self.instance_map[worker_name].scheme() + + if scheme in ("http", "https"): + endpoint = HostnameEndpoint( + self.reactor, + self.instance_map[worker_name].host, + self.instance_map[worker_name].port, + ) + if scheme == "https": endpoint = wrapClientTLS( - self.context_factory.creatorForNetloc(uri.host, uri.port), endpoint + # The 'port' argument below isn't actually used by the function + self.context_factory.creatorForNetloc( + self.instance_map[worker_name].host, + self.instance_map[worker_name].port, + ), + endpoint, ) return endpoint else: - raise SchemeNotSupported(f"Unsupported scheme: {uri.scheme!r}") + raise SchemeNotSupported(f"Unsupported scheme: {scheme}") @implementer(IAgent) @@ -80,6 +97,7 @@ class ReplicationAgent(_AgentBase): def __init__( self, reactor: ISynapseReactor, + instance_map: Dict[str, InstanceLocationConfig], contextFactory: IPolicyForHTTPS, connectTimeout: Optional[float] = None, bindAddress: Optional[bytes] = None, @@ -102,7 +120,9 @@ class ReplicationAgent(_AgentBase): created. """ _AgentBase.__init__(self, reactor, pool) - endpoint_factory = ReplicationEndpointFactory(reactor, contextFactory) + endpoint_factory = ReplicationEndpointFactory( + reactor, instance_map, contextFactory + ) self._endpointFactory = endpoint_factory def request( diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index dc7820f963..63cf24a14d 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -219,11 +219,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): with outgoing_gauge.track_inprogress(): if instance_name == local_instance_name: raise Exception("Trying to send HTTP request to self") - if instance_name in instance_map: - host = instance_map[instance_name].host - port = instance_map[instance_name].port - tls = instance_map[instance_name].tls - else: + if instance_name not in instance_map: raise Exception( "Instance %r not in 'instance_map' config" % (instance_name,) ) @@ -271,13 +267,11 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): "Unknown METHOD on %s replication endpoint" % (cls.NAME,) ) - # Here the protocol is hard coded to be http by default or https in case the replication - # port is set to have tls true. - scheme = "https" if tls else "http" - uri = "%s://%s:%s/_synapse/replication/%s/%s" % ( - scheme, - host, - port, + # Hard code a special scheme to show this only used for replication. The + # instance_name will be passed into the ReplicationEndpointFactory to + # determine connection details from the instance_map. + uri = "synapse-replication://%s/_synapse/replication/%s/%s" % ( + instance_name, cls.NAME, "/".join(url_args), ) From 7c9b91790c013d11ca88a9d01e0054939eda8523 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 23 May 2023 10:35:43 -0400 Subject: [PATCH 019/562] Consolidate logic to check for deactivated users. (#15634) This moves the deactivated user check to the method which all login types call. Additionally updates the application service tests to be more realistic by removing invalid tests and fixing server names. 
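A minimal sketch of the consolidated check described above. `FakeStore` and
`complete_login` are simplified stand-ins, not Synapse's real API (the real
logic lands in `LoginRestServlet._complete_login` in the diff below): every
login type funnels through one completion step, which performs the
deactivation check exactly once, with an opt-out for appservice senders that
are never registered.

```python
import asyncio
from typing import Dict, Set


class FakeStore:
    """Hypothetical stand-in for Synapse's datastore."""

    def __init__(self, deactivated: Set[str]) -> None:
        self._deactivated = deactivated

    async def get_user_deactivated_status(self, user_id: str) -> bool:
        return user_id in self._deactivated


async def complete_login(
    store: FakeStore, user_id: str, should_check_deactivated: bool = True
) -> Dict[str, str]:
    # Every login type (password, token, JWT, SSO callback, ...) funnels
    # through this one completion step, so the deactivation check runs
    # exactly once rather than being duplicated per login flow.
    # Appservice sender users are exempt: they are never registered, so
    # they have no deactivation status to look up.
    if should_check_deactivated and await store.get_user_deactivated_status(user_id):
        raise PermissionError("This account has been deactivated")
    return {"user_id": user_id, "access_token": "<opaque token>"}


store = FakeStore(deactivated={"@gone:example.org"})
print(asyncio.run(complete_login(store, "@alice:example.org")))
# asyncio.run(complete_login(store, "@gone:example.org"))  # -> PermissionError
```

Centralising the check means a new login type cannot forget it; the cost is a
flag for the one caller that must skip it.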
--- changelog.d/15634.bugfix | 1 + .../password_auth_provider_callbacks.md | 3 + synapse/appservice/__init__.py | 3 +- synapse/handlers/auth.py | 14 ++--- synapse/handlers/jwt.py | 19 +----- synapse/rest/client/login.py | 23 +++++++- tests/handlers/test_password_providers.py | 59 +++++++------------ 7 files changed, 55 insertions(+), 67 deletions(-) create mode 100644 changelog.d/15634.bugfix diff --git a/changelog.d/15634.bugfix b/changelog.d/15634.bugfix new file mode 100644 index 0000000000..ef39e8a689 --- /dev/null +++ b/changelog.d/15634.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where deactivated users were able to login in uncommon situations. diff --git a/docs/modules/password_auth_provider_callbacks.md b/docs/modules/password_auth_provider_callbacks.md index 8275f7ebdc..d66ac7df31 100644 --- a/docs/modules/password_auth_provider_callbacks.md +++ b/docs/modules/password_auth_provider_callbacks.md @@ -46,6 +46,9 @@ instead. If the authentication is unsuccessful, the module must return `None`. +Note that the user is not automatically registered, the `register_user(..)` method of +the [module API](writing_a_module.html) can be used to lazily create users. + If multiple modules register an auth checker for the same login type but with different fields, Synapse will refuse to start. diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 35c330a3c4..2260a8f589 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -86,6 +86,7 @@ class ApplicationService: url.rstrip("/") if isinstance(url, str) else None ) # url must not end with a slash self.hs_token = hs_token + # The full Matrix ID for this application service's sender. self.sender = sender self.namespaces = self._check_namespaces(namespaces) self.id = id @@ -212,7 +213,7 @@ class ApplicationService: True if the application service is interested in the user, False if not. """ return ( - # User is the appservice's sender_localpart user + # User is the appservice's configured sender_localpart user user_id == self.sender # User is in the appservice's user namespace or self.is_user_in_namespace(user_id) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 59e340974d..d001f2fb2f 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -52,7 +52,6 @@ from synapse.api.errors import ( NotFoundError, StoreError, SynapseError, - UserDeactivatedError, ) from synapse.api.ratelimiting import Ratelimiter from synapse.handlers.ui_auth import ( @@ -1419,12 +1418,6 @@ class AuthHandler: return None (user_id, password_hash) = lookupres - # If the password hash is None, the account has likely been deactivated - if not password_hash: - deactivated = await self.store.get_user_deactivated_status(user_id) - if deactivated: - raise UserDeactivatedError("This account has been deactivated") - result = await self.validate_hash(password, password_hash) if not result: logger.warning("Failed password login for user %s", user_id) @@ -1749,8 +1742,11 @@ class AuthHandler: registered. auth_provider_session_id: The session ID from the SSO IdP received during login. """ - # If the account has been deactivated, do not proceed with the login - # flow. + # If the account has been deactivated, do not proceed with the login. + # + # This gets checked again when the token is submitted but this lets us + # provide an HTML error page to the user (instead of issuing a token and + # having it error later). 
deactivated = await self.store.get_user_deactivated_status(registered_user_id) if deactivated: respond_with_html(request, 403, self._sso_account_deactivated_template) diff --git a/synapse/handlers/jwt.py b/synapse/handlers/jwt.py index 5fddc0e315..740bf9b3c4 100644 --- a/synapse/handlers/jwt.py +++ b/synapse/handlers/jwt.py @@ -16,7 +16,7 @@ from typing import TYPE_CHECKING from authlib.jose import JsonWebToken, JWTClaims from authlib.jose.errors import BadSignatureError, InvalidClaimError, JoseError -from synapse.api.errors import Codes, LoginError, StoreError, UserDeactivatedError +from synapse.api.errors import Codes, LoginError from synapse.types import JsonDict, UserID if TYPE_CHECKING: @@ -26,7 +26,6 @@ if TYPE_CHECKING: class JwtHandler: def __init__(self, hs: "HomeServer"): self.hs = hs - self._main_store = hs.get_datastores().main self.jwt_secret = hs.config.jwt.jwt_secret self.jwt_subject_claim = hs.config.jwt.jwt_subject_claim @@ -34,7 +33,7 @@ class JwtHandler: self.jwt_issuer = hs.config.jwt.jwt_issuer self.jwt_audiences = hs.config.jwt.jwt_audiences - async def validate_login(self, login_submission: JsonDict) -> str: + def validate_login(self, login_submission: JsonDict) -> str: """ Authenticates the user for the /login API @@ -103,16 +102,4 @@ class JwtHandler: if user is None: raise LoginError(403, "Invalid JWT", errcode=Codes.FORBIDDEN) - user_id = UserID(user, self.hs.hostname).to_string() - - # If the account has been deactivated, do not proceed with the login - # flow. - try: - deactivated = await self._main_store.get_user_deactivated_status(user_id) - except StoreError: - # JWT lazily creates users, so they may not exist in the database yet. - deactivated = False - if deactivated: - raise UserDeactivatedError("This account has been deactivated") - - return user_id + return UserID(user, self.hs.hostname).to_string() diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index afdbf821b5..6ca61ffbd0 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -35,6 +35,7 @@ from synapse.api.errors import ( LoginError, NotApprovedError, SynapseError, + UserDeactivatedError, ) from synapse.api.ratelimiting import Ratelimiter from synapse.api.urls import CLIENT_API_PREFIX @@ -84,6 +85,7 @@ class LoginRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.hs = hs + self._main_store = hs.get_datastores().main # JWT configuration variables. self.jwt_enabled = hs.config.jwt.jwt_enabled @@ -112,13 +114,13 @@ class LoginRestServlet(RestServlet): self._well_known_builder = WellKnownBuilder(hs) self._address_ratelimiter = Ratelimiter( - store=hs.get_datastores().main, + store=self._main_store, clock=hs.get_clock(), rate_hz=self.hs.config.ratelimiting.rc_login_address.per_second, burst_count=self.hs.config.ratelimiting.rc_login_address.burst_count, ) self._account_ratelimiter = Ratelimiter( - store=hs.get_datastores().main, + store=self._main_store, clock=hs.get_clock(), rate_hz=self.hs.config.ratelimiting.rc_login_account.per_second, burst_count=self.hs.config.ratelimiting.rc_login_account.burst_count, @@ -280,6 +282,9 @@ class LoginRestServlet(RestServlet): login_submission, ratelimit=appservice.is_rate_limited(), should_issue_refresh_token=should_issue_refresh_token, + # The user represented by an appservice's configured sender_localpart + # is not actually created in Synapse. 
+ should_check_deactivated=qualified_user_id != appservice.sender, ) async def _do_other_login( @@ -326,6 +331,7 @@ class LoginRestServlet(RestServlet): auth_provider_id: Optional[str] = None, should_issue_refresh_token: bool = False, auth_provider_session_id: Optional[str] = None, + should_check_deactivated: bool = True, ) -> LoginResponse: """Called when we've successfully authed the user and now need to actually login them in (e.g. create devices). This gets called on @@ -345,6 +351,11 @@ class LoginRestServlet(RestServlet): should_issue_refresh_token: True if this login should issue a refresh token alongside the access token. auth_provider_session_id: The session ID got during login from the SSO IdP. + should_check_deactivated: True if the user should be checked for + deactivation status before logging in. + + This exists purely for appservice's configured sender_localpart + which doesn't have an associated user in the database. Returns: Dictionary of account information after successful login. @@ -364,6 +375,12 @@ class LoginRestServlet(RestServlet): ) user_id = canonical_uid + # If the account has been deactivated, do not proceed with the login. + if should_check_deactivated: + deactivated = await self._main_store.get_user_deactivated_status(user_id) + if deactivated: + raise UserDeactivatedError("This account has been deactivated") + device_id = login_submission.get("device_id") # If device_id is present, check that device_id is not longer than a reasonable 512 characters @@ -458,7 +475,7 @@ class LoginRestServlet(RestServlet): Returns: The body of the JSON response. """ - user_id = await self.hs.get_jwt_handler().validate_login(login_submission) + user_id = self.hs.get_jwt_handler().validate_login(login_submission) return await self._complete_login( user_id, login_submission, diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index aa91bc0a3d..394006f5f3 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -18,13 +18,17 @@ from http import HTTPStatus from typing import Any, Dict, List, Optional, Type, Union from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + import synapse from synapse.api.constants import LoginType from synapse.api.errors import Codes from synapse.handlers.account import AccountHandler from synapse.module_api import ModuleApi from synapse.rest.client import account, devices, login, logout, register +from synapse.server import HomeServer from synapse.types import JsonDict, UserID +from synapse.util import Clock from tests import unittest from tests.server import FakeChannel @@ -162,10 +166,16 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): CALLBACK_USERNAME = "get_username_for_registration" CALLBACK_DISPLAYNAME = "get_displayname_for_registration" - def setUp(self) -> None: + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: # we use a global mock device, so make sure we are starting with a clean slate mock_password_provider.reset_mock() - super().setUp() + + # The mock password provider doesn't register the users, so ensure they + # are registered first. 
+ self.register_user("u", "not-the-tested-password") + self.register_user("user", "not-the-tested-password") @override_config(legacy_providers_config(LegacyPasswordOnlyAuthProvider)) def test_password_only_auth_progiver_login_legacy(self) -> None: @@ -185,22 +195,12 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): mock_password_provider.reset_mock() # login with mxid should work too - channel = self._send_password_login("@u:bz", "p") + channel = self._send_password_login("@u:test", "p") self.assertEqual(channel.code, HTTPStatus.OK, channel.result) - self.assertEqual("@u:bz", channel.json_body["user_id"]) - mock_password_provider.check_password.assert_called_once_with("@u:bz", "p") + self.assertEqual("@u:test", channel.json_body["user_id"]) + mock_password_provider.check_password.assert_called_once_with("@u:test", "p") mock_password_provider.reset_mock() - # try a weird username / pass. Honestly it's unclear what we *expect* to happen - # in these cases, but at least we can guard against the API changing - # unexpectedly - channel = self._send_password_login(" USER🙂NAME ", " pASS\U0001F622word ") - self.assertEqual(channel.code, HTTPStatus.OK, channel.result) - self.assertEqual("@ USER🙂NAME :test", channel.json_body["user_id"]) - mock_password_provider.check_password.assert_called_once_with( - "@ USER🙂NAME :test", " pASS😢word " - ) - @override_config(legacy_providers_config(LegacyPasswordOnlyAuthProvider)) def test_password_only_auth_provider_ui_auth_legacy(self) -> None: self.password_only_auth_provider_ui_auth_test_body() @@ -208,10 +208,6 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): def password_only_auth_provider_ui_auth_test_body(self) -> None: """UI Auth should delegate correctly to the password provider""" - # create the user, otherwise access doesn't work - module_api = self.hs.get_module_api() - self.get_success(module_api.register_user("u")) - # log in twice, to get two devices mock_password_provider.check_password.return_value = make_awaitable(True) tok1 = self.login("u", "p") @@ -401,29 +397,16 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): mock_password_provider.check_auth.assert_not_called() mock_password_provider.check_auth.return_value = make_awaitable( - ("@user:bz", None) + ("@user:test", None) ) channel = self._send_login("test.login_type", "u", test_field="y") self.assertEqual(channel.code, HTTPStatus.OK, channel.result) - self.assertEqual("@user:bz", channel.json_body["user_id"]) + self.assertEqual("@user:test", channel.json_body["user_id"]) mock_password_provider.check_auth.assert_called_once_with( "u", "test.login_type", {"test_field": "y"} ) mock_password_provider.reset_mock() - # try a weird username. Again, it's unclear what we *expect* to happen - # in these cases, but at least we can guard against the API changing - # unexpectedly - mock_password_provider.check_auth.return_value = make_awaitable( - ("@ MALFORMED! :bz", None) - ) - channel = self._send_login("test.login_type", " USER🙂NAME ", test_field=" abc ") - self.assertEqual(channel.code, HTTPStatus.OK, channel.result) - self.assertEqual("@ MALFORMED! 
:bz", channel.json_body["user_id"]) - mock_password_provider.check_auth.assert_called_once_with( - " USER🙂NAME ", "test.login_type", {"test_field": " abc "} - ) - @override_config(legacy_providers_config(LegacyCustomAuthProvider)) def test_custom_auth_provider_ui_auth_legacy(self) -> None: self.custom_auth_provider_ui_auth_test_body() @@ -465,7 +448,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): # right params, but authing as the wrong user mock_password_provider.check_auth.return_value = make_awaitable( - ("@user:bz", None) + ("@user:test", None) ) body["auth"]["test_field"] = "foo" channel = self._delete_device(tok1, "dev2", body) @@ -498,11 +481,11 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): callback = Mock(return_value=make_awaitable(None)) mock_password_provider.check_auth.return_value = make_awaitable( - ("@user:bz", callback) + ("@user:test", callback) ) channel = self._send_login("test.login_type", "u", test_field="y") self.assertEqual(channel.code, HTTPStatus.OK, channel.result) - self.assertEqual("@user:bz", channel.json_body["user_id"]) + self.assertEqual("@user:test", channel.json_body["user_id"]) mock_password_provider.check_auth.assert_called_once_with( "u", "test.login_type", {"test_field": "y"} ) @@ -512,7 +495,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): call_args, call_kwargs = callback.call_args # should be one positional arg self.assertEqual(len(call_args), 1) - self.assertEqual(call_args[0]["user_id"], "@user:bz") + self.assertEqual(call_args[0]["user_id"], "@user:test") for p in ["user_id", "access_token", "device_id", "home_server"]: self.assertIn(p, call_args[0]) From 379eb2d7abc8e3215cc9fd14deefb975137c9494 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 23 May 2023 12:26:25 -0500 Subject: [PATCH 020/562] Fix `@trace` not wrapping some state methods that return coroutines correctly (#15647) ``` 2023-05-21 09:30:09,288 - synapse.logging.opentracing - 940 - ERROR - POST-1 - @trace may not have wrapped StateStorageController.get_state_for_groups correctly! The function is not async but returned a coroutine ``` Tracing instrumentation for these functions originally introduced in https://github.com/matrix-org/synapse/pull/15610 --- changelog.d/15647.bugfix | 1 + synapse/storage/controllers/state.py | 15 ++++++++------- 2 files changed, 9 insertions(+), 7 deletions(-) create mode 100644 changelog.d/15647.bugfix diff --git a/changelog.d/15647.bugfix b/changelog.d/15647.bugfix new file mode 100644 index 0000000000..2eff30f6e3 --- /dev/null +++ b/changelog.d/15647.bugfix @@ -0,0 +1 @@ +Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing. diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 06a80869eb..7089b0a1d8 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -16,7 +16,6 @@ from typing import ( TYPE_CHECKING, AbstractSet, Any, - Awaitable, Callable, Collection, Dict, @@ -175,9 +174,9 @@ class StateStorageController: @trace @tag_args - def _get_state_groups_from_groups( + async def _get_state_groups_from_groups( self, groups: List[int], state_filter: StateFilter - ) -> Awaitable[Dict[int, StateMap[str]]]: + ) -> Dict[int, StateMap[str]]: """Returns the state groups for a given set of groups, filtering on types of state events. @@ -190,7 +189,9 @@ class StateStorageController: Dict of state group to state map. 
""" - return self.stores.state._get_state_groups_from_groups(groups, state_filter) + return await self.stores.state._get_state_groups_from_groups( + groups, state_filter + ) @trace @tag_args @@ -349,9 +350,9 @@ class StateStorageController: @trace @tag_args - def get_state_for_groups( + async def get_state_for_groups( self, groups: Iterable[int], state_filter: Optional[StateFilter] = None - ) -> Awaitable[Dict[int, MutableStateMap[str]]]: + ) -> Dict[int, MutableStateMap[str]]: """Gets the state at each of a list of state groups, optionally filtering by type/state_key @@ -363,7 +364,7 @@ class StateStorageController: Returns: Dict of state group to state map. """ - return self.stores.state._get_state_for_groups( + return await self.stores.state._get_state_for_groups( groups, state_filter or StateFilter.all() ) From 1f55c04cbca6dc56085896dd980defa26ffe3b5b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 24 May 2023 08:59:31 -0400 Subject: [PATCH 021/562] Improve type hints for cached decorator. (#15658) The cached decorators always return a Deferred, which was not properly propagated. It was close enough when wrapping coroutines, but failed if a bare function was wrapped. --- changelog.d/15658.misc | 1 + scripts-dev/mypy_synapse_plugin.py | 34 +++++++- synapse/storage/databases/main/roommember.py | 2 +- synapse/util/caches/descriptors.py | 6 +- tests/appservice/test_appservice.py | 82 +++++++------------- tests/storage/test_transactions.py | 11 ++- 6 files changed, 73 insertions(+), 63 deletions(-) create mode 100644 changelog.d/15658.misc diff --git a/changelog.d/15658.misc b/changelog.d/15658.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/15658.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py index 2c377533c0..8058e9c993 100644 --- a/scripts-dev/mypy_synapse_plugin.py +++ b/scripts-dev/mypy_synapse_plugin.py @@ -18,10 +18,11 @@ can crop up, e.g the cache descriptors. from typing import Callable, Optional, Type +from mypy.erasetype import remove_instance_last_known_values from mypy.nodes import ARG_NAMED_OPT from mypy.plugin import MethodSigContext, Plugin from mypy.typeops import bind_self -from mypy.types import CallableType, NoneType, UnionType +from mypy.types import CallableType, Instance, NoneType, UnionType class SynapsePlugin(Plugin): @@ -92,10 +93,41 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType: arg_names.append("on_invalidate") arg_kinds.append(ARG_NAMED_OPT) # Arg is an optional kwarg. + # Finally we ensure the return type is a Deferred. + if ( + isinstance(signature.ret_type, Instance) + and signature.ret_type.type.fullname == "twisted.internet.defer.Deferred" + ): + # If it is already a Deferred, nothing to do. + ret_type = signature.ret_type + else: + ret_arg = None + if isinstance(signature.ret_type, Instance): + # If a coroutine, wrap the coroutine's return type in a Deferred. + if signature.ret_type.type.fullname == "typing.Coroutine": + ret_arg = signature.ret_type.args[2] + + # If an awaitable, wrap the awaitable's final value in a Deferred. + elif signature.ret_type.type.fullname == "typing.Awaitable": + ret_arg = signature.ret_type.args[0] + + # Otherwise, wrap the return value in a Deferred. + if ret_arg is None: + ret_arg = signature.ret_type + + # This should be able to use ctx.api.named_generic_type, but that doesn't seem + # to find the correct symbol for anything more than 1 module deep. 
+ # + # modules is not part of CheckerPluginInterface. The following is a combination + # of TypeChecker.named_generic_type and TypeChecker.lookup_typeinfo. + sym = ctx.api.modules["twisted.internet.defer"].names.get("Deferred") # type: ignore[attr-defined] + ret_type = Instance(sym.node, [remove_instance_last_known_values(ret_arg)]) + signature = signature.copy_modified( arg_types=arg_types, arg_names=arg_names, arg_kinds=arg_kinds, + ret_type=ret_type, ) return signature diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index e068f27a10..ae9c201b87 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1099,7 +1099,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): # `get_joined_hosts` is called with the "current" state group for the # room, and so consecutive calls will be for consecutive state groups # which point to the previous state group. - cache = await self._get_joined_hosts_cache(room_id) # type: ignore[misc] + cache = await self._get_joined_hosts_cache(room_id) # If the state group in the cache matches, we already have the data we need. if state_entry.state_group == cache.state_group: diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 81df71a0c5..8514a75a1c 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -220,7 +220,9 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): self.iterable = iterable self.prune_unread_entries = prune_unread_entries - def __get__(self, obj: Optional[Any], owner: Optional[Type]) -> Callable[..., Any]: + def __get__( + self, obj: Optional[Any], owner: Optional[Type] + ) -> Callable[..., "defer.Deferred[Any]"]: cache: DeferredCache[CacheKey, Any] = DeferredCache( name=self.name, max_entries=self.max_entries, @@ -232,7 +234,7 @@ class DeferredCacheDescriptor(_CacheDescriptorBase): get_cache_key = self.cache_key_builder @functools.wraps(self.orig) - def _wrapped(*args: Any, **kwargs: Any) -> Any: + def _wrapped(*args: Any, **kwargs: Any) -> "defer.Deferred[Any]": # If we're passed a cache_context then we'll want to call its invalidate() # whenever we are invalidated invalidate_callback = kwargs.pop("on_invalidate", None) diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index dee976356f..66753c60c4 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import re -from typing import Generator +from typing import Any, Generator from unittest.mock import Mock from twisted.internet import defer @@ -49,15 +49,13 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_user_id_prefix_match( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@irc_foobar:matrix.org" self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @@ -65,15 +63,13 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_user_id_prefix_no_match( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@someone_else:matrix.org" self.assertFalse( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @@ -81,17 +77,15 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_room_member_is_checked( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@someone_else:matrix.org" self.event.type = "m.room.member" self.event.state_key = "@irc_foobar:matrix.org" self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @@ -99,17 +93,15 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_room_id_match( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_ROOMS].append( _regex("!some_prefix.*some_suffix:matrix.org") ) self.event.room_id = "!some_prefixs0m3th1nGsome_suffix:matrix.org" self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @@ -117,25 +109,21 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_room_id_no_match( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_ROOMS].append( _regex("!some_prefix.*some_suffix:matrix.org") ) self.event.room_id = "!XqBunHwQIXUiqCaoxq:matrix.org" self.assertFalse( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @defer.inlineCallbacks - def test_regex_alias_match( - self, - ) -> Generator["defer.Deferred[object]", object, None]: + def test_regex_alias_match(self) -> 
Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) @@ -145,10 +133,8 @@ class ApplicationServiceTestCase(unittest.TestCase): self.store.get_local_users_in_room = simple_async_mock([]) self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @@ -192,7 +178,7 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_alias_no_match( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) @@ -213,7 +199,7 @@ class ApplicationServiceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_regex_multiple_matches( self, - ) -> Generator["defer.Deferred[object]", object, None]: + ) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) @@ -223,18 +209,14 @@ class ApplicationServiceTestCase(unittest.TestCase): self.store.get_local_users_in_room = simple_async_mock([]) self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @defer.inlineCallbacks - def test_interested_in_self( - self, - ) -> Generator["defer.Deferred[object]", object, None]: + def test_interested_in_self(self) -> Generator["defer.Deferred[Any]", object, None]: # make sure invites get through self.service.sender = "@appservice:name" self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) @@ -243,18 +225,14 @@ class ApplicationServiceTestCase(unittest.TestCase): self.event.state_key = self.service.sender self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) @defer.inlineCallbacks - def test_member_list_match( - self, - ) -> Generator["defer.Deferred[object]", object, None]: + def test_member_list_match(self) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) # Note that @irc_fo:here is the AS user. self.store.get_local_users_in_room = simple_async_mock( @@ -265,10 +243,8 @@ class ApplicationServiceTestCase(unittest.TestCase): self.event.sender = "@xmpp_foobar:matrix.org" self.assertTrue( ( - yield defer.ensureDeferred( - self.service.is_interested_in_event( - self.event.event_id, self.event, self.store - ) + yield self.service.is_interested_in_event( + self.event.event_id, self.event, self.store ) ) ) diff --git a/tests/storage/test_transactions.py b/tests/storage/test_transactions.py index db9ee9955e..2fab84a529 100644 --- a/tests/storage/test_transactions.py +++ b/tests/storage/test_transactions.py @@ -33,15 +33,14 @@ class TransactionStoreTestCase(HomeserverTestCase): destination retries, as well as testing tht we can set and get correctly. 
""" - d = self.store.get_destination_retry_timings("example.com") - r = self.get_success(d) + r = self.get_success(self.store.get_destination_retry_timings("example.com")) self.assertIsNone(r) - d = self.store.set_destination_retry_timings("example.com", 1000, 50, 100) - self.get_success(d) + self.get_success( + self.store.set_destination_retry_timings("example.com", 1000, 50, 100) + ) - d = self.store.get_destination_retry_timings("example.com") - r = self.get_success(d) + r = self.get_success(self.store.get_destination_retry_timings("example.com")) self.assertEqual( DestinationRetryTimings( From c7e9c1d5ae2fd0fa68b28c51a3bce503194c4718 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 24 May 2023 15:13:28 +0100 Subject: [PATCH 022/562] Speed up user directory rebuild for users some more... (#15665) --- changelog.d/15665.misc | 1 + .../storage/databases/main/user_directory.py | 194 +++++++++++------- 2 files changed, 117 insertions(+), 78 deletions(-) create mode 100644 changelog.d/15665.misc diff --git a/changelog.d/15665.misc b/changelog.d/15665.misc new file mode 100644 index 0000000000..7ad424d8df --- /dev/null +++ b/changelog.d/15665.misc @@ -0,0 +1 @@ +Speed up rebuilding of the user directory for local users. diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index b7d58978de..a0319575f0 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -17,6 +17,7 @@ import re import unicodedata from typing import ( TYPE_CHECKING, + Collection, Iterable, List, Mapping, @@ -45,7 +46,7 @@ from synapse.util.stringutils import non_null_str_or_none if TYPE_CHECKING: from synapse.server import HomeServer -from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules +from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, UserTypes from synapse.storage.database import ( DatabasePool, LoggingDatabaseConnection, @@ -356,13 +357,30 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): Add all local users to the user directory. """ - def _get_next_batch(txn: LoggingTransaction) -> Optional[List[str]]: - sql = "SELECT user_id FROM %s LIMIT %s" % ( - TEMP_TABLE + "_users", - str(batch_size), - ) - txn.execute(sql) - user_result = cast(List[Tuple[str]], txn.fetchall()) + def _populate_user_directory_process_users_txn( + txn: LoggingTransaction, + ) -> Optional[int]: + if self.database_engine.supports_returning: + # Note: we use an ORDER BY in the SELECT to force usage of an + # index. Otherwise, postgres does a sequential scan that is + # surprisingly slow (I think due to the fact it will read/skip + # over lots of already deleted rows). + sql = f""" + DELETE FROM {TEMP_TABLE + "_users"} + WHERE user_id IN ( + SELECT user_id FROM {TEMP_TABLE + "_users"} ORDER BY user_id LIMIT ? 
+ ) + RETURNING user_id + """ + txn.execute(sql, (batch_size,)) + user_result = cast(List[Tuple[str]], txn.fetchall()) + else: + sql = "SELECT user_id FROM %s ORDER BY user_id LIMIT %s" % ( + TEMP_TABLE + "_users", + str(batch_size), + ) + txn.execute(sql) + user_result = cast(List[Tuple[str]], txn.fetchall()) if not user_result: return None @@ -378,85 +396,81 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): assert count_result is not None progress["remaining"] = count_result[0] - return users_to_work_on + if not users_to_work_on: + return None - users_to_work_on = await self.db_pool.runInteraction( - "populate_user_directory_temp_read", _get_next_batch + logger.debug( + "Processing the next %d users of %d remaining", + len(users_to_work_on), + progress["remaining"], + ) + + # First filter down to users we want to insert into the user directory. + users_to_insert = self._filter_local_users_for_dir_txn( + txn, users_to_work_on + ) + + # Next fetch their profiles. Note that the `user_id` here is the + # *localpart*, and that not all users have profiles. + profile_rows = self.db_pool.simple_select_many_txn( + txn, + table="profiles", + column="user_id", + iterable=[get_localpart_from_id(u) for u in users_to_insert], + retcols=( + "user_id", + "displayname", + "avatar_url", + ), + keyvalues={}, + ) + profiles = { + f"@{row['user_id']}:{self.server_name}": _UserDirProfile( + f"@{row['user_id']}:{self.server_name}", + row["displayname"], + row["avatar_url"], + ) + for row in profile_rows + } + + profiles_to_insert = [ + profiles.get(user_id) or _UserDirProfile(user_id) + for user_id in users_to_insert + ] + + # Actually insert the users with their profiles into the directory. + self._update_profiles_in_user_dir_txn(txn, profiles_to_insert) + + # We've finished processing the users. Delete it from the table, if + # we haven't already. + if not self.database_engine.supports_returning: + self.db_pool.simple_delete_many_txn( + txn, + table=TEMP_TABLE + "_users", + column="user_id", + values=users_to_work_on, + keyvalues={}, + ) + + # Update the remaining counter. + progress["remaining"] -= len(users_to_work_on) + self.db_pool.updates._background_update_progress_txn( + txn, "populate_user_directory_process_users", progress + ) + return len(users_to_work_on) + + processed_count = await self.db_pool.runInteraction( + "populate_user_directory_temp", _populate_user_directory_process_users_txn ) # No more users -- complete the transaction. - if not users_to_work_on: + if not processed_count: await self.db_pool.updates._end_background_update( "populate_user_directory_process_users" ) return 1 - logger.debug( - "Processing the next %d users of %d remaining" - % (len(users_to_work_on), progress["remaining"]) - ) - - # First filter down to users we want to insert into the user directory. - users_to_insert = [ - user_id - for user_id in users_to_work_on - if await self.should_include_local_user_in_dir(user_id) - ] - - # Next fetch their profiles. Note that the `user_id` here is the - # *localpart*, and that not all users have profiles. 
- profile_rows = await self.db_pool.simple_select_many_batch( - table="profiles", - column="user_id", - iterable=[get_localpart_from_id(u) for u in users_to_insert], - retcols=( - "user_id", - "displayname", - "avatar_url", - ), - keyvalues={}, - desc="populate_user_directory_process_users_get_profiles", - ) - profiles = { - f"@{row['user_id']}:{self.server_name}": _UserDirProfile( - f"@{row['user_id']}:{self.server_name}", - row["displayname"], - row["avatar_url"], - ) - for row in profile_rows - } - - profiles_to_insert = [ - profiles.get(user_id) or _UserDirProfile(user_id) - for user_id in users_to_insert - ] - - # Actually insert the users with their profiles into the directory. - await self.db_pool.runInteraction( - "populate_user_directory_process_users_insertion", - self._update_profiles_in_user_dir_txn, - profiles_to_insert, - ) - - # We've finished processing the users. Delete it from the table. - await self.db_pool.simple_delete_many( - table=TEMP_TABLE + "_users", - column="user_id", - iterable=users_to_work_on, - keyvalues={}, - desc="populate_user_directory_process_users_delete", - ) - - # Update the remaining counter. - progress["remaining"] -= len(users_to_work_on) - await self.db_pool.runInteraction( - "populate_user_directory", - self.db_pool.updates._background_update_progress_txn, - "populate_user_directory_process_users", - progress, - ) - - return len(users_to_work_on) + return processed_count async def should_include_local_user_in_dir(self, user: str) -> bool: """Certain classes of local user are omitted from the user directory. @@ -494,6 +508,30 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): return True + def _filter_local_users_for_dir_txn( + self, txn: LoggingTransaction, users: Collection[str] + ) -> Collection[str]: + """A batched version of `should_include_local_user_in_dir`""" + users = [ + user + for user in users + if self.get_app_service_by_user_id(user) is None # type: ignore[attr-defined] + and not self.get_if_app_services_interested_in_user(user) # type: ignore[attr-defined] + ] + + rows = self.db_pool.simple_select_many_txn( + txn, + table="users", + column="name", + iterable=users, + keyvalues={ + "deactivated": 0, + }, + retcols=("name", "user_type"), + ) + + return [row["name"] for row in rows if row["user_type"] != UserTypes.SUPPORT] + async def is_room_world_readable_or_publicly_joinable(self, room_id: str) -> bool: """Check if the room is either world_readable or publically joinable""" From ca5c4be92166775ec1de9e79a04db1e136609a1f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 24 May 2023 10:18:52 -0400 Subject: [PATCH 023/562] Add type hints to test_descriptors. (#15659) Require type hints in test_descriptors and add missing ones. --- changelog.d/15659.misc | 1 + mypy.ini | 3 - tests/util/caches/test_descriptors.py | 197 ++++++++++++++------------ 3 files changed, 105 insertions(+), 96 deletions(-) create mode 100644 changelog.d/15659.misc diff --git a/changelog.d/15659.misc b/changelog.d/15659.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/15659.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index 3363c6daee..a7ec66196d 100644 --- a/mypy.ini +++ b/mypy.ini @@ -32,9 +32,6 @@ warn_unused_ignores = False [mypy-synapse.util.caches.treecache] disallow_untyped_defs = False -[mypy-tests.util.caches.test_descriptors] -disallow_untyped_defs = False - ;; Dependencies without annotations ;; Before ignoring a module, check to see if type stubs are available. 
;; The `typeshed` project maintains stubs here: diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 13f1edd533..064f4987df 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -13,7 +13,18 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import Iterable, Set, Tuple, cast +from typing import ( + Any, + Dict, + Generator, + Iterable, + List, + NoReturn, + Optional, + Set, + Tuple, + cast, +) from unittest import mock from twisted.internet import defer, reactor @@ -29,7 +40,7 @@ from synapse.logging.context import ( make_deferred_yieldable, ) from synapse.util.caches import descriptors -from synapse.util.caches.descriptors import cached, cachedList +from synapse.util.caches.descriptors import _CacheContext, cached, cachedList from tests import unittest from tests.test_utils import get_awaitable_result @@ -37,21 +48,21 @@ from tests.test_utils import get_awaitable_result logger = logging.getLogger(__name__) -def run_on_reactor(): - d: "Deferred[int]" = defer.Deferred() +def run_on_reactor() -> "Deferred[int]": + d: "Deferred[int]" = Deferred() cast(IReactorTime, reactor).callLater(0, d.callback, 0) return make_deferred_yieldable(d) class DescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks - def test_cache(self): + def test_cache(self) -> Generator["Deferred[Any]", object, None]: class Cls: - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() @descriptors.cached() - def fn(self, arg1, arg2): + def fn(self, arg1: int, arg2: int) -> str: return self.mock(arg1, arg2) obj = Cls() @@ -77,15 +88,15 @@ class DescriptorTestCase(unittest.TestCase): obj.mock.assert_not_called() @defer.inlineCallbacks - def test_cache_num_args(self): + def test_cache_num_args(self) -> Generator["Deferred[Any]", object, None]: """Only the first num_args arguments should matter to the cache""" class Cls: - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() @descriptors.cached(num_args=1) - def fn(self, arg1, arg2): + def fn(self, arg1: int, arg2: int) -> mock.Mock: return self.mock(arg1, arg2) obj = Cls() @@ -111,7 +122,7 @@ class DescriptorTestCase(unittest.TestCase): obj.mock.assert_not_called() @defer.inlineCallbacks - def test_cache_uncached_args(self): + def test_cache_uncached_args(self) -> Generator["Deferred[Any]", object, None]: """ Only the arguments not named in uncached_args should matter to the cache @@ -123,10 +134,10 @@ class DescriptorTestCase(unittest.TestCase): # Note that it is important that this is not the last argument to # test behaviour of skipping arguments properly. 
@descriptors.cached(uncached_args=("arg2",)) - def fn(self, arg1, arg2, arg3): + def fn(self, arg1: int, arg2: int, arg3: int) -> str: return self.mock(arg1, arg2, arg3) - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() obj = Cls() @@ -152,15 +163,15 @@ class DescriptorTestCase(unittest.TestCase): obj.mock.assert_not_called() @defer.inlineCallbacks - def test_cache_kwargs(self): + def test_cache_kwargs(self) -> Generator["Deferred[Any]", object, None]: """Test that keyword arguments are treated properly""" class Cls: - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() @descriptors.cached() - def fn(self, arg1, kwarg1=2): + def fn(self, arg1: int, kwarg1: int = 2) -> str: return self.mock(arg1, kwarg1=kwarg1) obj = Cls() @@ -188,12 +199,12 @@ class DescriptorTestCase(unittest.TestCase): self.assertEqual(r, "fish") obj.mock.assert_not_called() - def test_cache_with_sync_exception(self): + def test_cache_with_sync_exception(self) -> None: """If the wrapped function throws synchronously, things should continue to work""" class Cls: @cached() - def fn(self, arg1): + def fn(self, arg1: int) -> NoReturn: raise SynapseError(100, "mai spoon iz too big!!1") obj = Cls() @@ -209,15 +220,15 @@ class DescriptorTestCase(unittest.TestCase): d = obj.fn(1) self.failureResultOf(d, SynapseError) - def test_cache_with_async_exception(self): + def test_cache_with_async_exception(self) -> None: """The wrapped function returns a failure""" class Cls: - result = None + result: Optional[Deferred] = None call_count = 0 @cached() - def fn(self, arg1): + def fn(self, arg1: int) -> Optional[Deferred]: self.call_count += 1 return self.result @@ -225,7 +236,7 @@ class DescriptorTestCase(unittest.TestCase): callbacks: Set[str] = set() # set off an asynchronous request - origin_d: Deferred = defer.Deferred() + origin_d: Deferred = Deferred() obj.result = origin_d d1 = obj.fn(1, on_invalidate=lambda: callbacks.add("d1")) @@ -260,17 +271,17 @@ class DescriptorTestCase(unittest.TestCase): self.assertEqual(self.successResultOf(d3), 100) self.assertEqual(obj.call_count, 2) - def test_cache_logcontexts(self): + def test_cache_logcontexts(self) -> Deferred: """Check that logcontexts are set and restored correctly when using the cache.""" - complete_lookup: Deferred = defer.Deferred() + complete_lookup: Deferred = Deferred() class Cls: @descriptors.cached() - def fn(self, arg1): + def fn(self, arg1: int) -> "Deferred[int]": @defer.inlineCallbacks - def inner_fn(): + def inner_fn() -> Generator["Deferred[object]", object, int]: with PreserveLoggingContext(): yield complete_lookup return 1 @@ -278,13 +289,13 @@ class DescriptorTestCase(unittest.TestCase): return inner_fn() @defer.inlineCallbacks - def do_lookup(): + def do_lookup() -> Generator["Deferred[Any]", object, int]: with LoggingContext("c1") as c1: r = yield obj.fn(1) self.assertEqual(current_context(), c1) - return r + return cast(int, r) - def check_result(r): + def check_result(r: int) -> None: self.assertEqual(r, 1) obj = Cls() @@ -304,15 +315,15 @@ class DescriptorTestCase(unittest.TestCase): return defer.gatherResults([d1, d2]) - def test_cache_logcontexts_with_exception(self): + def test_cache_logcontexts_with_exception(self) -> "Deferred[None]": """Check that the cache sets and restores logcontexts correctly when the lookup function throws an exception""" class Cls: @descriptors.cached() - def fn(self, arg1): + def fn(self, arg1: int) -> Deferred: @defer.inlineCallbacks - def inner_fn(): + def inner_fn() -> 
Generator["Deferred[Any]", object, NoReturn]: # we want this to behave like an asynchronous function yield run_on_reactor() raise SynapseError(400, "blah") @@ -320,7 +331,7 @@ class DescriptorTestCase(unittest.TestCase): return inner_fn() @defer.inlineCallbacks - def do_lookup(): + def do_lookup() -> Generator["Deferred[object]", object, None]: with LoggingContext("c1") as c1: try: d = obj.fn(1) @@ -347,13 +358,13 @@ class DescriptorTestCase(unittest.TestCase): return d1 @defer.inlineCallbacks - def test_cache_default_args(self): + def test_cache_default_args(self) -> Generator["Deferred[Any]", object, None]: class Cls: - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() @descriptors.cached() - def fn(self, arg1, arg2=2, arg3=3): + def fn(self, arg1: int, arg2: int = 2, arg3: int = 3) -> str: return self.mock(arg1, arg2, arg3) obj = Cls() @@ -384,13 +395,13 @@ class DescriptorTestCase(unittest.TestCase): self.assertEqual(r, "chips") obj.mock.assert_not_called() - def test_cache_iterable(self): + def test_cache_iterable(self) -> None: class Cls: - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() @descriptors.cached(iterable=True) - def fn(self, arg1, arg2): + def fn(self, arg1: int, arg2: int) -> List[str]: return self.mock(arg1, arg2) obj = Cls() @@ -417,12 +428,12 @@ class DescriptorTestCase(unittest.TestCase): self.assertEqual(r.result, ["chips"]) obj.mock.assert_not_called() - def test_cache_iterable_with_sync_exception(self): + def test_cache_iterable_with_sync_exception(self) -> None: """If the wrapped function throws synchronously, things should continue to work""" class Cls: @descriptors.cached(iterable=True) - def fn(self, arg1): + def fn(self, arg1: int) -> NoReturn: raise SynapseError(100, "mai spoon iz too big!!1") obj = Cls() @@ -438,20 +449,20 @@ class DescriptorTestCase(unittest.TestCase): d = obj.fn(1) self.failureResultOf(d, SynapseError) - def test_invalidate_cascade(self): + def test_invalidate_cascade(self) -> None: """Invalidations should cascade up through cache contexts""" class Cls: @cached(cache_context=True) - async def func1(self, key, cache_context): + async def func1(self, key: str, cache_context: _CacheContext) -> int: return await self.func2(key, on_invalidate=cache_context.invalidate) @cached(cache_context=True) - async def func2(self, key, cache_context): + async def func2(self, key: str, cache_context: _CacheContext) -> int: return await self.func3(key, on_invalidate=cache_context.invalidate) @cached(cache_context=True) - async def func3(self, key, cache_context): + async def func3(self, key: str, cache_context: _CacheContext) -> int: self.invalidate = cache_context.invalidate return 42 @@ -463,13 +474,13 @@ class DescriptorTestCase(unittest.TestCase): obj.invalidate() top_invalidate.assert_called_once() - def test_cancel(self): + def test_cancel(self) -> None: """Test that cancelling a lookup does not cancel other lookups""" complete_lookup: "Deferred[None]" = Deferred() class Cls: @cached() - async def fn(self, arg1): + async def fn(self, arg1: int) -> str: await complete_lookup return str(arg1) @@ -488,7 +499,7 @@ class DescriptorTestCase(unittest.TestCase): self.failureResultOf(d1, CancelledError) self.assertEqual(d2.result, "123") - def test_cancel_logcontexts(self): + def test_cancel_logcontexts(self) -> None: """Test that cancellation does not break logcontexts. * The `CancelledError` must be raised with the correct logcontext. 
@@ -501,14 +512,14 @@ class DescriptorTestCase(unittest.TestCase): inner_context_was_finished = False @cached() - async def fn(self, arg1): + async def fn(self, arg1: int) -> str: await make_deferred_yieldable(complete_lookup) self.inner_context_was_finished = current_context().finished return str(arg1) obj = Cls() - async def do_lookup(): + async def do_lookup() -> None: with LoggingContext("c1") as c1: try: await obj.fn(123) @@ -542,10 +553,10 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): """ @defer.inlineCallbacks - def test_passthrough(self): + def test_passthrough(self) -> Generator["Deferred[Any]", object, None]: class A: @cached() - def func(self, key): + def func(self, key: str) -> str: return key a = A() @@ -554,12 +565,12 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): self.assertEqual((yield a.func("bar")), "bar") @defer.inlineCallbacks - def test_hit(self): + def test_hit(self) -> Generator["Deferred[Any]", object, None]: callcount = [0] class A: @cached() - def func(self, key): + def func(self, key: str) -> str: callcount[0] += 1 return key @@ -572,12 +583,12 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): self.assertEqual(callcount[0], 1) @defer.inlineCallbacks - def test_invalidate(self): + def test_invalidate(self) -> Generator["Deferred[Any]", object, None]: callcount = [0] class A: @cached() - def func(self, key): + def func(self, key: str) -> str: callcount[0] += 1 return key @@ -592,21 +603,21 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): self.assertEqual(callcount[0], 2) - def test_invalidate_missing(self): + def test_invalidate_missing(self) -> None: class A: @cached() - def func(self, key): + def func(self, key: str) -> str: return key A().func.invalidate(("what",)) @defer.inlineCallbacks - def test_max_entries(self): + def test_max_entries(self) -> Generator["Deferred[Any]", object, None]: callcount = [0] class A: @cached(max_entries=10) - def func(self, key): + def func(self, key: int) -> int: callcount[0] += 1 return key @@ -626,14 +637,14 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): callcount[0] >= 14, msg="Expected callcount >= 14, got %d" % (callcount[0]) ) - def test_prefill(self): + def test_prefill(self) -> None: callcount = [0] d = defer.succeed(123) class A: @cached() - def func(self, key): + def func(self, key: str) -> "Deferred[int]": callcount[0] += 1 return d @@ -645,18 +656,18 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): self.assertEqual(callcount[0], 0) @defer.inlineCallbacks - def test_invalidate_context(self): + def test_invalidate_context(self) -> Generator["Deferred[Any]", object, None]: callcount = [0] callcount2 = [0] class A: @cached() - def func(self, key): + def func(self, key: str) -> str: callcount[0] += 1 return key @cached(cache_context=True) - def func2(self, key, cache_context): + def func2(self, key: str, cache_context: _CacheContext) -> "Deferred[str]": callcount2[0] += 1 return self.func(key, on_invalidate=cache_context.invalidate) @@ -678,18 +689,18 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): self.assertEqual(callcount2[0], 2) @defer.inlineCallbacks - def test_eviction_context(self): + def test_eviction_context(self) -> Generator["Deferred[Any]", object, None]: callcount = [0] callcount2 = [0] class A: @cached(max_entries=2) - def func(self, key): + def func(self, key: str) -> str: callcount[0] += 1 return key @cached(cache_context=True) - def func2(self, key, cache_context): + def func2(self, key: str, cache_context: 
_CacheContext) -> "Deferred[str]": callcount2[0] += 1 return self.func(key, on_invalidate=cache_context.invalidate) @@ -715,18 +726,18 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): self.assertEqual(callcount2[0], 3) @defer.inlineCallbacks - def test_double_get(self): + def test_double_get(self) -> Generator["Deferred[Any]", object, None]: callcount = [0] callcount2 = [0] class A: @cached() - def func(self, key): + def func(self, key: str) -> str: callcount[0] += 1 return key @cached(cache_context=True) - def func2(self, key, cache_context): + def func2(self, key: str, cache_context: _CacheContext) -> "Deferred[str]": callcount2[0] += 1 return self.func(key, on_invalidate=cache_context.invalidate) @@ -763,17 +774,17 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): class CachedListDescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks - def test_cache(self): + def test_cache(self) -> Generator["Deferred[Any]", object, None]: class Cls: - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() @descriptors.cached() - def fn(self, arg1, arg2): + def fn(self, arg1: int, arg2: int) -> None: pass @descriptors.cachedList(cached_method_name="fn", list_name="args1") - async def list_fn(self, args1, arg2): + async def list_fn(self, args1: Iterable[int], arg2: int) -> Dict[int, str]: context = current_context() assert isinstance(context, LoggingContext) assert context.name == "c1" @@ -824,19 +835,19 @@ class CachedListDescriptorTestCase(unittest.TestCase): obj.mock.assert_called_once_with({40}, 2) self.assertEqual(r, {10: "fish", 40: "gravy"}) - def test_concurrent_lookups(self): + def test_concurrent_lookups(self) -> None: """All concurrent lookups should get the same result""" class Cls: - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() @descriptors.cached() - def fn(self, arg1): + def fn(self, arg1: int) -> None: pass @descriptors.cachedList(cached_method_name="fn", list_name="args1") - def list_fn(self, args1) -> "Deferred[dict]": + def list_fn(self, args1: List[int]) -> "Deferred[dict]": return self.mock(args1) obj = Cls() @@ -867,19 +878,19 @@ class CachedListDescriptorTestCase(unittest.TestCase): self.assertEqual(self.successResultOf(d3), {10: "peas"}) @defer.inlineCallbacks - def test_invalidate(self): + def test_invalidate(self) -> Generator["Deferred[Any]", object, None]: """Make sure that invalidation callbacks are called.""" class Cls: - def __init__(self): + def __init__(self) -> None: self.mock = mock.Mock() @descriptors.cached() - def fn(self, arg1, arg2): + def fn(self, arg1: int, arg2: int) -> None: pass @descriptors.cachedList(cached_method_name="fn", list_name="args1") - async def list_fn(self, args1, arg2): + async def list_fn(self, args1: List[int], arg2: int) -> Dict[int, str]: # we want this to behave like an asynchronous function await run_on_reactor() return self.mock(args1, arg2) @@ -908,17 +919,17 @@ class CachedListDescriptorTestCase(unittest.TestCase): invalidate0.assert_called_once() invalidate1.assert_called_once() - def test_cancel(self): + def test_cancel(self) -> None: """Test that cancelling a lookup does not cancel other lookups""" complete_lookup: "Deferred[None]" = Deferred() class Cls: @cached() - def fn(self, arg1): + def fn(self, arg1: int) -> None: pass @cachedList(cached_method_name="fn", list_name="args") - async def list_fn(self, args): + async def list_fn(self, args: List[int]) -> Dict[int, str]: await complete_lookup return {arg: str(arg) for arg in args} @@ -936,7 +947,7 @@ 
class CachedListDescriptorTestCase(unittest.TestCase): self.failureResultOf(d1, CancelledError) self.assertEqual(d2.result, {123: "123", 456: "456", 789: "789"}) - def test_cancel_logcontexts(self): + def test_cancel_logcontexts(self) -> None: """Test that cancellation does not break logcontexts. * The `CancelledError` must be raised with the correct logcontext. @@ -949,18 +960,18 @@ class CachedListDescriptorTestCase(unittest.TestCase): inner_context_was_finished = False @cached() - def fn(self, arg1): + def fn(self, arg1: int) -> None: pass @cachedList(cached_method_name="fn", list_name="args") - async def list_fn(self, args): + async def list_fn(self, args: List[int]) -> Dict[int, str]: await make_deferred_yieldable(complete_lookup) self.inner_context_was_finished = current_context().finished return {arg: str(arg) for arg in args} obj = Cls() - async def do_lookup(): + async def do_lookup() -> None: with LoggingContext("c1") as c1: try: await obj.list_fn([123]) @@ -983,7 +994,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): ) self.assertEqual(current_context(), SENTINEL_CONTEXT) - def test_num_args_mismatch(self): + def test_num_args_mismatch(self) -> None: """ Make sure someone does not accidentally use @cachedList on a method with a mismatch in the number args to the underlying single cache method. @@ -991,14 +1002,14 @@ class CachedListDescriptorTestCase(unittest.TestCase): class Cls: @descriptors.cached(tree=True) - def fn(self, room_id, event_id): + def fn(self, room_id: str, event_id: str) -> None: pass # This is wrong ❌. `@cachedList` expects to be given the same number # of arguments as the underlying cached function, just with one of # the arguments being an iterable @descriptors.cachedList(cached_method_name="fn", list_name="keys") - def list_fn(self, keys: Iterable[Tuple[str, str]]): + def list_fn(self, keys: Iterable[Tuple[str, str]]) -> None: pass # Corrected syntax ✅ From 8839b6c2f8b07d5d122a15e79b1ebdbdd5f3e26b Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 24 May 2023 13:23:26 -0700 Subject: [PATCH 024/562] Add requesting user id parameter to key claim methods in `TransportLayerClient` (#15663) --- changelog.d/15663.misc | 1 + synapse/federation/federation_client.py | 6 ++++-- synapse/federation/transport/client.py | 16 +++++++++++++--- synapse/handlers/e2e_keys.py | 3 ++- synapse/rest/client/keys.py | 8 ++++---- tests/handlers/test_e2e_keys.py | 16 +++++++++++++++- 6 files changed, 39 insertions(+), 11 deletions(-) create mode 100644 changelog.d/15663.misc diff --git a/changelog.d/15663.misc b/changelog.d/15663.misc new file mode 100644 index 0000000000..cc5f801543 --- /dev/null +++ b/changelog.d/15663.misc @@ -0,0 +1 @@ +Add requesting user id parameter to key claim methods in `TransportLayerClient`. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 076b9287c6..a2cf3a96c6 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -236,6 +236,7 @@ class FederationClient(FederationBase): async def claim_client_keys( self, + user: UserID, destination: str, query: Dict[str, Dict[str, Dict[str, int]]], timeout: Optional[int], @@ -243,6 +244,7 @@ class FederationClient(FederationBase): """Claims one-time keys for a device hosted on a remote server. Args: + user: The user id of the requesting user destination: Domain name of the remote homeserver content: The query content. 
@@ -279,7 +281,7 @@ class FederationClient(FederationBase): if use_unstable: try: return await self.transport_layer.claim_client_keys_unstable( - destination, unstable_content, timeout + user, destination, unstable_content, timeout ) except HttpResponseException as e: # If an error is received that is due to an unrecognised endpoint, @@ -295,7 +297,7 @@ class FederationClient(FederationBase): logger.debug("Skipping unstable claim client keys API") return await self.transport_layer.claim_client_keys( - destination, content, timeout + user, destination, content, timeout ) @trace diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 1cfc4446c4..0b17f713ea 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -45,7 +45,7 @@ from synapse.events import EventBase, make_event_from_dict from synapse.federation.units import Transaction from synapse.http.matrixfederationclient import ByteParser, LegacyJsonSendParser from synapse.http.types import QueryParams -from synapse.types import JsonDict +from synapse.types import JsonDict, UserID from synapse.util import ExceptionBundle if TYPE_CHECKING: @@ -630,7 +630,11 @@ class TransportLayerClient: ) async def claim_client_keys( - self, destination: str, query_content: JsonDict, timeout: Optional[int] + self, + user: UserID, + destination: str, + query_content: JsonDict, + timeout: Optional[int], ) -> JsonDict: """Claim one-time keys for a list of devices hosted on a remote server. @@ -655,6 +659,7 @@ class TransportLayerClient: } Args: + user: the user_id of the requesting user destination: The server to query. query_content: The user ids to query. Returns: @@ -671,7 +676,11 @@ class TransportLayerClient: ) async def claim_client_keys_unstable( - self, destination: str, query_content: JsonDict, timeout: Optional[int] + self, + user: UserID, + destination: str, + query_content: JsonDict, + timeout: Optional[int], ) -> JsonDict: """Claim one-time keys for a list of devices hosted on a remote server. @@ -696,6 +705,7 @@ class TransportLayerClient: } Args: + user: the user_id of the requesting user destination: The server to query. query_content: The user ids to query. 
Returns: diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 24741b667b..ad075497c8 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -661,6 +661,7 @@ class E2eKeysHandler: async def claim_one_time_keys( self, query: Dict[str, Dict[str, Dict[str, int]]], + user: UserID, timeout: Optional[int], always_include_fallback_keys: bool, ) -> JsonDict: @@ -703,7 +704,7 @@ class E2eKeysHandler: device_keys = remote_queries[destination] try: remote_result = await self.federation.claim_client_keys( - destination, device_keys, timeout=timeout + user, destination, device_keys, timeout=timeout ) for user_id, keys in remote_result["one_time_keys"].items(): if user_id in device_keys: diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 9bbab5e624..413edd8a4d 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -287,7 +287,7 @@ class OneTimeKeyServlet(RestServlet): self.e2e_keys_handler = hs.get_e2e_keys_handler() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - await self.auth.get_user_by_req(request, allow_guest=True) + requester = await self.auth.get_user_by_req(request, allow_guest=True) timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) @@ -298,7 +298,7 @@ class OneTimeKeyServlet(RestServlet): query.setdefault(user_id, {})[device_id] = {algorithm: 1} result = await self.e2e_keys_handler.claim_one_time_keys( - query, timeout, always_include_fallback_keys=False + query, requester.user, timeout, always_include_fallback_keys=False ) return 200, result @@ -335,7 +335,7 @@ class UnstableOneTimeKeyServlet(RestServlet): self.e2e_keys_handler = hs.get_e2e_keys_handler() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - await self.auth.get_user_by_req(request, allow_guest=True) + requester = await self.auth.get_user_by_req(request, allow_guest=True) timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) @@ -346,7 +346,7 @@ class UnstableOneTimeKeyServlet(RestServlet): query.setdefault(user_id, {})[device_id] = Counter(algorithms) result = await self.e2e_keys_handler.claim_one_time_keys( - query, timeout, always_include_fallback_keys=True + query, requester.user, timeout, always_include_fallback_keys=True ) return 200, result diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 72d0584061..2eaffe511e 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -27,7 +27,7 @@ from synapse.appservice import ApplicationService from synapse.handlers.device import DeviceHandler from synapse.server import HomeServer from synapse.storage.databases.main.appservice import _make_exclusive_regex -from synapse.types import JsonDict +from synapse.types import JsonDict, UserID from synapse.util import Clock from tests import unittest @@ -45,6 +45,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.handler = hs.get_e2e_keys_handler() self.store = self.hs.get_datastores().main + self.requester = UserID.from_string(f"@test_requester:{self.hs.hostname}") def test_query_local_devices_no_devices(self) -> None: """If the user has no devices, we expect an empty list.""" @@ -161,6 +162,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): res2 = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: 
{"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -206,6 +208,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -225,6 +228,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -274,6 +278,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -286,6 +291,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -307,6 +313,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -348,6 +355,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=True, ) @@ -370,6 +378,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=True, ) @@ -1080,6 +1089,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id_1: {"alg1": 1}, device_id_2: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=False, ) @@ -1125,6 +1135,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id_1: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=True, ) @@ -1169,6 +1180,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id_1: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=True, ) @@ -1202,6 +1214,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id_1: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=True, ) @@ -1229,6 +1242,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): claim_res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id_1: {"alg1": 1}}}, + self.requester, timeout=None, always_include_fallback_keys=True, ) From 77156a4bc1f87e98754e3f7f86e52a84a4253a10 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 24 May 2023 23:22:24 -0500 Subject: [PATCH 025/562] Process previously failed backfill events in the background (#15585) Process previously failed backfill events in the background because they are bound to fail again and we don't need to waste time holding up the request for something that 
is likely to fail anyway. Fix https://github.com/matrix-org/synapse/issues/13623 Follow-up to https://github.com/matrix-org/synapse/issues/13621 and https://github.com/matrix-org/synapse/issues/13622 Part of making `/messages` faster: https://github.com/matrix-org/synapse/issues/13356 --- changelog.d/15585.feature | 1 + synapse/handlers/federation_event.py | 70 ++++++++++++-- .../databases/main/event_federation.py | 31 +++++- synapse/util/iterutils.py | 27 ++++++ tests/handlers/test_federation_event.py | 95 +++++++++++++++++++ tests/storage/test_event_federation.py | 37 ++++++++ 6 files changed, 252 insertions(+), 9 deletions(-) create mode 100644 changelog.d/15585.feature diff --git a/changelog.d/15585.feature b/changelog.d/15585.feature new file mode 100644 index 0000000000..1adcfb69ee --- /dev/null +++ b/changelog.d/15585.feature @@ -0,0 +1 @@ +Process previously failed backfill events in the background to avoid blocking requests for something that is bound to fail again. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 9a08618da5..42141d3670 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -88,7 +88,7 @@ from synapse.types import ( ) from synapse.types.state import StateFilter from synapse.util.async_helpers import Linearizer, concurrently_execute -from synapse.util.iterutils import batch_iter +from synapse.util.iterutils import batch_iter, partition from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import shortstr @@ -865,7 +865,7 @@ class FederationEventHandler: [event.event_id for event in events] ) - new_events = [] + new_events: List[EventBase] = [] for event in events: event_id = event.event_id @@ -895,12 +895,66 @@ class FederationEventHandler: str(len(new_events)), ) - # We want to sort these by depth so we process them and - # tell clients about them in order. - sorted_events = sorted(new_events, key=lambda x: x.depth) - for ev in sorted_events: - with nested_logging_context(ev.event_id): - await self._process_pulled_event(origin, ev, backfilled=backfilled) + @trace + async def _process_new_pulled_events(new_events: Collection[EventBase]) -> None: + # We want to sort these by depth so we process them and tell clients about + # them in order. It's also more efficient to backfill this way (`depth` + # ascending) because one backfill event is likely to be the `prev_event` of + # the next event we're going to process. + sorted_events = sorted(new_events, key=lambda x: x.depth) + for ev in sorted_events: + with nested_logging_context(ev.event_id): + await self._process_pulled_event(origin, ev, backfilled=backfilled) + + # Check if we've already tried to process these events at some point in the + # past. We aren't concerned with the exponential backoff here, just whether it + # has failed to be processed before. + event_ids_with_failed_pull_attempts = ( + await self._store.get_event_ids_with_failed_pull_attempts( + [event.event_id for event in new_events] + ) + ) + + # We construct the event lists in source order from the `/backfill` response because + # it's a) easiest, but also b) the order in which we process things matters for + # MSC2716 historical batches because many historical events are all at the same + # `depth` and we rely on the tenuous sort that the other server gave us and hope + # they're doing their best.
The brittle nature of this ordering for historical + # messages over federation is one of the reasons why we don't want to continue + # on MSC2716 until we have online topological ordering. + events_with_failed_pull_attempts, fresh_events = partition( + new_events, lambda e: e.event_id in event_ids_with_failed_pull_attempts + ) + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "events_with_failed_pull_attempts", + str(event_ids_with_failed_pull_attempts), + ) + set_tag( + SynapseTags.RESULT_PREFIX + "events_with_failed_pull_attempts.length", + str(len(events_with_failed_pull_attempts)), + ) + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "fresh_events", + str([event.event_id for event in fresh_events]), + ) + set_tag( + SynapseTags.RESULT_PREFIX + "fresh_events.length", + str(len(fresh_events)), + ) + + # Process previously failed backfill events in the background to not waste + # time on something that is likely to fail again. + if len(events_with_failed_pull_attempts) > 0: + run_as_background_process( + "_process_new_pulled_events_with_failed_pull_attempts", + _process_new_pulled_events, + events_with_failed_pull_attempts, + ) + + # We can optimistically try to process and wait for the event to be fully + # persisted if we've never tried before. + if len(fresh_events) > 0: + await _process_new_pulled_events(fresh_events) @trace @tag_args diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index ac19de183c..2681917d0b 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -46,7 +46,7 @@ from synapse.storage.database import ( from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.databases.main.signatures import SignatureWorkerStore from synapse.storage.engines import PostgresEngine, Sqlite3Engine -from synapse.types import JsonDict +from synapse.types import JsonDict, StrCollection from synapse.util import json_encoder from synapse.util.caches.descriptors import cached from synapse.util.caches.lrucache import LruCache @@ -1583,6 +1583,35 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas txn.execute(sql, (room_id, event_id, 1, self._clock.time_msec(), cause)) + @trace + async def get_event_ids_with_failed_pull_attempts( + self, event_ids: StrCollection + ) -> Set[str]: + """ + Filter the given list of `event_ids` and return events which have any failed + pull attempts. + + Args: + event_ids: A list of events to filter down. + + Returns: + A filtered down list of `event_ids` that have previous failed pull attempts. 
+ """ + + rows = await self.db_pool.simple_select_many_batch( + table="event_failed_pull_attempts", + column="event_id", + iterable=event_ids, + keyvalues={}, + retcols=("event_id",), + desc="get_event_ids_with_failed_pull_attempts", + ) + event_ids_with_failed_pull_attempts: Set[str] = { + row["event_id"] for row in rows + } + + return event_ids_with_failed_pull_attempts + @trace async def get_event_ids_to_not_pull_from_backoff( self, diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py index 4938ddf703..a0efb96d3b 100644 --- a/synapse/util/iterutils.py +++ b/synapse/util/iterutils.py @@ -15,11 +15,13 @@ import heapq from itertools import islice from typing import ( + Callable, Collection, Dict, Generator, Iterable, Iterator, + List, Mapping, Set, Sized, @@ -71,6 +73,31 @@ def chunk_seq(iseq: S, maxlen: int) -> Iterator[S]: return (iseq[i : i + maxlen] for i in range(0, len(iseq), maxlen)) +def partition( + iterable: Iterable[T], predicate: Callable[[T], bool] +) -> Tuple[List[T], List[T]]: + """ + Separate a given iterable into two lists based on the result of a predicate function. + + Args: + iterable: the iterable to partition (separate) + predicate: a function that takes an item from the iterable and returns a boolean + + Returns: + A tuple of two lists, the first containing all items for which the predicate + returned True, the second containing all items for which the predicate returned + False + """ + true_results = [] + false_results = [] + for item in iterable: + if predicate(item): + true_results.append(item) + else: + false_results.append(item) + return true_results, false_results + + def sorted_topologically( nodes: Iterable[T], graph: Mapping[T, Collection[T]], diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py index c067e5bfe3..23f1b33b2f 100644 --- a/tests/handlers/test_federation_event.py +++ b/tests/handlers/test_federation_event.py @@ -664,6 +664,101 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): StoreError, ) + def test_backfill_process_previously_failed_pull_attempt_event_in_the_background( + self, + ) -> None: + """ + Sanity check that events are still processed even if it is in the background + for events that already have failed pull attempts. 
+ """ + OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}" + main_store = self.hs.get_datastores().main + + # Create the room + user_id = self.register_user("kermit", "test") + tok = self.login("kermit", "test") + room_id = self.helper.create_room_as(room_creator=user_id, tok=tok) + room_version = self.get_success(main_store.get_room_version(room_id)) + + # Allow the remote user to send state events + self.helper.send_state( + room_id, + "m.room.power_levels", + {"events_default": 0, "state_default": 0}, + tok=tok, + ) + + # Add the remote user to the room + member_event = self.get_success( + event_injection.inject_member_event(self.hs, room_id, OTHER_USER, "join") + ) + + initial_state_map = self.get_success( + main_store.get_partial_current_state_ids(room_id) + ) + + auth_event_ids = [ + initial_state_map[("m.room.create", "")], + initial_state_map[("m.room.power_levels", "")], + member_event.event_id, + ] + + # Create a regular event that should process + pulled_event = make_event_from_dict( + self.add_hashes_and_signatures_from_other_server( + { + "type": "test_regular_type", + "room_id": room_id, + "sender": OTHER_USER, + "prev_events": [ + member_event.event_id, + ], + "auth_events": auth_event_ids, + "origin_server_ts": 1, + "depth": 12, + "content": {"body": "pulled_event"}, + } + ), + room_version, + ) + + # Record a failed pull attempt for this event which will cause us to backfill it + # in the background from here on out. + self.get_success( + main_store.record_event_failed_pull_attempt( + room_id, pulled_event.event_id, "fake cause" + ) + ) + + # We expect an outbound request to /backfill, so stub that out + self.mock_federation_transport_client.backfill.return_value = make_awaitable( + { + "origin": self.OTHER_SERVER_NAME, + "origin_server_ts": 123, + "pdus": [ + pulled_event.get_pdu_json(), + ], + } + ) + + # The function under test: try to backfill and process the pulled event + with LoggingContext("test"): + self.get_success( + self.hs.get_federation_event_handler().backfill( + self.OTHER_SERVER_NAME, + room_id, + limit=1, + extremities=["$some_extremity"], + ) + ) + + # Ensure `run_as_background_process(...)` has a chance to run (essentially + # `wait_for_background_processes()`) + self.reactor.pump((0.1,)) + + # Make sure we processed and persisted the pulled event + self.get_success(main_store.get_event(pulled_event.event_id, allow_none=False)) + def test_process_pulled_event_with_rejected_missing_state(self) -> None: """Ensure that we correctly handle pulled events with missing state containing a rejected state event diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 81e50bdd55..4b8d8328d7 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -1134,6 +1134,43 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] self.assertEqual(backfill_event_ids, ["insertion_eventA"]) + def test_get_event_ids_with_failed_pull_attempts(self) -> None: + """ + Test to make sure we properly get event_ids based on whether they have any + failed pull attempts. 
+ """ + # Create the room + user_id = self.register_user("alice", "test") + tok = self.login("alice", "test") + room_id = self.helper.create_room_as(room_creator=user_id, tok=tok) + + self.get_success( + self.store.record_event_failed_pull_attempt( + room_id, "$failed_event_id1", "fake cause" + ) + ) + self.get_success( + self.store.record_event_failed_pull_attempt( + room_id, "$failed_event_id2", "fake cause" + ) + ) + + event_ids_with_failed_pull_attempts = self.get_success( + self.store.get_event_ids_with_failed_pull_attempts( + event_ids=[ + "$failed_event_id1", + "$fresh_event_id1", + "$failed_event_id2", + "$fresh_event_id2", + ] + ) + ) + + self.assertEqual( + event_ids_with_failed_pull_attempts, + {"$failed_event_id1", "$failed_event_id2"}, + ) + def test_get_event_ids_to_not_pull_from_backoff(self) -> None: """ Test to make sure only event IDs we should backoff from are returned. From 2d8a2ca374916e8a24ff43355c0ad24d456fab25 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 26 May 2023 10:53:10 +0000 Subject: [PATCH 026/562] Add `dch` and `notify-send` to the development Nix flake so that the release script can be used. (#15673) * Add dch and notify-send to the Nix dev flake * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/15673.misc | 1 + flake.nix | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/15673.misc diff --git a/changelog.d/15673.misc b/changelog.d/15673.misc new file mode 100644 index 0000000000..52148fc63f --- /dev/null +++ b/changelog.d/15673.misc @@ -0,0 +1 @@ +Add `dch` and `notify-send` to the development Nix flake so that the release script can be used. \ No newline at end of file diff --git a/flake.nix b/flake.nix index 7351571e61..8c7a4f8769 100644 --- a/flake.nix +++ b/flake.nix @@ -100,6 +100,10 @@ # For building the Synapse documentation website. mdbook + + # For releasing Synapse + debian-devscripts # (`dch` for manipulating the Debian changelog) + libnotify # (the release script uses `notify-send` to tell you when CI jobs are done) ]; # Install Python and manage a virtualenv with Poetry. From 4e013093a87094c711eb047a41e2de3807c7873e Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Fri, 26 May 2023 05:46:13 -0600 Subject: [PATCH 027/562] Add MSC3820 (room version 11) option 2 unstable room version. (#15666) --- changelog.d/15666.misc | 1 + synapse/api/room_versions.py | 24 ++++++++++++++++++++++++ 2 files changed, 25 insertions(+) create mode 100644 changelog.d/15666.misc diff --git a/changelog.d/15666.misc b/changelog.d/15666.misc new file mode 100644 index 0000000000..92eae49952 --- /dev/null +++ b/changelog.d/15666.misc @@ -0,0 +1 @@ +Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11. 
\ No newline at end of file diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 7030b133d3..035a14171b 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -485,6 +485,30 @@ class RoomVersions: msc3931_push_features=(), msc3989_redaction_rules=True, ) + MSC3820opt2 = RoomVersion( + # Based upon v10 + "org.matrix.msc3820.opt2", + RoomDisposition.UNSTABLE, + EventFormatVersions.ROOM_V4_PLUS, + StateResolutionVersions.V2, + enforce_key_validity=True, + special_case_aliases_auth=False, + strict_canonicaljson=True, + limit_notifications_power_levels=True, + msc2175_implicit_room_creator=True, # Used by MSC3820 + msc2176_redaction_rules=True, # Used by MSC3820 + msc3083_join_rules=True, + msc3375_redaction_rules=True, + msc2403_knocking=True, + msc2716_historical=False, + msc2716_redactions=False, + msc3389_relation_redactions=False, + msc3787_knock_restricted_join_rule=True, + msc3667_int_only_power_levels=True, + msc3821_redaction_rules=True, # Used by MSC3820 + msc3931_push_features=(), + msc3989_redaction_rules=True, # Used by MSC3820 + ) KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = { From c775d80b73b7930b9541e353fc24dcef66579e48 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 26 May 2023 14:28:55 +0000 Subject: [PATCH 028/562] Fix a bug introduced in Synapse v1.84.0 where workers do not start up when no `instance_map` was provided. (#15672) * Fix #15669: always populate instance map even if it was empty * Fix some tests * Fix more tests * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) * CI fix: don't forget to update apt repository sources before installing olddeps deps * Add test testing the backwards compatibility --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- .github/workflows/tests.yml | 1 + changelog.d/15672.bugfix | 1 + synapse/config/workers.py | 2 +- tests/app/test_homeserver_start.py | 2 ++ tests/app/test_openid_listener.py | 1 + tests/config/test_workers.py | 43 +++++++++++++++++++++--- tests/replication/test_federation_ack.py | 1 + tests/storage/test_rollback_worker.py | 1 + 8 files changed, 47 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15672.bugfix diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 51cbeb3298..ce3a57fb01 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -314,6 +314,7 @@ jobs: # There aren't wheels for some of the older deps, so we need to install # their build dependencies - run: | + sudo apt-get -qq update sudo apt-get -qq install build-essential libffi-dev python-dev \ libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev diff --git a/changelog.d/15672.bugfix b/changelog.d/15672.bugfix new file mode 100644 index 0000000000..c81d7332b7 --- /dev/null +++ b/changelog.d/15672.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.84.0 where workers do not start up when no `instance_map` was provided. \ No newline at end of file diff --git a/synapse/config/workers.py b/synapse/config/workers.py index d2311cc857..38e13dd7b5 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -222,7 +222,7 @@ class WorkerConfig(Config): # itself doesn't need this data as it would never have to talk to itself. 
instance_map: Dict[str, Any] = config.get("instance_map", {}) - if instance_map and self.instance_name is not MAIN_PROCESS_INSTANCE_NAME: + if self.instance_name is not MAIN_PROCESS_INSTANCE_NAME: # The host used to connect to the main synapse main_host = config.get("worker_replication_host", None) diff --git a/tests/app/test_homeserver_start.py b/tests/app/test_homeserver_start.py index 788c935537..cd117b7394 100644 --- a/tests/app/test_homeserver_start.py +++ b/tests/app/test_homeserver_start.py @@ -25,6 +25,8 @@ class HomeserverAppStartTestCase(ConfigFileTestCase): # Add a blank line as otherwise the next addition ends up on a line with a comment self.add_lines_to_config([" "]) self.add_lines_to_config(["worker_app: test_worker_app"]) + self.add_lines_to_config(["worker_replication_host: 127.0.0.1"]) + self.add_lines_to_config(["worker_replication_http_port: 0"]) # Ensure that starting master process with worker config raises an exception with self.assertRaises(ConfigError): diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 2ee343d8a4..056d9402a4 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -42,6 +42,7 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): # have to tell the FederationHandler not to try to access stuff that is only # in the primary store. conf["worker_app"] = "yes" + conf["instance_map"] = {"main": {"host": "127.0.0.1", "port": 0}} return conf diff --git a/tests/config/test_workers.py b/tests/config/test_workers.py index 49a6bdf408..086359fd71 100644 --- a/tests/config/test_workers.py +++ b/tests/config/test_workers.py @@ -17,7 +17,7 @@ from unittest.mock import Mock from immutabledict import immutabledict from synapse.config import ConfigError -from synapse.config.workers import WorkerConfig +from synapse.config.workers import InstanceLocationConfig, WorkerConfig from tests.unittest import TestCase @@ -94,6 +94,7 @@ class WorkerDutyConfigTestCase(TestCase): # so that it doesn't raise an exception here. # (This is not read by `_should_this_worker_perform_duty`.) "notify_appservices": False, + "instance_map": {"main": {"host": "127.0.0.1", "port": 0}}, }, ) @@ -138,7 +139,9 @@ class WorkerDutyConfigTestCase(TestCase): """ main_process_config = self._make_worker_config( - worker_app="synapse.app.homeserver", worker_name=None + worker_app="synapse.app.homeserver", + worker_name=None, + extras={"instance_map": {"main": {"host": "127.0.0.1", "port": 0}}}, ) self.assertTrue( @@ -203,6 +206,7 @@ class WorkerDutyConfigTestCase(TestCase): # so that it doesn't raise an exception here. # (This is not read by `_should_this_worker_perform_duty`.) "notify_appservices": False, + "instance_map": {"main": {"host": "127.0.0.1", "port": 0}}, }, ) @@ -236,7 +240,9 @@ class WorkerDutyConfigTestCase(TestCase): Tests new config options. This is for the master's config. """ main_process_config = self._make_worker_config( - worker_app="synapse.app.homeserver", worker_name=None + worker_app="synapse.app.homeserver", + worker_name=None, + extras={"instance_map": {"main": {"host": "127.0.0.1", "port": 0}}}, ) self.assertTrue( @@ -262,7 +268,9 @@ class WorkerDutyConfigTestCase(TestCase): Tests new config options. This is for the worker's config. 
""" appservice_worker_config = self._make_worker_config( - worker_app="synapse.app.generic_worker", worker_name="worker1" + worker_app="synapse.app.generic_worker", + worker_name="worker1", + extras={"instance_map": {"main": {"host": "127.0.0.1", "port": 0}}}, ) self.assertTrue( @@ -298,6 +306,7 @@ class WorkerDutyConfigTestCase(TestCase): extras={ "notify_appservices_from_worker": "worker2", "update_user_directory_from_worker": "worker1", + "instance_map": {"main": {"host": "127.0.0.1", "port": 0}}, }, ) self.assertFalse(worker1_config.should_notify_appservices) @@ -309,7 +318,33 @@ class WorkerDutyConfigTestCase(TestCase): extras={ "notify_appservices_from_worker": "worker2", "update_user_directory_from_worker": "worker1", + "instance_map": {"main": {"host": "127.0.0.1", "port": 0}}, }, ) self.assertTrue(worker2_config.should_notify_appservices) self.assertFalse(worker2_config.should_update_user_directory) + + def test_worker_instance_map_compat(self) -> None: + """ + Test that `worker_replication_*` settings are compatibly handled by + adding them to the instance map as a `main` entry. + """ + + worker1_config = self._make_worker_config( + worker_app="synapse.app.generic_worker", + worker_name="worker1", + extras={ + "notify_appservices_from_worker": "worker2", + "update_user_directory_from_worker": "worker1", + "worker_replication_host": "127.0.0.42", + "worker_replication_http_port": 1979, + }, + ) + self.assertEqual( + worker1_config.instance_map, + { + "master": InstanceLocationConfig( + host="127.0.0.42", port=1979, tls=False + ), + }, + ) diff --git a/tests/replication/test_federation_ack.py b/tests/replication/test_federation_ack.py index 12668b34c5..cf59b1a204 100644 --- a/tests/replication/test_federation_ack.py +++ b/tests/replication/test_federation_ack.py @@ -32,6 +32,7 @@ class FederationAckTestCase(HomeserverTestCase): config["worker_app"] = "synapse.app.generic_worker" config["worker_name"] = "federation_sender1" config["federation_sender_instances"] = ["federation_sender1"] + config["instance_map"] = {"main": {"host": "127.0.0.1", "port": 0}} return config def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: diff --git a/tests/storage/test_rollback_worker.py b/tests/storage/test_rollback_worker.py index 966aafea6f..6861d3a6c9 100644 --- a/tests/storage/test_rollback_worker.py +++ b/tests/storage/test_rollback_worker.py @@ -55,6 +55,7 @@ class WorkerSchemaTests(HomeserverTestCase): # Mark this as a worker app. conf["worker_app"] = "yes" + conf["instance_map"] = {"main": {"host": "127.0.0.1", "port": 0}} return conf From 65bf5f3649fd108d91fe64795186d27940e80426 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Fri, 26 May 2023 16:17:50 +0100 Subject: [PATCH 029/562] 1.84.1 --- CHANGES.md | 19 +++++++++++++++++++ changelog.d/15672.bugfix | 1 - changelog.d/15673.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 5 files changed, 26 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/15672.bugfix delete mode 100644 changelog.d/15673.misc diff --git a/CHANGES.md b/CHANGES.md index e9397158f1..1fe1d013c6 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,22 @@ +Synapse 1.84.1 (2023-05-26) +=========================== + +This patch release fixes a major issue with homeservers that does not have an `instance_map` defined but which do use workers. +If you have already upgraded to Synapse 1.84.0 and your homeserver is working normally, then there is no need to update to this patch release. 
+ + +Bugfixes +-------- + +- Fix a bug introduced in Synapse v1.84.0 where workers do not start up when no `instance_map` was provided. ([\#15672](https://github.com/matrix-org/synapse/issues/15672)) + + +Internal Changes +---------------- + +- Add `dch` and `notify-send` to the development Nix flake so that the release script can be used. ([\#15673](https://github.com/matrix-org/synapse/issues/15673)) + + Synapse 1.84.0 (2023-05-23) =========================== diff --git a/changelog.d/15672.bugfix b/changelog.d/15672.bugfix deleted file mode 100644 index c81d7332b7..0000000000 --- a/changelog.d/15672.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.84.0 where workers do not start up when no `instance_map` was provided. \ No newline at end of file diff --git a/changelog.d/15673.misc b/changelog.d/15673.misc deleted file mode 100644 index 52148fc63f..0000000000 --- a/changelog.d/15673.misc +++ /dev/null @@ -1 +0,0 @@ -Add `dch` and `notify-send` to the development Nix flake so that the release script can be used. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 51935e03b6..fbdc9c177e 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.84.1) stable; urgency=medium + + * New Synapse release 1.84.1. + + -- Synapse Packaging team Fri, 26 May 2023 16:15:30 +0100 + matrix-synapse-py3 (1.84.0) stable; urgency=medium * New Synapse release 1.84.0. diff --git a/pyproject.toml b/pyproject.toml index 9c77f9294a..6e9bce65b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.84.0" +version = "1.84.1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From cb6f4a84a6a8f2b79b80851f37eb5fa4c7c5264a Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Fri, 26 May 2023 16:18:35 +0100 Subject: [PATCH 030/562] Fix a typographical error in changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 1fe1d013c6..85c9af8ce4 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,7 +1,7 @@ Synapse 1.84.1 (2023-05-26) =========================== -This patch release fixes a major issue with homeservers that does not have an `instance_map` defined but which do use workers. +This patch release fixes a major issue with homeservers that do not have an `instance_map` defined but which do use workers. If you have already upgraded to Synapse 1.84.0 and your homeserver is working normally, then there is no need to update to this patch release. From 2ad91ec628126753590c1a90c432270d6c8fa8fd Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 26 May 2023 13:16:08 -0400 Subject: [PATCH 031/562] Set thread_id column to non-null for event_push_{actions,actions_staging,summary} (#15597) Updates the database schema to require a thread_id (by adding a constraint that the column is non-null) for event_push_actions, event_push_actions_staging, and event_push_summary. For PostgreSQL we add the constraint as NOT VALID, then VALIDATE the constraint in a background job to avoid locking the table during an upgrade. Each table is updated as a separate schema delta to avoid deadlocks between them. For SQLite we simply rebuild the table & copy the data.
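As a rough sketch of that PostgreSQL pattern (illustrative only: the constraint and table names are taken from the deltas in this patch, while the locking notes describe general PostgreSQL behaviour rather than anything this patch adds):

    -- Cheap: takes a brief ACCESS EXCLUSIVE lock, but does not scan existing rows.
    ALTER TABLE event_push_actions
        ADD CONSTRAINT event_push_actions_thread_id
        CHECK (thread_id IS NOT NULL) NOT VALID;

    -- Later, from a background update: scans the table to check existing rows,
    -- but only holds a SHARE UPDATE EXCLUSIVE lock, so normal writes continue.
    ALTER TABLE event_push_actions VALIDATE CONSTRAINT event_push_actions_thread_id;

This is the usual way to add a constraint to a large, hot table without blocking it for the duration of a full-table scan.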
--- changelog.d/15597.misc | 1 + synapse/storage/background_updates.py | 44 +++ .../databases/main/event_push_actions.py | 254 +++--------------- synapse/storage/schema/__init__.py | 3 + .../77/05thread_notifications_backfill.sql | 28 ++ ...06thread_notifications_not_null.sql.sqlite | 102 +++++++ ...s_not_null_event_push_actions.sql.postgres | 27 ++ ...ll_event_push_actions_staging.sql.postgres | 27 ++ ...s_not_null_event_push_summary.sql.postgres | 29 ++ 9 files changed, 292 insertions(+), 223 deletions(-) create mode 100644 changelog.d/15597.misc create mode 100644 synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql create mode 100644 synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite create mode 100644 synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres create mode 100644 synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres create mode 100644 synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres diff --git a/changelog.d/15597.misc b/changelog.d/15597.misc new file mode 100644 index 0000000000..2dea23784f --- /dev/null +++ b/changelog.d/15597.misc @@ -0,0 +1 @@ +Make the `thread_id` column on `event_push_actions`, `event_push_actions_staging`, and `event_push_summary` non-null. diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index a99aea8926..ca085ef800 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -561,6 +561,50 @@ class BackgroundUpdater: updater, oneshot=True ) + def register_background_validate_constraint( + self, update_name: str, constraint_name: str, table: str + ) -> None: + """Helper for store classes to do a background validate constraint. + + This only applies on PostgreSQL. + + To use: + + 1. use a schema delta file to add a background update. Example: + INSERT INTO background_updates (update_name, progress_json) VALUES + ('validate_my_constraint', '{}'); + + 2. 
In the Store constructor, call this method + + Args: + update_name: update_name to register for + constraint_name: name of constraint to validate + table: table the constraint is applied to + """ + + def runner(conn: Connection) -> None: + c = conn.cursor() + + sql = f""" + ALTER TABLE {table} VALIDATE CONSTRAINT {constraint_name}; + """ + logger.debug("[SQL] %s", sql) + c.execute(sql) + + async def updater(progress: JsonDict, batch_size: int) -> int: + assert isinstance( + self.db_pool.engine, engines.PostgresEngine + ), "validate constraint background update registered for non-Postgres database" + + logger.info("Validating constraint %s to %s", constraint_name, table) + await self.db_pool.runWithConnection(runner) + await self._end_background_update(update_name) + return 1 + + self._background_update_handlers[update_name] = _BackgroundUpdateHandler( + updater, oneshot=True + ) + async def create_index_in_background( self, index_name: str, diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 6fdb1e292e..07bda7d6be 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -289,179 +289,52 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas unique=True, ) - self.db_pool.updates.register_background_update_handler( - "event_push_backfill_thread_id", - self._background_backfill_thread_id, + self.db_pool.updates.register_background_validate_constraint( + "event_push_actions_staging_thread_id", + constraint_name="event_push_actions_staging_thread_id", + table="event_push_actions_staging", ) - - # Indexes which will be used to quickly make the thread_id column non-null. - self.db_pool.updates.register_background_index_update( - "event_push_actions_thread_id_null", - index_name="event_push_actions_thread_id_null", + self.db_pool.updates.register_background_validate_constraint( + "event_push_actions_thread_id", + constraint_name="event_push_actions_thread_id", table="event_push_actions", - columns=["thread_id"], - where_clause="thread_id IS NULL", ) - self.db_pool.updates.register_background_index_update( - "event_push_summary_thread_id_null", - index_name="event_push_summary_thread_id_null", + self.db_pool.updates.register_background_validate_constraint( + "event_push_summary_thread_id", + constraint_name="event_push_summary_thread_id", table="event_push_summary", - columns=["thread_id"], - where_clause="thread_id IS NULL", ) - # Check ASAP (and then later, every 1s) to see if we have finished - # background updates the event_push_actions and event_push_summary tables. - self._clock.call_later(0.0, self._check_event_push_backfill_thread_id) - self._event_push_backfill_thread_id_done = False - - @wrap_as_background_process("check_event_push_backfill_thread_id") - async def _check_event_push_backfill_thread_id(self) -> None: - """ - Has thread_id finished backfilling? - - If not, we need to just-in-time update it so the queries work. - """ - done = await self.db_pool.updates.has_completed_background_update( - "event_push_backfill_thread_id" + self.db_pool.updates.register_background_update_handler( + "event_push_drop_null_thread_id_indexes", + self._background_drop_null_thread_id_indexes, ) - if done: - self._event_push_backfill_thread_id_done = True - else: - # Reschedule to run.
- self._clock.call_later(15.0, self._check_event_push_backfill_thread_id) - - async def _background_backfill_thread_id( + async def _background_drop_null_thread_id_indexes( self, progress: JsonDict, batch_size: int ) -> int: """ - Fill in the thread_id field for event_push_actions and event_push_summary. - - This is preparatory so that it can be made non-nullable in the future. - - Because all current (null) data is done in an unthreaded manner this - simply assumes it is on the "main" timeline. Since event_push_actions - are periodically cleared it is not possible to correctly re-calculate - the thread_id. + Drop the indexes used to find null thread_ids for event_push_actions and + event_push_summary. """ - event_push_actions_done = progress.get("event_push_actions_done", False) - def add_thread_id_txn( - txn: LoggingTransaction, start_stream_ordering: int - ) -> int: - sql = """ - SELECT stream_ordering - FROM event_push_actions - WHERE - thread_id IS NULL - AND stream_ordering > ? - ORDER BY stream_ordering - LIMIT ? - """ - txn.execute(sql, (start_stream_ordering, batch_size)) + def drop_null_thread_id_indexes_txn(txn: LoggingTransaction) -> None: + sql = "DROP INDEX IF EXISTS event_push_actions_thread_id_null" + logger.debug("[SQL] %s", sql) + txn.execute(sql) - # No more rows to process. - rows = txn.fetchall() - if not rows: - progress["event_push_actions_done"] = True - self.db_pool.updates._background_update_progress_txn( - txn, "event_push_backfill_thread_id", progress - ) - return 0 + sql = "DROP INDEX IF EXISTS event_push_summary_thread_id_null" + logger.debug("[SQL] %s", sql) + txn.execute(sql) - # Update the thread ID for any of those rows. - max_stream_ordering = rows[-1][0] - - sql = """ - UPDATE event_push_actions - SET thread_id = 'main' - WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL - """ - txn.execute( - sql, - ( - start_stream_ordering, - max_stream_ordering, - ), - ) - - # Update progress. - processed_rows = txn.rowcount - progress["max_event_push_actions_stream_ordering"] = max_stream_ordering - self.db_pool.updates._background_update_progress_txn( - txn, "event_push_backfill_thread_id", progress - ) - - return processed_rows - - def add_thread_id_summary_txn(txn: LoggingTransaction) -> int: - min_user_id = progress.get("max_summary_user_id", "") - min_room_id = progress.get("max_summary_room_id", "") - - # Slightly overcomplicated query for getting the Nth user ID / room - # ID tuple, or the last if there are less than N remaining. - sql = """ - SELECT user_id, room_id FROM ( - SELECT user_id, room_id FROM event_push_summary - WHERE (user_id, room_id) > (?, ?) - AND thread_id IS NULL - ORDER BY user_id, room_id - LIMIT ? - ) AS e - ORDER BY user_id DESC, room_id DESC - LIMIT 1 - """ - - txn.execute(sql, (min_user_id, min_room_id, batch_size)) - row = txn.fetchone() - if not row: - return 0 - - max_user_id, max_room_id = row - - sql = """ - UPDATE event_push_summary - SET thread_id = 'main' - WHERE - (?, ?) < (user_id, room_id) AND (user_id, room_id) <= (?, ?) - AND thread_id IS NULL - """ - txn.execute(sql, (min_user_id, min_room_id, max_user_id, max_room_id)) - processed_rows = txn.rowcount - - progress["max_summary_user_id"] = max_user_id - progress["max_summary_room_id"] = max_room_id - self.db_pool.updates._background_update_progress_txn( - txn, "event_push_backfill_thread_id", progress - ) - - return processed_rows - - # First update the event_push_actions table, then the event_push_summary table. 
- # - # Note that the event_push_actions_staging table is ignored since it is - # assumed that items in that table will only exist for a short period of - # time. - if not event_push_actions_done: - result = await self.db_pool.runInteraction( - "event_push_backfill_thread_id", - add_thread_id_txn, - progress.get("max_event_push_actions_stream_ordering", 0), - ) - else: - result = await self.db_pool.runInteraction( - "event_push_backfill_thread_id", - add_thread_id_summary_txn, - ) - - # Only done after the event_push_summary table is done. - if not result: - await self.db_pool.updates._end_background_update( - "event_push_backfill_thread_id" - ) - - return result + await self.db_pool.runInteraction( + "drop_null_thread_id_indexes_txn", + drop_null_thread_id_indexes_txn, + ) + await self.db_pool.updates._end_background_update( + "event_push_drop_null_thread_id_indexes" + ) + return 0 async def get_unread_counts_by_room_for_user(self, user_id: str) -> Dict[str, int]: """Get the notification count by room for a user. Only considers notifications, @@ -711,25 +584,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE), ) - # First ensure that the existing rows have an updated thread_id field. - if not self._event_push_backfill_thread_id_done: - txn.execute( - """ - UPDATE event_push_summary - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - txn.execute( - """ - UPDATE event_push_actions - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - # First we pull the counts from the summary table. # # We check that `last_receipt_stream_ordering` matches the stream ordering of the @@ -1545,25 +1399,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas (room_id, user_id, stream_ordering, *thread_args), ) - # First ensure that the existing rows have an updated thread_id field. - if not self._event_push_backfill_thread_id_done: - txn.execute( - """ - UPDATE event_push_summary - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - txn.execute( - """ - UPDATE event_push_actions - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - # Fetch the notification counts between the stream ordering of the # latest receipt and what was previously summarised. unread_counts = self._get_notif_unread_count_for_user_room( @@ -1698,19 +1533,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas rotate_to_stream_ordering: The new maximum event stream ordering to summarise. """ - # Ensure that any new actions have an updated thread_id. - if not self._event_push_backfill_thread_id_done: - txn.execute( - """ - UPDATE event_push_actions - SET thread_id = ? - WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL - """, - (MAIN_TIMELINE, old_rotate_stream_ordering, rotate_to_stream_ordering), - ) - - # XXX Do we need to update summaries here too? 
-
         # Calculate the new counts that should be upserted into event_push_summary
         sql = """
             SELECT user_id, room_id, thread_id,
@@ -1773,20 +1595,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
 
         logger.info("Rotating notifications, handling %d rows", len(summaries))
 
-        # Ensure that any updated threads have the proper thread_id.
-        if not self._event_push_backfill_thread_id_done:
-            txn.execute_batch(
-                """
-                UPDATE event_push_summary
-                SET thread_id = ?
-                WHERE room_id = ? AND user_id = ? AND thread_id is NULL
-                """,
-                [
-                    (MAIN_TIMELINE, room_id, user_id)
-                    for user_id, room_id, _ in summaries
-                ],
-            )
-
         self.db_pool.simple_upsert_many_txn(
             txn,
             table="event_push_summary",
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index df2cc31ca6..5cc786f030 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -110,6 +110,9 @@ SCHEMA_COMPAT_VERSION = (
     # Queries against `event_stream_ordering` columns in membership tables must
     # be disambiguated.
     #
+    # The thread_id column must be written to with non-null values for the
+    # event_push_actions, event_push_actions_staging, and event_push_summary tables.
+    #
     # insertions to the column `full_user_id` of tables profiles and user_filters can no
     # longer be null
     76
diff --git a/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql b/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql
new file mode 100644
index 0000000000..ce6f9ff937
--- /dev/null
+++ b/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql
@@ -0,0 +1,28 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Force the background updates from 06thread_notifications.sql to run in the
+-- foreground as code will now require those to be "done".
+
+DELETE FROM background_updates WHERE update_name = 'event_push_backfill_thread_id';
+
+-- Overwrite any null thread_id values.
+UPDATE event_push_actions_staging SET thread_id = 'main' WHERE thread_id IS NULL;
+UPDATE event_push_actions SET thread_id = 'main' WHERE thread_id IS NULL;
+UPDATE event_push_summary SET thread_id = 'main' WHERE thread_id IS NULL;
+
+-- Drop the background updates to calculate the indexes used to find null thread_ids.
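+-- (For context: these are the updates that build the `event_push_actions_thread_id_null`
+-- and `event_push_summary_thread_id_null` indexes. With every null value rewritten
+-- above, the indexes are no longer needed, and any copies that were already built are
+-- removed by the `event_push_drop_null_thread_id_indexes` background update registered
+-- by the deltas below.)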
+DELETE FROM background_updates WHERE update_name = 'event_push_actions_thread_id_null'; +DELETE FROM background_updates WHERE update_name = 'event_push_summary_thread_id_null'; diff --git a/synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite new file mode 100644 index 0000000000..d19b9648b5 --- /dev/null +++ b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite @@ -0,0 +1,102 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + -- The thread_id columns can now be made non-nullable. +-- +-- SQLite doesn't support modifying columns to an existing table, so it must +-- be recreated. + +-- Create the new tables. +CREATE TABLE event_push_actions_staging_new ( + event_id TEXT NOT NULL, + user_id TEXT NOT NULL, + actions TEXT NOT NULL, + notif SMALLINT NOT NULL, + highlight SMALLINT NOT NULL, + unread SMALLINT, + thread_id TEXT, + inserted_ts BIGINT, + CONSTRAINT event_push_actions_staging_thread_id CHECK (thread_id is NOT NULL) +); + +CREATE TABLE event_push_actions_new ( + room_id TEXT NOT NULL, + event_id TEXT NOT NULL, + user_id TEXT NOT NULL, + profile_tag VARCHAR(32), + actions TEXT NOT NULL, + topological_ordering BIGINT, + stream_ordering BIGINT, + notif SMALLINT, + highlight SMALLINT, + unread SMALLINT, + thread_id TEXT, + CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag), + CONSTRAINT event_push_actions_thread_id CHECK (thread_id is NOT NULL) +); + +CREATE TABLE event_push_summary_new ( + user_id TEXT NOT NULL, + room_id TEXT NOT NULL, + notif_count BIGINT NOT NULL, + stream_ordering BIGINT NOT NULL, + unread_count BIGINT, + last_receipt_stream_ordering BIGINT, + thread_id TEXT, + CONSTRAINT event_push_summary_thread_id CHECK (thread_id is NOT NULL) +); + +-- Copy the data. +INSERT INTO event_push_actions_staging_new (event_id, user_id, actions, notif, highlight, unread, thread_id, inserted_ts) + SELECT event_id, user_id, actions, notif, highlight, unread, thread_id, inserted_ts + FROM event_push_actions_staging; + +INSERT INTO event_push_actions_new (room_id, event_id, user_id, profile_tag, actions, topological_ordering, stream_ordering, notif, highlight, unread, thread_id) + SELECT room_id, event_id, user_id, profile_tag, actions, topological_ordering, stream_ordering, notif, highlight, unread, thread_id + FROM event_push_actions; + +INSERT INTO event_push_summary_new (user_id, room_id, notif_count, stream_ordering, unread_count, last_receipt_stream_ordering, thread_id) + SELECT user_id, room_id, notif_count, stream_ordering, unread_count, last_receipt_stream_ordering, thread_id + FROM event_push_summary; + +-- Drop the old tables. +DROP TABLE event_push_actions_staging; +DROP TABLE event_push_actions; +DROP TABLE event_push_summary; + +-- Rename the tables. 
+ALTER TABLE event_push_actions_staging_new RENAME TO event_push_actions_staging; +ALTER TABLE event_push_actions_new RENAME TO event_push_actions; +ALTER TABLE event_push_summary_new RENAME TO event_push_summary; + +-- Recreate the indexes. +CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging(event_id); + +CREATE INDEX event_push_actions_highlights_index ON event_push_actions (user_id, room_id, topological_ordering, stream_ordering); +CREATE INDEX event_push_actions_rm_tokens on event_push_actions( user_id, room_id, topological_ordering, stream_ordering ); +CREATE INDEX event_push_actions_room_id_user_id on event_push_actions(room_id, user_id); +CREATE INDEX event_push_actions_stream_ordering on event_push_actions( stream_ordering, user_id ); +CREATE INDEX event_push_actions_u_highlight ON event_push_actions (user_id, stream_ordering); + +CREATE UNIQUE INDEX event_push_summary_unique_index2 ON event_push_summary (user_id, room_id, thread_id) ; + +-- Recreate some indexes in the background, by re-running the background updates +-- from 72/02event_push_actions_index.sql and 72/06thread_notifications.sql. +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7706, 'event_push_summary_unique_index2', '{}') + ON CONFLICT (update_name) DO UPDATE SET progress_json = '{}'; +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7706, 'event_push_actions_stream_highlight_index', '{}') + ON CONFLICT (update_name) DO UPDATE SET progress_json = '{}'; diff --git a/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres new file mode 100644 index 0000000000..381184b5e2 --- /dev/null +++ b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres @@ -0,0 +1,27 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- The thread_id columns can now be made non-nullable, this is done by using a +-- constraint (and not altering the column) to avoid taking out a full table lock. +-- +-- We initially add an invalid constraint which guards against new data (this +-- doesn't lock the table). +ALTER TABLE event_push_actions + ADD CONSTRAINT event_push_actions_thread_id CHECK (thread_id IS NOT NULL) NOT VALID; + +-- We then validate the constraint which doesn't need to worry about new data. It +-- only needs a SHARE UPDATE EXCLUSIVE lock but can still take a while to complete. 
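+--
+-- (Illustrative only, and not part of this delta: assuming standard Postgres
+-- semantics, the validation scheduled by the background update registered below is
+-- roughly equivalent to later running
+--
+--   ALTER TABLE event_push_actions VALIDATE CONSTRAINT event_push_actions_thread_id;
+--
+-- without holding an ACCESS EXCLUSIVE lock for the duration.)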
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES + (7706, 'event_push_actions_thread_id', '{}', 'event_push_actions_staging_thread_id'); diff --git a/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres new file mode 100644 index 0000000000..395f9c7260 --- /dev/null +++ b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres @@ -0,0 +1,27 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- The thread_id columns can now be made non-nullable, this is done by using a +-- constraint (and not altering the column) to avoid taking out a full table lock. +-- +-- We initially add an invalid constraint which guards against new data (this +-- doesn't lock the table). +ALTER TABLE event_push_actions_staging + ADD CONSTRAINT event_push_actions_staging_thread_id CHECK (thread_id IS NOT NULL) NOT VALID; + +-- We then validate the constraint which doesn't need to worry about new data. It +-- only needs a SHARE UPDATE EXCLUSIVE lock but can still take a while to complete. +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7706, 'event_push_actions_staging_thread_id', '{}'); diff --git a/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres new file mode 100644 index 0000000000..140ceff1fa --- /dev/null +++ b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres @@ -0,0 +1,29 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- The thread_id columns can now be made non-nullable, this is done by using a +-- constraint (and not altering the column) to avoid taking out a full table lock. +-- +-- We initially add an invalid constraint which guards against new data (this +-- doesn't lock the table). +ALTER TABLE event_push_summary + ADD CONSTRAINT event_push_summary_thread_id CHECK (thread_id IS NOT NULL) NOT VALID; + +-- We then validate the constraint which doesn't need to worry about new data. It +-- only needs a SHARE UPDATE EXCLUSIVE lock but can still take a while to complete. 
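+--
+-- (The `depends_on` values used below chain this validation behind the equivalent
+-- updates for `event_push_actions_staging` and `event_push_actions`, so the three
+-- constraints are validated one at a time and the old-index clean-up only runs once
+-- all of them are done.)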
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
+  (7706, 'event_push_summary_thread_id', '{}', 'event_push_actions_thread_id'),
+  -- Also clean up the old indexes.
+  (7706, 'event_push_drop_null_thread_id_indexes', '{}', 'event_push_summary_thread_id');

From 179f0f851e456c8dda3c7092bcb72bd2ec5e65cc Mon Sep 17 00:00:00 2001
From: Grant McLean
Date: Sat, 27 May 2023 05:28:04 +1200
Subject: [PATCH 032/562] Documentation improvements to contributing guide
 (#15667) (#15668)

Fix #15667

- Reiterate the importance of getting Rust installed and set up before
  attempting to install the Python dependencies.
- Mention the importance of confirming that `poetry install` completed
  successfully and include a typical error that the user might see if it
  did not.
- Expand on "Now edit homeserver.yaml" to give examples of things likely
  to need changing and to link to the relevant sections of the Synapse
  server documentation.
---
 changelog.d/15668.doc                  |  1 +
 docs/development/contributing_guide.md | 33 ++++++++++++++++++++++----
 2 files changed, 29 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/15668.doc

diff --git a/changelog.d/15668.doc b/changelog.d/15668.doc
new file mode 100644
index 0000000000..3526a4d50c
--- /dev/null
+++ b/changelog.d/15668.doc
@@ -0,0 +1 @@
+Improve contributor docs to make it more clear that Rust is a necessary prerequisite. Contributed by @grantm.
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index 56cf4ba81e..f5ba55afb7 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -22,6 +22,9 @@ on Windows is not officially supported.
 
 The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://www.python.org/downloads/). Your Python also needs support for [virtual environments](https://docs.python.org/3/library/venv.html). This is usually built-in, but some Linux distributions like Debian and Ubuntu split it out into its own package. Running `sudo apt install python3-venv` should be enough.
 
+A recent version of the Rust compiler is needed to build the native modules. The
+easiest way of installing the latest version is to use [rustup](https://rustup.rs/).
+
 Synapse can connect to PostgreSQL via the [psycopg2](https://pypi.org/project/psycopg2/) Python library. Building this library from source requires access to PostgreSQL's C header files. On Debian or Ubuntu Linux, these can be installed with `sudo apt install libpq-dev`.
 
 Synapse has an optional, improved user search with better Unicode support. For that you need the development package of `libicu`. On Debian or Ubuntu Linux, this can be installed with `sudo apt install libicu-dev`.
 
@@ -30,9 +33,6 @@ The source code of Synapse is hosted on GitHub. You will also need [a recent ver
 
 For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).
 
-A recent version of the Rust compiler is needed to build the native modules. The
-easiest way of installing the latest version is to use [rustup](https://rustup.rs/).
-
 
 # 3. Get the source.
 
@@ -53,6 +53,11 @@ can find many good git tutorials on the web.
 
 # 4. Install the dependencies
+
+Before installing the Python dependencies, make sure you have installed a recent version
+of Rust (see the "What do I need?" section above). The easiest way of installing the
+latest version is to use [rustup](https://rustup.rs/).
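+
+For example, on most Unix-like systems this can be done with rustup's standard
+installer (a sketch of one common approach; check the rustup site for the currently
+recommended command):
+
+```sh
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+```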
+
 Synapse uses the [poetry](https://python-poetry.org/) project to manage its
 dependencies and development environment. Once you have installed Python 3 and
 added the source, you should install `poetry`.
@@ -76,7 +81,8 @@ cd path/where/you/have/cloned/the/repository
 poetry install --extras all
 ```
 
-This will install the runtime and developer dependencies for the project.
+This will install the runtime and developer dependencies for the project. Be sure to check
+that the `poetry install` step completed cleanly.
 
 ## Running Synapse via poetry
 
@@ -84,14 +90,31 @@ To start a local instance of Synapse in the locked poetry environment, create a
 
 ```sh
 cp docs/sample_config.yaml homeserver.yaml
+cp docs/sample_log_config.yaml log_config.yaml
 ```
 
-Now edit homeserver.yaml, and run Synapse with:
+Now edit `homeserver.yaml`. Things you might want to change include:
+
+- Setting a `server_name`
+- Adjusting paths so they are correct for your system, for example pointing `log_config` at the log config you just copied
+- Using a [PostgreSQL database instead of SQLite](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#database)
+- Adding a [`registration_shared_secret`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#registration_shared_secret) so you can use the [`register_new_matrix_user` command](https://matrix-org.github.io/synapse/latest/setup/installation.html#registering-a-user).
+
+And then run Synapse with the following command:
 
 ```sh
 poetry run python -m synapse.app.homeserver -c homeserver.yaml
 ```
 
+If you get an error like the following:
+
+```
+importlib.metadata.PackageNotFoundError: matrix-synapse
+```
+
+this probably indicates that the `poetry install` step did not complete cleanly: go back,
+resolve any issues, and re-run until successful.
+
 # 5. Get in touch.
 
 Join our developer community on Matrix: [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org)!

From 50918c494057dc93bfa6e37f7d140d68711846d1 Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Fri, 26 May 2023 12:05:24 -0600
Subject: [PATCH 033/562] Add `MSC3820opt2` as a known room version (#15678)

---
 changelog.d/15678.misc       | 1 +
 synapse/api/room_versions.py | 1 +
 2 files changed, 2 insertions(+)
 create mode 100644 changelog.d/15678.misc

diff --git a/changelog.d/15678.misc b/changelog.d/15678.misc
new file mode 100644
index 0000000000..92eae49952
--- /dev/null
+++ b/changelog.d/15678.misc
@@ -0,0 +1 @@
+Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11.
\ No newline at end of file
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
index 035a14171b..c5c71e242f 100644
--- a/synapse/api/room_versions.py
+++ b/synapse/api/room_versions.py
@@ -528,6 +528,7 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
         RoomVersions.V10,
         RoomVersions.MSC2716v4,
         RoomVersions.MSC3989,
+        RoomVersions.MSC3820opt2,
     )
 }
 

From c835befd10ae0087c3c54a36989ba347313b68af Mon Sep 17 00:00:00 2001
From: Jason Little
Date: Fri, 26 May 2023 14:28:39 -0500
Subject: [PATCH 034/562] Add Unix socket support for Redis connections
 (#15644)

Adds a new configuration setting to connect to Redis via a Unix socket instead
of over TCP. Disabled by default.
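For example, a homeserver can then be pointed at a local Redis socket with
configuration along these lines (the socket path here is illustrative only):

    redis:
      enabled: true
      path: /run/redis/redis.sock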
---
 changelog.d/15644.feature                  |  1 +
 .../configuration/config_documentation.md  |  4 ++
 stubs/txredisapi.pyi                       |  3 +
 synapse/config/redis.py                    |  1 +
 synapse/replication/tcp/handler.py         | 10 ++-
 synapse/replication/tcp/redis.py           | 62 ++++++++++++++++---
 synapse/server.py                          | 42 ++++++++-----
 7 files changed, 100 insertions(+), 23 deletions(-)
 create mode 100644 changelog.d/15644.feature

diff --git a/changelog.d/15644.feature b/changelog.d/15644.feature
new file mode 100644
index 0000000000..1b6126af53
--- /dev/null
+++ b/changelog.d/15644.feature
@@ -0,0 +1 @@
+Add Unix socket support for Redis connections. Contributed by Jason Little.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 93b132b6e4..5ede6d0a82 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -3979,6 +3979,8 @@ This setting has the following sub-options:
 * `enabled`: whether to use Redis support. Defaults to false.
 * `host` and `port`: Optional host and port to use to connect to redis. Defaults to
   localhost and 6379
+* `path`: The full path to a local Unix socket file. **If this is used, `host` and
+  `port` are ignored.** Defaults to `/tmp/redis.sock`
 * `password`: Optional password if configured on the Redis instance.
 * `dbid`: Optional redis dbid to use if you need to connect to a specific redis logical db.
 * `use_tls`: Whether to use tls connection. Defaults to false.
@@ -3991,6 +3993,8 @@ This setting has the following sub-options:
  _Changed in Synapse 1.84.0: Added use\_tls, certificate\_file, private\_key\_file,
  ca\_file and ca\_path attributes_
 
+ _Changed in Synapse 1.85.0: Added path option to use a local Unix socket_
+
 Example configuration:
 ```yaml
 redis:
diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi
index 695a2307c2..b7bd59d2ea 100644
--- a/stubs/txredisapi.pyi
+++ b/stubs/txredisapi.pyi
@@ -61,6 +61,9 @@ def lazyConnection(
 # most methods to it via ConnectionHandler.__getattr__.
 class ConnectionHandler(RedisProtocol):
     def disconnect(self) -> "Deferred[None]": ...
+    def __repr__(self) -> str: ...
+
+class UnixConnectionHandler(ConnectionHandler): ...
 
 class RedisFactory(protocol.ReconnectingClientFactory):
     continueTrying: bool
diff --git a/synapse/config/redis.py b/synapse/config/redis.py
index 636cb450b8..3c4c499e22 100644
--- a/synapse/config/redis.py
+++ b/synapse/config/redis.py
@@ -33,6 +33,7 @@ class RedisConfig(Config):
 
         self.redis_host = redis_config.get("host", "localhost")
         self.redis_port = redis_config.get("port", 6379)
+        self.redis_path = redis_config.get("path", None)
         self.redis_dbid = redis_config.get("dbid", None)
         self.redis_password = redis_config.get("password")
 
diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py
index 233ad61d49..5d108fe11b 100644
--- a/synapse/replication/tcp/handler.py
+++ b/synapse/replication/tcp/handler.py
@@ -352,7 +352,15 @@ class ReplicationCommandHandler:
 
         reactor = hs.get_reactor()
         redis_config = hs.config.redis
-        if hs.config.redis.redis_use_tls:
+        if redis_config.redis_path is not None:
+            reactor.connectUNIX(
+                redis_config.redis_path,
+                self._factory,
+                timeout=30,
+                checkPID=False,
+            )
+
+        elif hs.config.redis.redis_use_tls:
             ssl_context_factory = ClientContextFactory(hs.config.redis)
             reactor.connectSSL(
                 redis_config.redis_host,
diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py
index c8f4bf8b27..7e96145b3b 100644
--- a/synapse/replication/tcp/redis.py
+++ b/synapse/replication/tcp/redis.py
@@ -17,7 +17,12 @@ from inspect import isawaitable
 from typing import TYPE_CHECKING, Any, Generic, List, Optional, Type, TypeVar, cast
 
 import attr
-import txredisapi
+from txredisapi import (
+    ConnectionHandler,
+    RedisFactory,
+    SubscriberProtocol,
+    UnixConnectionHandler,
+)
 from zope.interface import implementer
 
 from twisted.internet.address import IPv4Address, IPv6Address
@@ -68,7 +73,7 @@ class ConstantProperty(Generic[T, V]):
 
 
 @implementer(IReplicationConnection)
-class RedisSubscriber(txredisapi.SubscriberProtocol):
+class RedisSubscriber(SubscriberProtocol):
     """Connection to redis subscribed to replication stream.
 
     This class fulfils two functions:
@@ -95,7 +100,7 @@ class RedisSubscriber(txredisapi.SubscriberProtocol):
     synapse_handler: "ReplicationCommandHandler"
     synapse_stream_prefix: str
     synapse_channel_names: List[str]
-    synapse_outbound_redis_connection: txredisapi.ConnectionHandler
+    synapse_outbound_redis_connection: ConnectionHandler
 
     def __init__(self, *args: Any, **kwargs: Any):
         super().__init__(*args, **kwargs)
@@ -229,7 +234,7 @@ class RedisSubscriber(txredisapi.SubscriberProtocol):
         )
 
 
-class SynapseRedisFactory(txredisapi.RedisFactory):
+class SynapseRedisFactory(RedisFactory):
     """A subclass of RedisFactory that periodically sends pings to ensure that we
     detect dead connections.
     """
@@ -245,7 +250,7 @@ class SynapseRedisFactory(txredisapi.RedisFactory):
         dbid: Optional[int],
         poolsize: int,
         isLazy: bool = False,
-        handler: Type = txredisapi.ConnectionHandler,
+        handler: Type = ConnectionHandler,
         charset: str = "utf-8",
         password: Optional[str] = None,
         replyTimeout: int = 30,
@@ -326,7 +331,7 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory):
     def __init__(
         self,
         hs: "HomeServer",
-        outbound_redis_connection: txredisapi.ConnectionHandler,
+        outbound_redis_connection: ConnectionHandler,
         channel_names: List[str],
     ):
         super().__init__(
@@ -368,7 +373,7 @@ def lazyConnection(
     reconnect: bool = True,
     password: Optional[str] = None,
     replyTimeout: int = 30,
-) -> txredisapi.ConnectionHandler:
+) -> ConnectionHandler:
     """Creates a connection to Redis that is lazily set up and reconnects if the
     connection is lost.
""" @@ -380,7 +385,7 @@ def lazyConnection( dbid=dbid, poolsize=1, isLazy=True, - handler=txredisapi.ConnectionHandler, + handler=ConnectionHandler, password=password, replyTimeout=replyTimeout, ) @@ -408,3 +413,44 @@ def lazyConnection( ) return factory.handler + + +def lazyUnixConnection( + hs: "HomeServer", + path: str = "/tmp/redis.sock", + dbid: Optional[int] = None, + reconnect: bool = True, + password: Optional[str] = None, + replyTimeout: int = 30, +) -> ConnectionHandler: + """Creates a connection to Redis that is lazily set up and reconnects if the + connection is lost. + + Returns: + A subclass of ConnectionHandler, which is a UnixConnectionHandler in this case. + """ + + uuid = path + + factory = SynapseRedisFactory( + hs, + uuid=uuid, + dbid=dbid, + poolsize=1, + isLazy=True, + handler=UnixConnectionHandler, + password=password, + replyTimeout=replyTimeout, + ) + factory.continueTrying = reconnect + + reactor = hs.get_reactor() + + reactor.connectUNIX( + path, + factory, + timeout=30, + checkPID=False, + ) + + return factory.handler diff --git a/synapse/server.py b/synapse/server.py index f6e245569c..cce5fb66ff 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -864,22 +864,36 @@ class HomeServer(metaclass=abc.ABCMeta): # We only want to import redis module if we're using it, as we have # `txredisapi` as an optional dependency. - from synapse.replication.tcp.redis import lazyConnection + from synapse.replication.tcp.redis import lazyConnection, lazyUnixConnection - logger.info( - "Connecting to redis (host=%r port=%r) for external cache", - self.config.redis.redis_host, - self.config.redis.redis_port, - ) + if self.config.redis.redis_path is None: + logger.info( + "Connecting to redis (host=%r port=%r) for external cache", + self.config.redis.redis_host, + self.config.redis.redis_port, + ) - return lazyConnection( - hs=self, - host=self.config.redis.redis_host, - port=self.config.redis.redis_port, - dbid=self.config.redis.redis_dbid, - password=self.config.redis.redis_password, - reconnect=True, - ) + return lazyConnection( + hs=self, + host=self.config.redis.redis_host, + port=self.config.redis.redis_port, + dbid=self.config.redis.redis_dbid, + password=self.config.redis.redis_password, + reconnect=True, + ) + else: + logger.info( + "Connecting to redis (path=%r) for external cache", + self.config.redis.redis_path, + ) + + return lazyUnixConnection( + hs=self, + path=self.config.redis.redis_path, + dbid=self.config.redis.redis_dbid, + password=self.config.redis.redis_password, + reconnect=True, + ) def should_send_federation(self) -> bool: "Should this server be sending federation traffic directly?" From 4f07c2a170aceb8f0ede67f654805d55301b422e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 14:07:25 -0400 Subject: [PATCH 035/562] Bump types-pyyaml from 6.0.12.9 to 6.0.12.10 (#15683) --- changelog.d/15683.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15683.misc diff --git a/changelog.d/15683.misc b/changelog.d/15683.misc new file mode 100644 index 0000000000..147f13b99c --- /dev/null +++ b/changelog.d/15683.misc @@ -0,0 +1 @@ +Bump types-pyyaml from 6.0.12.9 to 6.0.12.10. 
diff --git a/poetry.lock b/poetry.lock index 3f8bf7c304..83ea43b59a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3097,14 +3097,14 @@ cryptography = ">=35.0.0" [[package]] name = "types-pyyaml" -version = "6.0.12.9" +version = "6.0.12.10" description = "Typing stubs for PyYAML" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-PyYAML-6.0.12.9.tar.gz", hash = "sha256:c51b1bd6d99ddf0aa2884a7a328810ebf70a4262c292195d3f4f9a0005f9eeb6"}, - {file = "types_PyYAML-6.0.12.9-py3-none-any.whl", hash = "sha256:5aed5aa66bd2d2e158f75dda22b059570ede988559f030cf294871d3b647e3e8"}, + {file = "types-PyYAML-6.0.12.10.tar.gz", hash = "sha256:ebab3d0700b946553724ae6ca636ea932c1b0868701d4af121630e78d695fc97"}, + {file = "types_PyYAML-6.0.12.10-py3-none-any.whl", hash = "sha256:662fa444963eff9b68120d70cda1af5a5f2aa57900003c2006d7626450eaae5f"}, ] [[package]] From ea634a9f811fe768efec51edab5b9a9af6ef53e0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 14:13:40 -0400 Subject: [PATCH 036/562] Bump prometheus-client from 0.16.0 to 0.17.0 (#15682) --- changelog.d/15682.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15682.misc diff --git a/changelog.d/15682.misc b/changelog.d/15682.misc new file mode 100644 index 0000000000..687af7d8d7 --- /dev/null +++ b/changelog.d/15682.misc @@ -0,0 +1 @@ +Bump prometheus-client from 0.16.0 to 0.17.0. diff --git a/poetry.lock b/poetry.lock index 83ea43b59a..ecf704ea93 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1781,14 +1781,14 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytes [[package]] name = "prometheus-client" -version = "0.16.0" +version = "0.17.0" description = "Python client for the Prometheus monitoring system." category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "prometheus_client-0.16.0-py3-none-any.whl", hash = "sha256:0836af6eb2c8f4fed712b2f279f6c0a8bbab29f9f4aa15276b91c7cb0d1616ab"}, - {file = "prometheus_client-0.16.0.tar.gz", hash = "sha256:a03e35b359f14dd1630898543e2120addfdeacd1a6069c1367ae90fd93ad3f48"}, + {file = "prometheus_client-0.17.0-py3-none-any.whl", hash = "sha256:a77b708cf083f4d1a3fb3ce5c95b4afa32b9c521ae363354a4a910204ea095ce"}, + {file = "prometheus_client-0.17.0.tar.gz", hash = "sha256:9c3b26f1535945e85b8934fb374678d263137b78ef85f305b1156c7c881cd11b"}, ] [package.extras] From eb48b10f4fa28ee9839a2b42418889b47c7c36bd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 14:14:58 -0400 Subject: [PATCH 037/562] Bump pydantic from 1.10.7 to 1.10.8 (#15685) --- changelog.d/15685.misc | 1 + poetry.lock | 74 +++++++++++++++++++++--------------------- 2 files changed, 38 insertions(+), 37 deletions(-) create mode 100644 changelog.d/15685.misc diff --git a/changelog.d/15685.misc b/changelog.d/15685.misc new file mode 100644 index 0000000000..7d4cf65bf3 --- /dev/null +++ b/changelog.d/15685.misc @@ -0,0 +1 @@ +Bump pydantic from 1.10.7 to 1.10.8. 
diff --git a/poetry.lock b/poetry.lock index ecf704ea93..60f09219fe 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1887,48 +1887,48 @@ files = [ [[package]] name = "pydantic" -version = "1.10.7" +version = "1.10.8" description = "Data validation and settings management using python type hints" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e79e999e539872e903767c417c897e729e015872040e56b96e67968c3b918b2d"}, - {file = "pydantic-1.10.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:01aea3a42c13f2602b7ecbbea484a98169fb568ebd9e247593ea05f01b884b2e"}, - {file = "pydantic-1.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:516f1ed9bc2406a0467dd777afc636c7091d71f214d5e413d64fef45174cfc7a"}, - {file = "pydantic-1.10.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae150a63564929c675d7f2303008d88426a0add46efd76c3fc797cd71cb1b46f"}, - {file = "pydantic-1.10.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ecbbc51391248116c0a055899e6c3e7ffbb11fb5e2a4cd6f2d0b93272118a209"}, - {file = "pydantic-1.10.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f4a2b50e2b03d5776e7f21af73e2070e1b5c0d0df255a827e7c632962f8315af"}, - {file = "pydantic-1.10.7-cp310-cp310-win_amd64.whl", hash = "sha256:a7cd2251439988b413cb0a985c4ed82b6c6aac382dbaff53ae03c4b23a70e80a"}, - {file = "pydantic-1.10.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:68792151e174a4aa9e9fc1b4e653e65a354a2fa0fed169f7b3d09902ad2cb6f1"}, - {file = "pydantic-1.10.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe2507b8ef209da71b6fb5f4e597b50c5a34b78d7e857c4f8f3115effaef5fe"}, - {file = "pydantic-1.10.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10a86d8c8db68086f1e30a530f7d5f83eb0685e632e411dbbcf2d5c0150e8dcd"}, - {file = "pydantic-1.10.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d75ae19d2a3dbb146b6f324031c24f8a3f52ff5d6a9f22f0683694b3afcb16fb"}, - {file = "pydantic-1.10.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:464855a7ff7f2cc2cf537ecc421291b9132aa9c79aef44e917ad711b4a93163b"}, - {file = "pydantic-1.10.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:193924c563fae6ddcb71d3f06fa153866423ac1b793a47936656e806b64e24ca"}, - {file = "pydantic-1.10.7-cp311-cp311-win_amd64.whl", hash = "sha256:b4a849d10f211389502059c33332e91327bc154acc1845f375a99eca3afa802d"}, - {file = "pydantic-1.10.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cc1dde4e50a5fc1336ee0581c1612215bc64ed6d28d2c7c6f25d2fe3e7c3e918"}, - {file = "pydantic-1.10.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0cfe895a504c060e5d36b287ee696e2fdad02d89e0d895f83037245218a87fe"}, - {file = "pydantic-1.10.7-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:670bb4683ad1e48b0ecb06f0cfe2178dcf74ff27921cdf1606e527d2617a81ee"}, - {file = "pydantic-1.10.7-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:950ce33857841f9a337ce07ddf46bc84e1c4946d2a3bba18f8280297157a3fd1"}, - {file = "pydantic-1.10.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c15582f9055fbc1bfe50266a19771bbbef33dd28c45e78afbe1996fd70966c2a"}, - {file = "pydantic-1.10.7-cp37-cp37m-win_amd64.whl", hash = "sha256:82dffb306dd20bd5268fd6379bc4bfe75242a9c2b79fec58e1041fbbdb1f7914"}, - {file = 
"pydantic-1.10.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c7f51861d73e8b9ddcb9916ae7ac39fb52761d9ea0df41128e81e2ba42886cd"}, - {file = "pydantic-1.10.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6434b49c0b03a51021ade5c4daa7d70c98f7a79e95b551201fff682fc1661245"}, - {file = "pydantic-1.10.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64d34ab766fa056df49013bb6e79921a0265204c071984e75a09cbceacbbdd5d"}, - {file = "pydantic-1.10.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:701daea9ffe9d26f97b52f1d157e0d4121644f0fcf80b443248434958fd03dc3"}, - {file = "pydantic-1.10.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cf135c46099ff3f919d2150a948ce94b9ce545598ef2c6c7bf55dca98a304b52"}, - {file = "pydantic-1.10.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0f85904f73161817b80781cc150f8b906d521fa11e3cdabae19a581c3606209"}, - {file = "pydantic-1.10.7-cp38-cp38-win_amd64.whl", hash = "sha256:9f6f0fd68d73257ad6685419478c5aece46432f4bdd8d32c7345f1986496171e"}, - {file = "pydantic-1.10.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c230c0d8a322276d6e7b88c3f7ce885f9ed16e0910354510e0bae84d54991143"}, - {file = "pydantic-1.10.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:976cae77ba6a49d80f461fd8bba183ff7ba79f44aa5cfa82f1346b5626542f8e"}, - {file = "pydantic-1.10.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d45fc99d64af9aaf7e308054a0067fdcd87ffe974f2442312372dfa66e1001d"}, - {file = "pydantic-1.10.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d2a5ebb48958754d386195fe9e9c5106f11275867051bf017a8059410e9abf1f"}, - {file = "pydantic-1.10.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:abfb7d4a7cd5cc4e1d1887c43503a7c5dd608eadf8bc615413fc498d3e4645cd"}, - {file = "pydantic-1.10.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:80b1fab4deb08a8292d15e43a6edccdffa5377a36a4597bb545b93e79c5ff0a5"}, - {file = "pydantic-1.10.7-cp39-cp39-win_amd64.whl", hash = "sha256:d71e69699498b020ea198468e2480a2f1e7433e32a3a99760058c6520e2bea7e"}, - {file = "pydantic-1.10.7-py3-none-any.whl", hash = "sha256:0cd181f1d0b1d00e2b705f1bf1ac7799a2d938cce3376b8007df62b29be3c2c6"}, - {file = "pydantic-1.10.7.tar.gz", hash = "sha256:cfc83c0678b6ba51b0532bea66860617c4cd4251ecf76e9846fa5a9f3454e97e"}, + {file = "pydantic-1.10.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1243d28e9b05003a89d72e7915fdb26ffd1d39bdd39b00b7dbe4afae4b557f9d"}, + {file = "pydantic-1.10.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0ab53b609c11dfc0c060d94335993cc2b95b2150e25583bec37a49b2d6c6c3f"}, + {file = "pydantic-1.10.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9613fadad06b4f3bc5db2653ce2f22e0de84a7c6c293909b48f6ed37b83c61f"}, + {file = "pydantic-1.10.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df7800cb1984d8f6e249351139667a8c50a379009271ee6236138a22a0c0f319"}, + {file = "pydantic-1.10.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0c6fafa0965b539d7aab0a673a046466d23b86e4b0e8019d25fd53f4df62c277"}, + {file = "pydantic-1.10.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e82d4566fcd527eae8b244fa952d99f2ca3172b7e97add0b43e2d97ee77f81ab"}, + {file = "pydantic-1.10.8-cp310-cp310-win_amd64.whl", hash = "sha256:ab523c31e22943713d80d8d342d23b6f6ac4b792a1e54064a8d0cf78fd64e800"}, + {file = 
"pydantic-1.10.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:666bdf6066bf6dbc107b30d034615d2627e2121506c555f73f90b54a463d1f33"}, + {file = "pydantic-1.10.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:35db5301b82e8661fa9c505c800d0990bc14e9f36f98932bb1d248c0ac5cada5"}, + {file = "pydantic-1.10.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f90c1e29f447557e9e26afb1c4dbf8768a10cc676e3781b6a577841ade126b85"}, + {file = "pydantic-1.10.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93e766b4a8226e0708ef243e843105bf124e21331694367f95f4e3b4a92bbb3f"}, + {file = "pydantic-1.10.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:88f195f582851e8db960b4a94c3e3ad25692c1c1539e2552f3df7a9e972ef60e"}, + {file = "pydantic-1.10.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:34d327c81e68a1ecb52fe9c8d50c8a9b3e90d3c8ad991bfc8f953fb477d42fb4"}, + {file = "pydantic-1.10.8-cp311-cp311-win_amd64.whl", hash = "sha256:d532bf00f381bd6bc62cabc7d1372096b75a33bc197a312b03f5838b4fb84edd"}, + {file = "pydantic-1.10.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7d5b8641c24886d764a74ec541d2fc2c7fb19f6da2a4001e6d580ba4a38f7878"}, + {file = "pydantic-1.10.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b1f6cb446470b7ddf86c2e57cd119a24959af2b01e552f60705910663af09a4"}, + {file = "pydantic-1.10.8-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c33b60054b2136aef8cf190cd4c52a3daa20b2263917c49adad20eaf381e823b"}, + {file = "pydantic-1.10.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1952526ba40b220b912cdc43c1c32bcf4a58e3f192fa313ee665916b26befb68"}, + {file = "pydantic-1.10.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:bb14388ec45a7a0dc429e87def6396f9e73c8c77818c927b6a60706603d5f2ea"}, + {file = "pydantic-1.10.8-cp37-cp37m-win_amd64.whl", hash = "sha256:16f8c3e33af1e9bb16c7a91fc7d5fa9fe27298e9f299cff6cb744d89d573d62c"}, + {file = "pydantic-1.10.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1ced8375969673929809d7f36ad322934c35de4af3b5e5b09ec967c21f9f7887"}, + {file = "pydantic-1.10.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:93e6bcfccbd831894a6a434b0aeb1947f9e70b7468f274154d03d71fabb1d7c6"}, + {file = "pydantic-1.10.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:191ba419b605f897ede9892f6c56fb182f40a15d309ef0142212200a10af4c18"}, + {file = "pydantic-1.10.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:052d8654cb65174d6f9490cc9b9a200083a82cf5c3c5d3985db765757eb3b375"}, + {file = "pydantic-1.10.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ceb6a23bf1ba4b837d0cfe378329ad3f351b5897c8d4914ce95b85fba96da5a1"}, + {file = "pydantic-1.10.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f2e754d5566f050954727c77f094e01793bcb5725b663bf628fa6743a5a9108"}, + {file = "pydantic-1.10.8-cp38-cp38-win_amd64.whl", hash = "sha256:6a82d6cda82258efca32b40040228ecf43a548671cb174a1e81477195ed3ed56"}, + {file = "pydantic-1.10.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3e59417ba8a17265e632af99cc5f35ec309de5980c440c255ab1ca3ae96a3e0e"}, + {file = "pydantic-1.10.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:84d80219c3f8d4cad44575e18404099c76851bc924ce5ab1c4c8bb5e2a2227d0"}, + {file = "pydantic-1.10.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:2e4148e635994d57d834be1182a44bdb07dd867fa3c2d1b37002000646cc5459"}, + {file = "pydantic-1.10.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12f7b0bf8553e310e530e9f3a2f5734c68699f42218bf3568ef49cd9b0e44df4"}, + {file = "pydantic-1.10.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:42aa0c4b5c3025483240a25b09f3c09a189481ddda2ea3a831a9d25f444e03c1"}, + {file = "pydantic-1.10.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:17aef11cc1b997f9d574b91909fed40761e13fac438d72b81f902226a69dac01"}, + {file = "pydantic-1.10.8-cp39-cp39-win_amd64.whl", hash = "sha256:66a703d1983c675a6e0fed8953b0971c44dba48a929a2000a493c3772eb61a5a"}, + {file = "pydantic-1.10.8-py3-none-any.whl", hash = "sha256:7456eb22ed9aaa24ff3e7b4757da20d9e5ce2a81018c1b3ebd81a0b88a18f3b2"}, + {file = "pydantic-1.10.8.tar.gz", hash = "sha256:1410275520dfa70effadf4c21811d755e7ef9bb1f1d077a21958153a92c8d9ca"}, ] [package.dependencies] From 04798b710dc2cc8cf5a8cfb8a454f03cbfa8840c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 14:15:49 -0400 Subject: [PATCH 038/562] Bump log from 0.4.17 to 0.4.18 (#15681) --- Cargo.lock | 7 ++----- changelog.d/15681.misc | 1 + 2 files changed, 3 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15681.misc diff --git a/Cargo.lock b/Cargo.lock index e169a665b6..08331385c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -132,12 +132,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] +checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de" [[package]] name = "memchr" diff --git a/changelog.d/15681.misc b/changelog.d/15681.misc new file mode 100644 index 0000000000..2de551dd63 --- /dev/null +++ b/changelog.d/15681.misc @@ -0,0 +1 @@ +Bump log from 0.4.17 to 0.4.18. From 2b6c9150dca9fa1884c0f2e27d5ee268be243c2b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 11:03:58 +0100 Subject: [PATCH 039/562] Bump types-requests from 2.30.0.0 to 2.31.0.0 (#15684) * Bump types-requests from 2.30.0.0 to 2.31.0.0 Bumps [types-requests](https://github.com/python/typeshed) from 2.30.0.0 to 2.31.0.0. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-requests dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15684.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15684.misc diff --git a/changelog.d/15684.misc b/changelog.d/15684.misc new file mode 100644 index 0000000000..4c2edf87fd --- /dev/null +++ b/changelog.d/15684.misc @@ -0,0 +1 @@ +Bump types-requests from 2.30.0.0 to 2.31.0.0. 
diff --git a/poetry.lock b/poetry.lock index 60f09219fe..4057ef04e3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3109,14 +3109,14 @@ files = [ [[package]] name = "types-requests" -version = "2.30.0.0" +version = "2.31.0.0" description = "Typing stubs for requests" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-requests-2.30.0.0.tar.gz", hash = "sha256:dec781054324a70ba64430ae9e62e7e9c8e4618c185a5cb3f87a6738251b5a31"}, - {file = "types_requests-2.30.0.0-py3-none-any.whl", hash = "sha256:c6cf08e120ca9f0dc4fa4e32c3f953c3fba222bcc1db6b97695bce8da1ba9864"}, + {file = "types-requests-2.31.0.0.tar.gz", hash = "sha256:c1c29d20ab8d84dff468d7febfe8e0cb0b4664543221b386605e14672b44ea25"}, + {file = "types_requests-2.31.0.0-py3-none-any.whl", hash = "sha256:7c5cea7940f8e92ec560bbc468f65bf684aa3dcf0554a6f8c4710f5f708dc598"}, ] [package.dependencies] From 626bd75f4847f36747c162348e309b65cc1646b6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 11:13:04 +0100 Subject: [PATCH 040/562] Bump types-bleach from 6.0.0.1 to 6.0.0.3 (#15686) * Bump types-bleach from 6.0.0.1 to 6.0.0.3 Bumps [types-bleach](https://github.com/python/typeshed) from 6.0.0.1 to 6.0.0.3. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-bleach dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions Co-authored-by: Patrick Cloke Co-authored-by: David Robertson --- changelog.d/15686.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15686.misc diff --git a/changelog.d/15686.misc b/changelog.d/15686.misc new file mode 100644 index 0000000000..feacbf35d6 --- /dev/null +++ b/changelog.d/15686.misc @@ -0,0 +1 @@ +Bump types-bleach from 6.0.0.1 to 6.0.0.3. 
diff --git a/poetry.lock b/poetry.lock index 4057ef04e3..0879e64cf1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2998,14 +2998,14 @@ files = [ [[package]] name = "types-bleach" -version = "6.0.0.1" +version = "6.0.0.3" description = "Typing stubs for bleach" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-bleach-6.0.0.1.tar.gz", hash = "sha256:43d9129deb9e82918747437edf78f09ff440f2973f4702625b61994f3e698518"}, - {file = "types_bleach-6.0.0.1-py3-none-any.whl", hash = "sha256:440df967254007be80bb0f4d851f026c29c709cc48359bf4935d2b2f3a6f9f90"}, + {file = "types-bleach-6.0.0.3.tar.gz", hash = "sha256:8ce7896d4f658c562768674ffcf07492c7730e128018f03edd163ff912bfadee"}, + {file = "types_bleach-6.0.0.3-py3-none-any.whl", hash = "sha256:d43eaf30a643ca824e16e2dcdb0c87ef9226237e2fa3ac4732a50cb3f32e145f"}, ] [[package]] From 42786d8a477b6d44075b0e56949820331d9962d8 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 30 May 2023 13:54:50 +0100 Subject: [PATCH 041/562] Create dependabot changelogs at release time (#15481) * Ditch dependabot changelog workflow * Summarise dependabot commits in release script * Changelog * Update scripts-dev/release.py --- .github/workflows/dependabot_changelog.yml | 49 -------------------- changelog.d/15481.misc | 1 + docs/development/dependencies.md | 12 ++--- scripts-dev/release.py | 52 ++++++++++++++++++++-- 4 files changed, 57 insertions(+), 57 deletions(-) delete mode 100644 .github/workflows/dependabot_changelog.yml create mode 100644 changelog.d/15481.misc diff --git a/.github/workflows/dependabot_changelog.yml b/.github/workflows/dependabot_changelog.yml deleted file mode 100644 index df47e3dcba..0000000000 --- a/.github/workflows/dependabot_changelog.yml +++ /dev/null @@ -1,49 +0,0 @@ -name: Write changelog for dependabot PR -on: - pull_request: - types: - - opened - - reopened # For debugging! - -permissions: - # Needed to be able to push the commit. See - # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request - # for a similar example - contents: write - -jobs: - add-changelog: - runs-on: 'ubuntu-latest' - if: ${{ github.actor == 'dependabot[bot]' }} - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.pull_request.head.ref }} - - name: Write, commit and push changelog - env: - PR_TITLE: ${{ github.event.pull_request.title }} - PR_NUMBER: ${{ github.event.pull_request.number }} - run: | - echo "${PR_TITLE}." > "changelog.d/${PR_NUMBER}".misc - git add changelog.d - git config user.email "github-actions[bot]@users.noreply.github.com" - git config user.name "GitHub Actions" - git commit -m "Changelog" - git push - shell: bash - # The `git push` above does not trigger CI on the dependabot PR. - # - # By default, workflows can't trigger other workflows when they're just using the - # default `GITHUB_TOKEN` access token. (This is intended to stop you from writing - # recursive workflow loops by accident, because that'll get very expensive very - # quickly.) Instead, you have to manually call out to another workflow, or else - # make your changes (i.e. the `git push` above) using a personal access token. - # See - # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow - # - # I have tried and failed to find a way to trigger CI on the "merge ref" of the PR. - # See git commit history for previous attempts. 
If anyone desperately wants to try - # again in the future, make a matrix-bot account and use its access token to git push. - - # THIS WORKFLOW HAS WRITE PERMISSIONS---do not add other jobs here unless they - # are sufficiently locked down to dependabot only as above. diff --git a/changelog.d/15481.misc b/changelog.d/15481.misc new file mode 100644 index 0000000000..a6e088c164 --- /dev/null +++ b/changelog.d/15481.misc @@ -0,0 +1 @@ +Create dependabot changelogs at release time. diff --git a/docs/development/dependencies.md b/docs/development/dependencies.md index c4449c51f7..b5926d96ff 100644 --- a/docs/development/dependencies.md +++ b/docs/development/dependencies.md @@ -260,15 +260,17 @@ doesn't require poetry. (It's what we use in CI too). However, you could try ## ...handle a Dependabot pull request? -Synapse uses Dependabot to keep the `poetry.lock` file up-to-date. When it -creates a pull request a GitHub Action will run to automatically create a changelog -file. Ensure that: +Synapse uses Dependabot to keep the `poetry.lock` and `Cargo.lock` file +up-to-date with the latest releases of our dependencies. The changelog check is +omitted for Dependabot PRs; the release script will include them in the +changelog. + +When reviewing a dependabot PR, ensure that: * the lockfile changes look reasonable; * the upstream changelog file (linked in the description) doesn't include any breaking changes; -* continuous integration passes (due to permissions, the GitHub Actions run on - the changelog commit will fail, look at the initial commit of the pull request); +* continuous integration passes. In particular, any updates to the type hints (usually packages which start with `types-`) should be safe to merge if linting passes. diff --git a/scripts-dev/release.py b/scripts-dev/release.py index ec92a59bb8..257d1e9ebd 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -27,7 +27,7 @@ import time import urllib.request from os import path from tempfile import TemporaryDirectory -from typing import Any, List, Optional +from typing import Any, List, Match, Optional, Union import attr import click @@ -233,7 +233,7 @@ def _prepare() -> None: subprocess.check_output(["poetry", "version", new_version]) # Generate changelogs. - generate_and_write_changelog(current_version, new_version) + generate_and_write_changelog(synapse_repo, current_version, new_version) # Generate debian changelogs if parsed_new_version.pre is not None: @@ -814,7 +814,7 @@ def get_changes_for_version(wanted_version: version.Version) -> str: def generate_and_write_changelog( - current_version: version.Version, new_version: str + repo: Repo, current_version: version.Version, new_version: str ) -> None: # We do this by getting a draft so that we can edit it before writing to the # changelog. @@ -827,6 +827,10 @@ def generate_and_write_changelog( new_changes = new_changes.replace( "No significant changes.", f"No significant changes since {current_version}." ) + new_changes += build_dependabot_changelog( + repo, + current_version, + ) # Prepend changes to changelog with open("CHANGES.md", "r+") as f: @@ -841,5 +845,47 @@ def generate_and_write_changelog( os.remove(filename) +def build_dependabot_changelog(repo: Repo, current_version: version.Version) -> str: + """Summarise dependabot commits between `current_version` and `release_branch`. 
+ + Returns an empty string if there have been no such commits; otherwise outputs a + third-level markdown header followed by an unordered list.""" + last_release_commit = repo.tag("v" + str(current_version)).commit + rev_spec = f"{last_release_commit.hexsha}.." + commits = list(git.objects.Commit.iter_items(repo, rev_spec)) + messages = [] + for commit in reversed(commits): + if commit.author.name == "dependabot[bot]": + message: Union[str, bytes] = commit.message + if isinstance(message, bytes): + message = message.decode("utf-8") + messages.append(message.split("\n", maxsplit=1)[0]) + + if not messages: + print(f"No dependabot commits in range {rev_spec}", file=sys.stderr) + return "" + + messages.sort() + + def replacer(match: Match[str]) -> str: + desc = match.group(1) + number = match.group(2) + return f"* {desc}. ([\\#{number}](https://github.com/matrix-org/synapse/issues/{number}))" + + for i, message in enumerate(messages): + messages[i] = re.sub(r"(.*) \(#(\d+)\)$", replacer, message) + messages.insert(0, "### Updates to locked dependencies\n") + return "\n".join(messages) + + +@cli.command() +@click.argument("since") +def test_dependabot_changelog(since: str) -> None: + """Test building the dependabot changelog. + + Summarises all dependabot commits between the SINCE tag and the current git HEAD.""" + print(build_dependabot_changelog(git.Repo("."), version.Version(since))) + + if __name__ == "__main__": cli() From a103b874dddc6246b06b168992fbdb7aaeb0183f Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 30 May 2023 14:03:22 +0100 Subject: [PATCH 042/562] 1.85.0rc1 --- CHANGES.md | 75 +++++++++++++++++++++++++++++++++++++++ changelog.d/10428.removal | 1 - changelog.d/15464.bugfix | 1 - changelog.d/15481.misc | 1 - changelog.d/15537.misc | 1 - changelog.d/15578.misc | 1 - changelog.d/15585.feature | 1 - changelog.d/15597.misc | 1 - changelog.d/15599.bugfix | 1 - changelog.d/15601.bugfix | 1 - changelog.d/15602.misc | 1 - changelog.d/15604.misc | 1 - changelog.d/15606.misc | 1 - changelog.d/15607.bugfix | 1 - changelog.d/15610.misc | 1 - changelog.d/15611.feature | 1 - changelog.d/15613.doc | 1 - changelog.d/15614.bugfix | 1 - changelog.d/15615.misc | 1 - changelog.d/15620.misc | 1 - changelog.d/15621.misc | 1 - changelog.d/15624.bugfix | 1 - changelog.d/15625.misc | 1 - changelog.d/15626.misc | 1 - changelog.d/15630.misc | 1 - changelog.d/15633.misc | 1 - changelog.d/15634.bugfix | 1 - changelog.d/15636.misc | 1 - changelog.d/15639.misc | 1 - changelog.d/15640.misc | 1 - changelog.d/15641.misc | 1 - changelog.d/15642.misc | 1 - changelog.d/15643.misc | 1 - changelog.d/15644.feature | 1 - changelog.d/15646.misc | 1 - changelog.d/15647.bugfix | 1 - changelog.d/15648.doc | 1 - changelog.d/15651.misc | 1 - changelog.d/15658.misc | 1 - changelog.d/15659.misc | 1 - changelog.d/15663.misc | 1 - changelog.d/15665.misc | 1 - changelog.d/15666.misc | 1 - changelog.d/15668.doc | 1 - changelog.d/15678.misc | 1 - changelog.d/15681.misc | 1 - changelog.d/15682.misc | 1 - changelog.d/15683.misc | 1 - changelog.d/15684.misc | 1 - changelog.d/15685.misc | 1 - changelog.d/15686.misc | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 53 files changed, 82 insertions(+), 51 deletions(-) delete mode 100644 changelog.d/10428.removal delete mode 100644 changelog.d/15464.bugfix delete mode 100644 changelog.d/15481.misc delete mode 100644 changelog.d/15537.misc delete mode 100644 changelog.d/15578.misc delete mode 100644 changelog.d/15585.feature delete mode 100644 changelog.d/15597.misc delete 
mode 100644 changelog.d/15599.bugfix delete mode 100644 changelog.d/15601.bugfix delete mode 100644 changelog.d/15602.misc delete mode 100644 changelog.d/15604.misc delete mode 100644 changelog.d/15606.misc delete mode 100644 changelog.d/15607.bugfix delete mode 100644 changelog.d/15610.misc delete mode 100644 changelog.d/15611.feature delete mode 100644 changelog.d/15613.doc delete mode 100644 changelog.d/15614.bugfix delete mode 100644 changelog.d/15615.misc delete mode 100644 changelog.d/15620.misc delete mode 100644 changelog.d/15621.misc delete mode 100644 changelog.d/15624.bugfix delete mode 100644 changelog.d/15625.misc delete mode 100644 changelog.d/15626.misc delete mode 100644 changelog.d/15630.misc delete mode 100644 changelog.d/15633.misc delete mode 100644 changelog.d/15634.bugfix delete mode 100644 changelog.d/15636.misc delete mode 100644 changelog.d/15639.misc delete mode 100644 changelog.d/15640.misc delete mode 100644 changelog.d/15641.misc delete mode 100644 changelog.d/15642.misc delete mode 100644 changelog.d/15643.misc delete mode 100644 changelog.d/15644.feature delete mode 100644 changelog.d/15646.misc delete mode 100644 changelog.d/15647.bugfix delete mode 100644 changelog.d/15648.doc delete mode 100644 changelog.d/15651.misc delete mode 100644 changelog.d/15658.misc delete mode 100644 changelog.d/15659.misc delete mode 100644 changelog.d/15663.misc delete mode 100644 changelog.d/15665.misc delete mode 100644 changelog.d/15666.misc delete mode 100644 changelog.d/15668.doc delete mode 100644 changelog.d/15678.misc delete mode 100644 changelog.d/15681.misc delete mode 100644 changelog.d/15682.misc delete mode 100644 changelog.d/15683.misc delete mode 100644 changelog.d/15684.misc delete mode 100644 changelog.d/15685.misc delete mode 100644 changelog.d/15686.misc diff --git a/CHANGES.md b/CHANGES.md index 85c9af8ce4..ba0995aa6f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,78 @@ +Synapse 1.85.0rc1 (2023-05-30) +============================== + +Features +-------- + +- Process previously failed backfill events in the background to avoid blocking requests for something that is bound to fail again. ([\#15585](https://github.com/matrix-org/synapse/issues/15585)) +- Add a new [admin API](https://matrix-org.github.io/synapse/v1.75/usage/administration/admin_api/index.html) to [create a new device for a user](https://matrix-org.github.io/synapse/v1.75/admin_api/user_admin_api.html#create-a-device). ([\#15611](https://github.com/matrix-org/synapse/issues/15611)) +- Add Unix socket support for Redis connections. Contributed by Jason Little. ([\#15644](https://github.com/matrix-org/synapse/issues/15644)) + + +Bugfixes +-------- + +- Fix a long-standing bug where setting the read marker could fail when using message retention. Contributed by Nick @ Beeper (@fizzadar). ([\#15464](https://github.com/matrix-org/synapse/issues/15464)) +- Fix a long-standing bug where the `url_preview_url_blacklist` configuration setting was not applied to oEmbed or image URLs found while previewing a URL. ([\#15601](https://github.com/matrix-org/synapse/issues/15601)) +- Fix a long-standing bug where filters with multiple backslashes were rejected. ([\#15607](https://github.com/matrix-org/synapse/issues/15607)) +- Fix a bug introduced in Synapse 1.82.0 where the error message displayed when validation of the `app_service_config_files` config option fails would be incorrectly formatted. 
([\#15614](https://github.com/matrix-org/synapse/issues/15614)) +- Fix a long-standing bug where deactivated users were still able to login using the custom `org.matrix.login.jwt` login type (if enabled). ([\#15624](https://github.com/matrix-org/synapse/issues/15624)) +- Fix a long-standing bug where deactivated users were able to login in uncommon situations. ([\#15634](https://github.com/matrix-org/synapse/issues/15634)) + + +Improved Documentation +---------------------- + +- Warn users that at least 3.75GB of space is needed for the nix Synapse development environment. ([\#15613](https://github.com/matrix-org/synapse/issues/15613)) +- Remove outdated comment from the generated and sample homeserver log configs. ([\#15648](https://github.com/matrix-org/synapse/issues/15648)) +- Improve contributor docs to make it more clear that Rust is a necessary prerequisite. Contributed by @grantm. ([\#15668](https://github.com/matrix-org/synapse/issues/15668)) + + +Deprecations and Removals +------------------------- + +- Remove the old version of the R30 (30-day retained users) phone-home metric. ([\#10428](https://github.com/matrix-org/synapse/issues/10428)) + + +Internal Changes +---------------- + +- Create dependabot changelogs at release time. ([\#15481](https://github.com/matrix-org/synapse/issues/15481)) +- Add not null constraint to column `full_user_id` of tables `profiles` and `user_filters`. ([\#15537](https://github.com/matrix-org/synapse/issues/15537)) +- Allow connecting to HTTP Replication Endpoints by using `worker_name` when constructing the request. ([\#15578](https://github.com/matrix-org/synapse/issues/15578)) +- Make the `thread_id` column on `event_push_actions`, `event_push_actions_staging`, and `event_push_summary` non-null. ([\#15597](https://github.com/matrix-org/synapse/issues/15597)) +- Run mypy type checking with the minimum supported Python version to catch new usage that isn't backwards-compatible. ([\#15602](https://github.com/matrix-org/synapse/issues/15602)) +- Fix subscriptable type usage in Python <3.9. ([\#15604](https://github.com/matrix-org/synapse/issues/15604)) +- Update internal terminology. ([\#15606](https://github.com/matrix-org/synapse/issues/15606), [\#15620](https://github.com/matrix-org/synapse/issues/15620)) +- Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing. ([\#15610](https://github.com/matrix-org/synapse/issues/15610), [\#15647](https://github.com/matrix-org/synapse/issues/15647)) +- Trace how many new events from the backfill response we need to process. ([\#15633](https://github.com/matrix-org/synapse/issues/15633)) +- Re-type config paths in `ConfigError`s to be `StrSequence`s instead of `Iterable[str]`s. ([\#15615](https://github.com/matrix-org/synapse/issues/15615)) +- Update Mutual Rooms (MSC2666) implementation to match new proposal text. ([\#15621](https://github.com/matrix-org/synapse/issues/15621)) +- Remove the unstable identifiers from faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). ([\#15625](https://github.com/matrix-org/synapse/issues/15625)) +- Fix the olddeps CI. ([\#15626](https://github.com/matrix-org/synapse/issues/15626)) +- Remove duplicate timestamp from test logs (`_trial_temp/test.log`). ([\#15636](https://github.com/matrix-org/synapse/issues/15636)) +- Fix two memory leaks in `trial` test runs. 
([\#15630](https://github.com/matrix-org/synapse/issues/15630)) +- Limit the size of the `HomeServerConfig` cache in trial test runs. ([\#15646](https://github.com/matrix-org/synapse/issues/15646)) +- Improve type hints. ([\#15658](https://github.com/matrix-org/synapse/issues/15658), [\#15659](https://github.com/matrix-org/synapse/issues/15659)) +- Add requesting user id parameter to key claim methods in `TransportLayerClient`. ([\#15663](https://github.com/matrix-org/synapse/issues/15663)) +- Speed up rebuilding of the user directory for local users. ([\#15665](https://github.com/matrix-org/synapse/issues/15665)) +- Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11. ([\#15666](https://github.com/matrix-org/synapse/issues/15666), [\#15678](https://github.com/matrix-org/synapse/issues/15678)) + +### Updates to locked dependencies + +* Bump furo from 2023.3.27 to 2023.5.20. ([\#15642](https://github.com/matrix-org/synapse/issues/15642)) +* Bump log from 0.4.17 to 0.4.18. ([\#15681](https://github.com/matrix-org/synapse/issues/15681)) +* Bump prometheus-client from 0.16.0 to 0.17.0. ([\#15682](https://github.com/matrix-org/synapse/issues/15682)) +* Bump pydantic from 1.10.7 to 1.10.8. ([\#15685](https://github.com/matrix-org/synapse/issues/15685)) +* Bump pygithub from 1.58.1 to 1.58.2. ([\#15643](https://github.com/matrix-org/synapse/issues/15643)) +* Bump requests from 2.28.2 to 2.31.0. ([\#15651](https://github.com/matrix-org/synapse/issues/15651)) +* Bump sphinx from 6.1.3 to 6.2.1. ([\#15641](https://github.com/matrix-org/synapse/issues/15641)) +* Bump types-bleach from 6.0.0.1 to 6.0.0.3. ([\#15686](https://github.com/matrix-org/synapse/issues/15686)) +* Bump types-pillow from 9.5.0.2 to 9.5.0.4. ([\#15640](https://github.com/matrix-org/synapse/issues/15640)) +* Bump types-pyyaml from 6.0.12.9 to 6.0.12.10. ([\#15683](https://github.com/matrix-org/synapse/issues/15683)) +* Bump types-requests from 2.30.0.0 to 2.31.0.0. ([\#15684](https://github.com/matrix-org/synapse/issues/15684)) +* Bump types-setuptools from 67.7.0.2 to 67.8.0.0. ([\#15639](https://github.com/matrix-org/synapse/issues/15639)) + Synapse 1.84.1 (2023-05-26) =========================== diff --git a/changelog.d/10428.removal b/changelog.d/10428.removal deleted file mode 100644 index c056e89585..0000000000 --- a/changelog.d/10428.removal +++ /dev/null @@ -1 +0,0 @@ -Remove the old version of the R30 (30-day retained users) phone-home metric. diff --git a/changelog.d/15464.bugfix b/changelog.d/15464.bugfix deleted file mode 100644 index 3c655989b3..0000000000 --- a/changelog.d/15464.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where setting the read marker could fail when using message retention. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/15481.misc b/changelog.d/15481.misc deleted file mode 100644 index a6e088c164..0000000000 --- a/changelog.d/15481.misc +++ /dev/null @@ -1 +0,0 @@ -Create dependabot changelogs at release time. diff --git a/changelog.d/15537.misc b/changelog.d/15537.misc deleted file mode 100644 index 979e0ba977..0000000000 --- a/changelog.d/15537.misc +++ /dev/null @@ -1 +0,0 @@ -Add not null constraint to column full_user_id of tables profiles and user_filters. 
diff --git a/changelog.d/15578.misc b/changelog.d/15578.misc deleted file mode 100644 index a54422239b..0000000000 --- a/changelog.d/15578.misc +++ /dev/null @@ -1 +0,0 @@ -Allow connecting to HTTP Replication Endpoints by using `worker_name` when constructing the request. diff --git a/changelog.d/15585.feature b/changelog.d/15585.feature deleted file mode 100644 index 1adcfb69ee..0000000000 --- a/changelog.d/15585.feature +++ /dev/null @@ -1 +0,0 @@ -Process previously failed backfill events in the background to avoid blocking requests for something that is bound to fail again. diff --git a/changelog.d/15597.misc b/changelog.d/15597.misc deleted file mode 100644 index 2dea23784f..0000000000 --- a/changelog.d/15597.misc +++ /dev/null @@ -1 +0,0 @@ -Make the `thread_id` column on `event_push_actions`, `event_push_actions_staging`, and `event_push_summary` non-null. diff --git a/changelog.d/15599.bugfix b/changelog.d/15599.bugfix deleted file mode 100644 index b58af8ad55..0000000000 --- a/changelog.d/15599.bugfix +++ /dev/null @@ -1 +0,0 @@ -Print full error and stack-trace of any exception that occurs during startup/initialization. diff --git a/changelog.d/15601.bugfix b/changelog.d/15601.bugfix deleted file mode 100644 index 426db6cea3..0000000000 --- a/changelog.d/15601.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where the `url_preview_url_blacklist` configuration setting was not applied to oEmbed or image URLs found while previewing a URL. diff --git a/changelog.d/15602.misc b/changelog.d/15602.misc deleted file mode 100644 index cdd0c039bd..0000000000 --- a/changelog.d/15602.misc +++ /dev/null @@ -1 +0,0 @@ -Run mypy type checking with the minimum supported Python version to catch new usage that isn't backwards-compatible. diff --git a/changelog.d/15604.misc b/changelog.d/15604.misc deleted file mode 100644 index 92d1d600bc..0000000000 --- a/changelog.d/15604.misc +++ /dev/null @@ -1 +0,0 @@ -Fix subscriptable type usage in Python <3.9. diff --git a/changelog.d/15606.misc b/changelog.d/15606.misc deleted file mode 100644 index 568c0d3fc5..0000000000 --- a/changelog.d/15606.misc +++ /dev/null @@ -1 +0,0 @@ -Update internal terminology. diff --git a/changelog.d/15607.bugfix b/changelog.d/15607.bugfix deleted file mode 100644 index a2767adbe2..0000000000 --- a/changelog.d/15607.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where filters with multiple backslashes were rejected. diff --git a/changelog.d/15610.misc b/changelog.d/15610.misc deleted file mode 100644 index 2eff30f6e3..0000000000 --- a/changelog.d/15610.misc +++ /dev/null @@ -1 +0,0 @@ -Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing. diff --git a/changelog.d/15611.feature b/changelog.d/15611.feature deleted file mode 100644 index 7cfb46fd0a..0000000000 --- a/changelog.d/15611.feature +++ /dev/null @@ -1 +0,0 @@ -Add a new admin API to create a new device for a user. diff --git a/changelog.d/15613.doc b/changelog.d/15613.doc deleted file mode 100644 index 94733facf0..0000000000 --- a/changelog.d/15613.doc +++ /dev/null @@ -1 +0,0 @@ -Warn users that at least 3.75GB of space is needed for the nix Synapse development environment. 
diff --git a/changelog.d/15614.bugfix b/changelog.d/15614.bugfix deleted file mode 100644 index b523ae6eb1..0000000000 --- a/changelog.d/15614.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.82.0 where the error message displayed when validation of the `app_service_config_files` config option fails would be incorrectly formatted. diff --git a/changelog.d/15615.misc b/changelog.d/15615.misc deleted file mode 100644 index a39fd0a098..0000000000 --- a/changelog.d/15615.misc +++ /dev/null @@ -1 +0,0 @@ -Re-type config paths in `ConfigError`s to be `StrSequence`s instead of `Iterable[str]`s. diff --git a/changelog.d/15620.misc b/changelog.d/15620.misc deleted file mode 100644 index 568c0d3fc5..0000000000 --- a/changelog.d/15620.misc +++ /dev/null @@ -1 +0,0 @@ -Update internal terminology. diff --git a/changelog.d/15621.misc b/changelog.d/15621.misc deleted file mode 100644 index 5d060f4dbc..0000000000 --- a/changelog.d/15621.misc +++ /dev/null @@ -1 +0,0 @@ -Update Mutual Rooms (MSC2666) implementation to match new proposal text. \ No newline at end of file diff --git a/changelog.d/15624.bugfix b/changelog.d/15624.bugfix deleted file mode 100644 index fde515ba62..0000000000 --- a/changelog.d/15624.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where deactivated users were still able to login using the custom `org.matrix.login.jwt` login type (if enabled). diff --git a/changelog.d/15625.misc b/changelog.d/15625.misc deleted file mode 100644 index 7ea8cc9433..0000000000 --- a/changelog.d/15625.misc +++ /dev/null @@ -1 +0,0 @@ -Remove the unstable identifiers from faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). diff --git a/changelog.d/15626.misc b/changelog.d/15626.misc deleted file mode 100644 index 0016cdbf10..0000000000 --- a/changelog.d/15626.misc +++ /dev/null @@ -1 +0,0 @@ -Fix the olddeps CI. diff --git a/changelog.d/15630.misc b/changelog.d/15630.misc deleted file mode 100644 index a30304bfd6..0000000000 --- a/changelog.d/15630.misc +++ /dev/null @@ -1 +0,0 @@ -Fix two memory leaks in `trial` test runs. diff --git a/changelog.d/15633.misc b/changelog.d/15633.misc deleted file mode 100644 index 4126a20602..0000000000 --- a/changelog.d/15633.misc +++ /dev/null @@ -1 +0,0 @@ -Trace how many new events from the backfill response we need to process. diff --git a/changelog.d/15634.bugfix b/changelog.d/15634.bugfix deleted file mode 100644 index ef39e8a689..0000000000 --- a/changelog.d/15634.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where deactivated users were able to login in uncommon situations. diff --git a/changelog.d/15636.misc b/changelog.d/15636.misc deleted file mode 100644 index 82329c5e43..0000000000 --- a/changelog.d/15636.misc +++ /dev/null @@ -1 +0,0 @@ -Remove duplicate timestamp from test logs (`_trial_temp/test.log`). diff --git a/changelog.d/15639.misc b/changelog.d/15639.misc deleted file mode 100644 index 92230e206f..0000000000 --- a/changelog.d/15639.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-setuptools from 67.7.0.2 to 67.8.0.0. diff --git a/changelog.d/15640.misc b/changelog.d/15640.misc deleted file mode 100644 index 4c2a3dbc52..0000000000 --- a/changelog.d/15640.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-pillow from 9.5.0.2 to 9.5.0.4. diff --git a/changelog.d/15641.misc b/changelog.d/15641.misc deleted file mode 100644 index a85d85c58e..0000000000 --- a/changelog.d/15641.misc +++ /dev/null @@ -1 +0,0 @@ -Bump sphinx from 6.1.3 to 6.2.1. 
diff --git a/changelog.d/15642.misc b/changelog.d/15642.misc deleted file mode 100644 index 5d6125140d..0000000000 --- a/changelog.d/15642.misc +++ /dev/null @@ -1 +0,0 @@ -Bump furo from 2023.3.27 to 2023.5.20. diff --git a/changelog.d/15643.misc b/changelog.d/15643.misc deleted file mode 100644 index 5bd2e74071..0000000000 --- a/changelog.d/15643.misc +++ /dev/null @@ -1 +0,0 @@ -Bump pygithub from 1.58.1 to 1.58.2. diff --git a/changelog.d/15644.feature b/changelog.d/15644.feature deleted file mode 100644 index 1b6126af53..0000000000 --- a/changelog.d/15644.feature +++ /dev/null @@ -1 +0,0 @@ -Add Unix socket support for Redis connections. Contributed by Jason Little. diff --git a/changelog.d/15646.misc b/changelog.d/15646.misc deleted file mode 100644 index 872afe30b8..0000000000 --- a/changelog.d/15646.misc +++ /dev/null @@ -1 +0,0 @@ -Limit the size of the `HomeServerConfig` cache in trial test runs. diff --git a/changelog.d/15647.bugfix b/changelog.d/15647.bugfix deleted file mode 100644 index 2eff30f6e3..0000000000 --- a/changelog.d/15647.bugfix +++ /dev/null @@ -1 +0,0 @@ -Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing. diff --git a/changelog.d/15648.doc b/changelog.d/15648.doc deleted file mode 100644 index 70f65ebbff..0000000000 --- a/changelog.d/15648.doc +++ /dev/null @@ -1 +0,0 @@ -Remove outdated comment from the generated and sample homeserver log configs. \ No newline at end of file diff --git a/changelog.d/15651.misc b/changelog.d/15651.misc deleted file mode 100644 index 4d7c0248b2..0000000000 --- a/changelog.d/15651.misc +++ /dev/null @@ -1 +0,0 @@ -Bump requests from 2.28.2 to 2.31.0. diff --git a/changelog.d/15658.misc b/changelog.d/15658.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/15658.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15659.misc b/changelog.d/15659.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/15659.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15663.misc b/changelog.d/15663.misc deleted file mode 100644 index cc5f801543..0000000000 --- a/changelog.d/15663.misc +++ /dev/null @@ -1 +0,0 @@ -Add requesting user id parameter to key claim methods in `TransportLayerClient`. diff --git a/changelog.d/15665.misc b/changelog.d/15665.misc deleted file mode 100644 index 7ad424d8df..0000000000 --- a/changelog.d/15665.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up rebuilding of the user directory for local users. diff --git a/changelog.d/15666.misc b/changelog.d/15666.misc deleted file mode 100644 index 92eae49952..0000000000 --- a/changelog.d/15666.misc +++ /dev/null @@ -1 +0,0 @@ -Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11. \ No newline at end of file diff --git a/changelog.d/15668.doc b/changelog.d/15668.doc deleted file mode 100644 index 3526a4d50c..0000000000 --- a/changelog.d/15668.doc +++ /dev/null @@ -1 +0,0 @@ -Improve contributor docs to make it more clear that Rust is a necessary prerequisite. Contributed by @grantm. diff --git a/changelog.d/15678.misc b/changelog.d/15678.misc deleted file mode 100644 index 92eae49952..0000000000 --- a/changelog.d/15678.misc +++ /dev/null @@ -1 +0,0 @@ -Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11. 
\ No newline at end of file diff --git a/changelog.d/15681.misc b/changelog.d/15681.misc deleted file mode 100644 index 2de551dd63..0000000000 --- a/changelog.d/15681.misc +++ /dev/null @@ -1 +0,0 @@ -Bump log from 0.4.17 to 0.4.18. diff --git a/changelog.d/15682.misc b/changelog.d/15682.misc deleted file mode 100644 index 687af7d8d7..0000000000 --- a/changelog.d/15682.misc +++ /dev/null @@ -1 +0,0 @@ -Bump prometheus-client from 0.16.0 to 0.17.0. diff --git a/changelog.d/15683.misc b/changelog.d/15683.misc deleted file mode 100644 index 147f13b99c..0000000000 --- a/changelog.d/15683.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-pyyaml from 6.0.12.9 to 6.0.12.10. diff --git a/changelog.d/15684.misc b/changelog.d/15684.misc deleted file mode 100644 index 4c2edf87fd..0000000000 --- a/changelog.d/15684.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-requests from 2.30.0.0 to 2.31.0.0. diff --git a/changelog.d/15685.misc b/changelog.d/15685.misc deleted file mode 100644 index 7d4cf65bf3..0000000000 --- a/changelog.d/15685.misc +++ /dev/null @@ -1 +0,0 @@ -Bump pydantic from 1.10.7 to 1.10.8. diff --git a/changelog.d/15686.misc b/changelog.d/15686.misc deleted file mode 100644 index feacbf35d6..0000000000 --- a/changelog.d/15686.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-bleach from 6.0.0.1 to 6.0.0.3. diff --git a/debian/changelog b/debian/changelog index fbdc9c177e..2d88cd9d29 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.85.0~rc1) stable; urgency=medium + + * New Synapse release 1.85.0rc1. + + -- Synapse Packaging team Tue, 30 May 2023 13:56:54 +0100 + matrix-synapse-py3 (1.84.1) stable; urgency=medium * New Synapse release 1.84.1. diff --git a/pyproject.toml b/pyproject.toml index 6e9bce65b6..7227bc7523 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.84.1" +version = "1.85.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From cebff6f4d584683bc122686e38342dbd8699818e Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 30 May 2023 14:05:44 +0100 Subject: [PATCH 043/562] Tweak release script dependabot wording --- scripts-dev/release.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 257d1e9ebd..89ffba8d92 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -875,6 +875,8 @@ def build_dependabot_changelog(repo: Repo, current_version: version.Version) -> for i, message in enumerate(messages): messages[i] = re.sub(r"(.*) \(#(\d+)\)$", replacer, message) messages.insert(0, "### Updates to locked dependencies\n") + # Add an extra blank line to the bottom of the section + messages.append("") return "\n".join(messages) From 3389653e1522c9aaea227b2afa36acd5db3ad9fe Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 30 May 2023 14:18:42 +0100 Subject: [PATCH 044/562] Update changelog --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index ba0995aa6f..636c591568 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -47,8 +47,8 @@ Internal Changes - Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing. 
([\#15610](https://github.com/matrix-org/synapse/issues/15610), [\#15647](https://github.com/matrix-org/synapse/issues/15647)) - Trace how many new events from the backfill response we need to process. ([\#15633](https://github.com/matrix-org/synapse/issues/15633)) - Re-type config paths in `ConfigError`s to be `StrSequence`s instead of `Iterable[str]`s. ([\#15615](https://github.com/matrix-org/synapse/issues/15615)) -- Update Mutual Rooms (MSC2666) implementation to match new proposal text. ([\#15621](https://github.com/matrix-org/synapse/issues/15621)) -- Remove the unstable identifiers from faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). ([\#15625](https://github.com/matrix-org/synapse/issues/15625)) +- Update Mutual Rooms ([MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)) implementation to match new proposal text. ([\#15621](https://github.com/matrix-org/synapse/issues/15621)) +- Remove the unstable identifiers from faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#15625](https://github.com/matrix-org/synapse/issues/15625)) - Fix the olddeps CI. ([\#15626](https://github.com/matrix-org/synapse/issues/15626)) - Remove duplicate timestamp from test logs (`_trial_temp/test.log`). ([\#15636](https://github.com/matrix-org/synapse/issues/15636)) - Fix two memory leaks in `trial` test runs. ([\#15630](https://github.com/matrix-org/synapse/issues/15630)) From 7477810cc2be241d6f86a1d787fe469c69a84358 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 30 May 2023 14:33:05 +0100 Subject: [PATCH 045/562] fixup changelog --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 636c591568..14aac9f14e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,8 +4,8 @@ Synapse 1.85.0rc1 (2023-05-30) Features -------- -- Process previously failed backfill events in the background to avoid blocking requests for something that is bound to fail again. ([\#15585](https://github.com/matrix-org/synapse/issues/15585)) -- Add a new [admin API](https://matrix-org.github.io/synapse/v1.75/usage/administration/admin_api/index.html) to [create a new device for a user](https://matrix-org.github.io/synapse/v1.75/admin_api/user_admin_api.html#create-a-device). ([\#15611](https://github.com/matrix-org/synapse/issues/15611)) +- Improve performance of backfill requests by performing backfill of previously failed requests in the background. ([\#15585](https://github.com/matrix-org/synapse/issues/15585)) +- Add a new [admin API](https://matrix-org.github.io/synapse/v1.85/usage/administration/admin_api/index.html) to [create a new device for a user](https://matrix-org.github.io/synapse/v1.85/admin_api/user_admin_api.html#create-a-device). ([\#15611](https://github.com/matrix-org/synapse/issues/15611)) - Add Unix socket support for Redis connections. Contributed by Jason Little. 
([\#15644](https://github.com/matrix-org/synapse/issues/15644))

From 5d8c659373ae2b169892fc9d99d54bd1b3baf65a Mon Sep 17 00:00:00 2001
From: Sean Quah <8349537+squahtx@users.noreply.github.com>
Date: Tue, 30 May 2023 14:37:39 +0100
Subject: [PATCH 046/562] Remove unused `FederationServer.__str__` override (#15690)

Signed-off-by: Sean Quah
---
 changelog.d/15690.misc                  | 1 +
 synapse/federation/federation_server.py | 3 ---
 2 files changed, 1 insertion(+), 3 deletions(-)
 create mode 100644 changelog.d/15690.misc

diff --git a/changelog.d/15690.misc b/changelog.d/15690.misc
new file mode 100644
index 0000000000..c6c259eb7d
--- /dev/null
+++ b/changelog.d/15690.misc
@@ -0,0 +1 @@
+Remove some unused code.
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index f4ca70a698..e17cb840de 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -1291,9 +1291,6 @@ class FederationServer(FederationBase):
                 return
             lock = new_lock
 
-    def __str__(self) -> str:
-        return "<ReplicationLayer(%s)>" % self.server_name
-
     async def exchange_third_party_invite(
         self, sender_user_id: str, target_user_id: str, room_id: str, signed: Dict
     ) -> None:

From e2c8458bba5ab20f84c93a6c68e293b2d304cdc0 Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Fri, 17 Jun 2022 14:48:55 +0200
Subject: [PATCH 047/562] Make the api.auth.Auth a Protocol

---
 synapse/api/auth/__init__.py              | 175 ++++++++++++++
 synapse/api/auth/base.py                  | 273 ++++++++++++++++++++++
 synapse/api/{auth.py => auth/internal.py} | 249 +-------------------
 synapse/server.py                         |   3 +-
 tests/api/test_auth.py                    |   4 +-
 tests/handlers/test_register.py           |   4 +-
 tests/test_state.py                       |   4 +-
 7 files changed, 464 insertions(+), 248 deletions(-)
 create mode 100644 synapse/api/auth/__init__.py
 create mode 100644 synapse/api/auth/base.py
 rename synapse/api/{auth.py => auth/internal.py} (61%)

diff --git a/synapse/api/auth/__init__.py b/synapse/api/auth/__init__.py
new file mode 100644
index 0000000000..90cfe39d76
--- /dev/null
+++ b/synapse/api/auth/__init__.py
@@ -0,0 +1,175 @@
+# Copyright 2023 The Matrix.org Foundation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional, Tuple
+
+from typing_extensions import Protocol
+
+from twisted.web.server import Request
+
+from synapse.appservice import ApplicationService
+from synapse.http.site import SynapseRequest
+from synapse.types import Requester
+
+# guests always get this device id.
+GUEST_DEVICE_ID = "guest_device"
+
+
+class Auth(Protocol):
+    """The interface that an auth provider must implement."""
+
+    async def check_user_in_room(
+        self,
+        room_id: str,
+        requester: Requester,
+        allow_departed_users: bool = False,
+    ) -> Tuple[str, Optional[str]]:
+        """Check if the user is in the room, or was at some point.
+        Args:
+            room_id: The room to check.
+
+            requester: The user making the request, according to the access token.
+
+            current_state: Optional map of the current state of the room.
+                If provided then that map is used to check whether they are a
+                member of the room. Otherwise the current membership is
+                loaded from the database.
+
+            allow_departed_users: if True, accept users that were previously
+                members but have now departed.
+
+        Raises:
+            AuthError if the user is/was not in the room.
+        Returns:
+            The current membership of the user in the room and the
+            membership event ID of the user.
+        """
+
+    async def get_user_by_req(
+        self,
+        request: SynapseRequest,
+        allow_guest: bool = False,
+        allow_expired: bool = False,
+    ) -> Requester:
+        """Get a registered user's ID.
+
+        Args:
+            request: An HTTP request with an access_token query parameter.
+            allow_guest: If False, will raise an AuthError if the user making the
+                request is a guest.
+            allow_expired: If True, allow the request through even if the account
+                is expired, or session token lifetime has ended. Note that
+                /login will deliver access tokens regardless of expiration.
+
+        Returns:
+            Resolves to the requester
+        Raises:
+            InvalidClientCredentialsError if no user by that token exists or the token
+                is invalid.
+            AuthError if access is denied for the user in the access token
+        """
+
+    async def validate_appservice_can_control_user_id(
+        self, app_service: ApplicationService, user_id: str
+    ) -> None:
+        """Validates that the app service is allowed to control
+        the given user.
+
+        Args:
+            app_service: The app service that controls the user
+            user_id: The author MXID that the app service is controlling
+
+        Raises:
+            AuthError: If the application service is not allowed to control the user
+                (user namespace regex does not match, wrong homeserver, etc)
+                or if the user has not been registered yet.
+        """
+
+    async def get_user_by_access_token(
+        self,
+        token: str,
+        allow_expired: bool = False,
+    ) -> Requester:
+        """Validate access token and get user_id from it
+
+        Args:
+            token: The access token to get the user by
+            allow_expired: If False, raises an InvalidClientTokenError
+                if the token is expired
+
+        Raises:
+            InvalidClientTokenError if a user by that token exists, but the token is
+                expired
+            InvalidClientCredentialsError if no user by that token exists or the token
+                is invalid
+        """
+
+    async def is_server_admin(self, requester: Requester) -> bool:
+        """Check if the given user is a local server admin.
+
+        Args:
+            requester: user to check
+
+        Returns:
+            True if the user is an admin
+        """
+
+    async def check_can_change_room_list(
+        self, room_id: str, requester: Requester
+    ) -> bool:
+        """Determine whether the user is allowed to edit the room's entry in the
+        published room list.
+
+        Args:
+            room_id: The room to check.
+            requester: The user making the request, according to the access token.
+        """
+
+    @staticmethod
+    def has_access_token(request: Request) -> bool:
+        """Checks if the request has an access_token.
+
+        Returns:
+            False if no access_token was given, True otherwise.
+        """
+
+    @staticmethod
+    def get_access_token_from_request(request: Request) -> str:
+        """Extracts the access_token from the request.
+
+        Args:
+            request: The http request.
+        Returns:
+            The access_token
+        Raises:
+            MissingClientTokenError: If there isn't a single access_token in the
+                request
+        """
+
+    async def check_user_in_room_or_world_readable(
+        self, room_id: str, requester: Requester, allow_departed_users: bool = False
+    ) -> Tuple[str, Optional[str]]:
+        """Checks that the user is or was in the room or the room is world
+        readable. If it isn't then an exception is raised.
+
+        Args:
+            room_id: The room to check.
+            requester: The user making the request, according to the access token.
+            allow_departed_users: if True, accept users that were previously
+                members but have now departed
+
+        Returns:
+            Resolves to the current membership of the user in the room and the
+            membership event ID of the user. If the user is not in the room and
+            never has been, then `(Membership.JOIN, None)` is returned.
+        """
diff --git a/synapse/api/auth/base.py b/synapse/api/auth/base.py
new file mode 100644
index 0000000000..240f2b90de
--- /dev/null
+++ b/synapse/api/auth/base.py
@@ -0,0 +1,273 @@
+# Copyright 2023 The Matrix.org Foundation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+from typing import TYPE_CHECKING, Optional, Tuple
+
+from twisted.web.server import Request
+
+from synapse import event_auth
+from synapse.api.constants import EventTypes, HistoryVisibility, Membership
+from synapse.api.errors import (
+    AuthError,
+    Codes,
+    MissingClientTokenError,
+    UnstableSpecAuthError,
+)
+from synapse.appservice import ApplicationService
+from synapse.logging.opentracing import trace
+from synapse.types import Requester
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class BaseAuth:
+    """Common base class for all auth implementations."""
+
+    def __init__(self, hs: "HomeServer"):
+        self.hs = hs
+        self.store = hs.get_datastores().main
+        self._storage_controllers = hs.get_storage_controllers()
+
+    async def check_user_in_room(
+        self,
+        room_id: str,
+        requester: Requester,
+        allow_departed_users: bool = False,
+    ) -> Tuple[str, Optional[str]]:
+        """Check if the user is in the room, or was at some point.
+        Args:
+            room_id: The room to check.
+
+            requester: The user making the request, according to the access token.
+
+            current_state: Optional map of the current state of the room.
+                If provided then that map is used to check whether they are a
+                member of the room. Otherwise the current membership is
+                loaded from the database.
+
+            allow_departed_users: if True, accept users that were previously
+                members but have now departed.
+
+        Raises:
+            AuthError if the user is/was not in the room.
+        Returns:
+            The current membership of the user in the room and the
+            membership event ID of the user.
+        """
+
+        user_id = requester.user.to_string()
+        (
+            membership,
+            member_event_id,
+        ) = await self.store.get_local_current_membership_for_user_in_room(
+            user_id=user_id,
+            room_id=room_id,
+        )
+
+        if membership:
+            if membership == Membership.JOIN:
+                return membership, member_event_id
+
+            # XXX this looks totally bogus. Why do we not allow users who have been banned,
+            # or those who were members previously and have been re-invited?
+            if allow_departed_users and membership == Membership.LEAVE:
+                forgot = await self.store.did_forget(user_id, room_id)
+                if not forgot:
+                    return membership, member_event_id
+        raise UnstableSpecAuthError(
+            403,
+            "User %s not in room %s" % (user_id, room_id),
+            errcode=Codes.NOT_JOINED,
+        )
+
+    @trace
+    async def check_user_in_room_or_world_readable(
+        self, room_id: str, requester: Requester, allow_departed_users: bool = False
+    ) -> Tuple[str, Optional[str]]:
+        """Checks that the user is or was in the room or the room is world
+        readable. If it isn't then an exception is raised.
+
+        Args:
+            room_id: The room to check.
+            requester: The user making the request, according to the access token.
+            allow_departed_users: if True, accept users that were previously
+                members but have now departed
+
+        Returns:
+            Resolves to the current membership of the user in the room and the
+            membership event ID of the user. If the user is not in the room and
+            never has been, then `(Membership.JOIN, None)` is returned.
+        """
+
+        try:
+            # check_user_in_room will return the most recent membership
+            # event for the user if:
+            #  * The user is a non-guest user, and was ever in the room
+            #  * The user is a guest user, and has joined the room
+            # else it will throw.
+            return await self.check_user_in_room(
+                room_id, requester, allow_departed_users=allow_departed_users
+            )
+        except AuthError:
+            visibility = await self._storage_controllers.state.get_current_state_event(
+                room_id, EventTypes.RoomHistoryVisibility, ""
+            )
+            if (
+                visibility
+                and visibility.content.get("history_visibility")
+                == HistoryVisibility.WORLD_READABLE
+            ):
+                return Membership.JOIN, None
+            raise AuthError(
+                403,
+                "User %r not in room %s, and room previews are disabled"
+                % (requester.user, room_id),
+            )
+
+    async def validate_appservice_can_control_user_id(
+        self, app_service: ApplicationService, user_id: str
+    ) -> None:
+        """Validates that the app service is allowed to control
+        the given user.
+
+        Args:
+            app_service: The app service that controls the user
+            user_id: The author MXID that the app service is controlling
+
+        Raises:
+            AuthError: If the application service is not allowed to control the user
+                (user namespace regex does not match, wrong homeserver, etc)
+                or if the user has not been registered yet.
+        """
+
+        # It's ok if the app service is trying to use the sender from their registration
+        if app_service.sender == user_id:
+            pass
+        # Check to make sure the app service is allowed to control the user
+        elif not app_service.is_interested_in_user(user_id):
+            raise AuthError(
+                403,
+                "Application service cannot masquerade as this user (%s)." % user_id,
+            )
+        # Check to make sure the user is already registered on the homeserver
+        elif not (await self.store.get_user_by_id(user_id)):
+            raise AuthError(
+                403, "Application service has not registered this user (%s)" % user_id
+            )
+
+    async def is_server_admin(self, requester: Requester) -> bool:
+        """Check if the given user is a local server admin.
+
+        Args:
+            requester: user to check
+
+        Returns:
+            True if the user is an admin
+        """
+        raise NotImplementedError()
+
+    async def check_can_change_room_list(
+        self, room_id: str, requester: Requester
+    ) -> bool:
+        """Determine whether the user is allowed to edit the room's entry in the
+        published room list.
+
+        Args:
+            room_id: The room to check.
+            requester: The user making the request, according to the access token.
+        """
+
+        is_admin = await self.is_server_admin(requester)
+        if is_admin:
+            return True
+
+        await self.check_user_in_room(room_id, requester)
+
+        # We currently require the user is a "moderator" in the room. We do this
+        # by checking if they would (theoretically) be able to change the
+        # m.room.canonical_alias events
+
+        power_level_event = (
+            await self._storage_controllers.state.get_current_state_event(
+                room_id, EventTypes.PowerLevels, ""
+            )
+        )
+
+        auth_events = {}
+        if power_level_event:
+            auth_events[(EventTypes.PowerLevels, "")] = power_level_event
+
+        send_level = event_auth.get_send_level(
+            EventTypes.CanonicalAlias, "", power_level_event
+        )
+        user_level = event_auth.get_user_power_level(
+            requester.user.to_string(), auth_events
+        )
+
+        return user_level >= send_level
+
+    @staticmethod
+    def has_access_token(request: Request) -> bool:
+        """Checks if the request has an access_token.
+
+        Returns:
+            False if no access_token was given, True otherwise.
+        """
+        # This will always be set by the time Twisted calls us.
+        assert request.args is not None
+
+        query_params = request.args.get(b"access_token")
+        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
+        return bool(query_params) or bool(auth_headers)
+
+    @staticmethod
+    def get_access_token_from_request(request: Request) -> str:
+        """Extracts the access_token from the request.
+
+        Args:
+            request: The http request.
+        Returns:
+            The access_token
+        Raises:
+            MissingClientTokenError: If there isn't a single access_token in the
+                request
+        """
+        # This will always be set by the time Twisted calls us.
+        assert request.args is not None
+
+        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
+        query_params = request.args.get(b"access_token")
+        if auth_headers:
+            # Try to get the access_token from an "Authorization: Bearer"
+            # header
+            if query_params is not None:
+                raise MissingClientTokenError(
+                    "Mixing Authorization headers and access_token query parameters."
+                )
+            if len(auth_headers) > 1:
+                raise MissingClientTokenError("Too many Authorization headers.")
+            parts = auth_headers[0].split(b" ")
+            if parts[0] == b"Bearer" and len(parts) == 2:
+                return parts[1].decode("ascii")
+            else:
+                raise MissingClientTokenError("Invalid Authorization header.")
+        else:
+            # Try to get the access_token from the query params.
+            if not query_params:
+                raise MissingClientTokenError()
+
+            return query_params[0].decode("ascii")
diff --git a/synapse/api/auth.py b/synapse/api/auth/internal.py
similarity index 61%
rename from synapse/api/auth.py
rename to synapse/api/auth/internal.py
index 66e869bc2d..813d537e53 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth/internal.py
@@ -1,4 +1,4 @@
-# Copyright 2014 - 2016 OpenMarket Ltd
+# Copyright 2023 The Matrix.org Foundation.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
import logging -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, Optional import pymacaroons from netaddr import IPAddress from twisted.web.server import Request -from synapse import event_auth -from synapse.api.constants import EventTypes, HistoryVisibility, Membership from synapse.api.errors import ( AuthError, Codes, InvalidClientTokenError, MissingClientTokenError, - UnstableSpecAuthError, ) -from synapse.appservice import ApplicationService from synapse.http import get_request_user_agent from synapse.http.site import SynapseRequest -from synapse.logging.opentracing import ( - active_span, - force_tracing, - start_active_span, - trace, -) +from synapse.logging.opentracing import active_span, force_tracing, start_active_span from synapse.types import Requester, create_requester from synapse.util.cancellation import cancellable +from . import GUEST_DEVICE_ID +from .base import BaseAuth + if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) -# guests always get this device id. -GUEST_DEVICE_ID = "guest_device" - - -class Auth: +class InternalAuth(BaseAuth): """ This class contains functions for authenticating users of our client-server API. """ def __init__(self, hs: "HomeServer"): - self.hs = hs + super().__init__(hs) self.clock = hs.get_clock() - self.store = hs.get_datastores().main self._account_validity_handler = hs.get_account_validity_handler() - self._storage_controllers = hs.get_storage_controllers() self._macaroon_generator = hs.get_macaroon_generator() self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips self._track_puppeted_user_ips = hs.config.api.track_puppeted_user_ips self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users - async def check_user_in_room( - self, - room_id: str, - requester: Requester, - allow_departed_users: bool = False, - ) -> Tuple[str, Optional[str]]: - """Check if the user is in the room, or was at some point. - Args: - room_id: The room to check. - - requester: The user making the request, according to the access token. - - current_state: Optional map of the current state of the room. - If provided then that map is used to check whether they are a - member of the room. Otherwise the current membership is - loaded from the database. - - allow_departed_users: if True, accept users that were previously - members but have now departed. - - Raises: - AuthError if the user is/was not in the room. - Returns: - The current membership of the user in the room and the - membership event ID of the user. - """ - - user_id = requester.user.to_string() - ( - membership, - member_event_id, - ) = await self.store.get_local_current_membership_for_user_in_room( - user_id=user_id, - room_id=room_id, - ) - - if membership: - if membership == Membership.JOIN: - return membership, member_event_id - - # XXX this looks totally bogus. Why do we not allow users who have been banned, - # or those who were members previously and have been re-invited? 
- if allow_departed_users and membership == Membership.LEAVE: - forgot = await self.store.did_forget(user_id, room_id) - if not forgot: - return membership, member_event_id - raise UnstableSpecAuthError( - 403, - "User %s not in room %s" % (user_id, room_id), - errcode=Codes.NOT_JOINED, - ) - @cancellable async def get_user_by_req( self, @@ -253,37 +189,6 @@ class Auth: except KeyError: raise MissingClientTokenError() - async def validate_appservice_can_control_user_id( - self, app_service: ApplicationService, user_id: str - ) -> None: - """Validates that the app service is allowed to control - the given user. - - Args: - app_service: The app service that controls the user - user_id: The author MXID that the app service is controlling - - Raises: - AuthError: If the application service is not allowed to control the user - (user namespace regex does not match, wrong homeserver, etc) - or if the user has not been registered yet. - """ - - # It's ok if the app service is trying to use the sender from their registration - if app_service.sender == user_id: - pass - # Check to make sure the app service is allowed to control the user - elif not app_service.is_interested_in_user(user_id): - raise AuthError( - 403, - "Application service cannot masquerade as this user (%s)." % user_id, - ) - # Check to make sure the user is already registered on the homeserver - elif not (await self.store.get_user_by_id(user_id)): - raise AuthError( - 403, "Application service has not registered this user (%s)" % user_id - ) - @cancellable async def _get_appservice_user(self, request: Request) -> Optional[Requester]: """ @@ -462,141 +367,3 @@ class Auth: True if the user is an admin """ return await self.store.is_server_admin(requester.user) - - async def check_can_change_room_list( - self, room_id: str, requester: Requester - ) -> bool: - """Determine whether the user is allowed to edit the room's entry in the - published room list. - - Args: - room_id: The room to check. - requester: The user making the request, according to the access token. - """ - - is_admin = await self.is_server_admin(requester) - if is_admin: - return True - - await self.check_user_in_room(room_id, requester) - - # We currently require the user is a "moderator" in the room. We do this - # by checking if they would (theoretically) be able to change the - # m.room.canonical_alias events - - power_level_event = ( - await self._storage_controllers.state.get_current_state_event( - room_id, EventTypes.PowerLevels, "" - ) - ) - - auth_events = {} - if power_level_event: - auth_events[(EventTypes.PowerLevels, "")] = power_level_event - - send_level = event_auth.get_send_level( - EventTypes.CanonicalAlias, "", power_level_event - ) - user_level = event_auth.get_user_power_level( - requester.user.to_string(), auth_events - ) - - return user_level >= send_level - - @staticmethod - def has_access_token(request: Request) -> bool: - """Checks if the request has an access_token. - - Returns: - False if no access_token was given, True otherwise. - """ - # This will always be set by the time Twisted calls us. - assert request.args is not None - - query_params = request.args.get(b"access_token") - auth_headers = request.requestHeaders.getRawHeaders(b"Authorization") - return bool(query_params) or bool(auth_headers) - - @staticmethod - @cancellable - def get_access_token_from_request(request: Request) -> str: - """Extracts the access_token from the request. - - Args: - request: The http request. 
- Returns: - The access_token - Raises: - MissingClientTokenError: If there isn't a single access_token in the - request - """ - # This will always be set by the time Twisted calls us. - assert request.args is not None - - auth_headers = request.requestHeaders.getRawHeaders(b"Authorization") - query_params = request.args.get(b"access_token") - if auth_headers: - # Try the get the access_token from a "Authorization: Bearer" - # header - if query_params is not None: - raise MissingClientTokenError( - "Mixing Authorization headers and access_token query parameters." - ) - if len(auth_headers) > 1: - raise MissingClientTokenError("Too many Authorization headers.") - parts = auth_headers[0].split(b" ") - if parts[0] == b"Bearer" and len(parts) == 2: - return parts[1].decode("ascii") - else: - raise MissingClientTokenError("Invalid Authorization header.") - else: - # Try to get the access_token from the query params. - if not query_params: - raise MissingClientTokenError() - - return query_params[0].decode("ascii") - - @trace - async def check_user_in_room_or_world_readable( - self, room_id: str, requester: Requester, allow_departed_users: bool = False - ) -> Tuple[str, Optional[str]]: - """Checks that the user is or was in the room or the room is world - readable. If it isn't then an exception is raised. - - Args: - room_id: The room to check. - requester: The user making the request, according to the access token. - allow_departed_users: If True, accept users that were previously - members but have now departed. - - Returns: - Resolves to the current membership of the user in the room and the - membership event ID of the user. If the user is not in the room and - never has been, then `(Membership.JOIN, None)` is returned. - """ - - try: - # check_user_in_room will return the most recent membership - # event for the user if: - # * The user is a non-guest user, and was ever in the room - # * The user is a guest user, and has joined the room - # else it will throw. 
- return await self.check_user_in_room( - room_id, requester, allow_departed_users=allow_departed_users - ) - except AuthError: - visibility = await self._storage_controllers.state.get_current_state_event( - room_id, EventTypes.RoomHistoryVisibility, "" - ) - if ( - visibility - and visibility.content.get("history_visibility") - == HistoryVisibility.WORLD_READABLE - ): - return Membership.JOIN, None - raise UnstableSpecAuthError( - 403, - "User %s not in room %s, and room previews are disabled" - % (requester.user, room_id), - errcode=Codes.NOT_JOINED, - ) diff --git a/synapse/server.py b/synapse/server.py index cce5fb66ff..df88af12a9 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -31,6 +31,7 @@ from twisted.web.iweb import IPolicyForHTTPS from twisted.web.resource import Resource from synapse.api.auth import Auth +from synapse.api.auth.internal import InternalAuth from synapse.api.auth_blocking import AuthBlocking from synapse.api.filtering import Filtering from synapse.api.ratelimiting import Ratelimiter, RequestRatelimiter @@ -427,7 +428,7 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_auth(self) -> Auth: - return Auth(self) + return InternalAuth(self) @cache_in_self def get_auth_blocking(self) -> AuthBlocking: diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 6e36e73f0d..3dac52d178 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -18,7 +18,7 @@ import pymacaroons from twisted.test.proto_helpers import MemoryReactor -from synapse.api.auth import Auth +from synapse.api.auth.internal import InternalAuth from synapse.api.auth_blocking import AuthBlocking from synapse.api.constants import UserTypes from synapse.api.errors import ( @@ -48,7 +48,7 @@ class AuthTestCase(unittest.HomeserverTestCase): # have been called by the HomeserverTestCase machinery. hs.datastores.main = self.store # type: ignore[union-attr] hs.get_auth_handler().store = self.store - self.auth = Auth(hs) + self.auth = InternalAuth(hs) # AuthBlocking reads from the hs' config on initialization. 
We need to # modify its config instead of the hs' diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 73822b07a5..8d8584609b 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -17,7 +17,7 @@ from unittest.mock import Mock from twisted.test.proto_helpers import MemoryReactor -from synapse.api.auth import Auth +from synapse.api.auth.internal import InternalAuth from synapse.api.constants import UserTypes from synapse.api.errors import ( CodeMessageException, @@ -683,7 +683,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): request = Mock(args={}) request.args[b"access_token"] = [token.encode("ascii")] request.requestHeaders.getRawHeaders = mock_getRawHeaders() - auth = Auth(self.hs) + auth = InternalAuth(self.hs) requester = self.get_success(auth.get_user_by_req(request)) self.assertTrue(requester.shadow_banned) diff --git a/tests/test_state.py b/tests/test_state.py index ddf59916b1..7a49b87953 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -28,7 +28,7 @@ from unittest.mock import Mock from twisted.internet import defer -from synapse.api.auth import Auth +from synapse.api.auth.internal import InternalAuth from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions from synapse.events import EventBase, make_event_from_dict @@ -240,7 +240,7 @@ class StateTestCase(unittest.TestCase): hs.get_macaroon_generator.return_value = MacaroonGenerator( clock, "tesths", b"verysecret" ) - hs.get_auth.return_value = Auth(hs) + hs.get_auth.return_value = InternalAuth(hs) hs.get_state_resolution_handler = lambda: StateResolutionHandler(hs) hs.get_storage_controllers.return_value = storage_controllers From 765244faeef9e20c573d2c7935f05f76aeca1c28 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 13 Sep 2022 17:54:32 +0200 Subject: [PATCH 048/562] Initial MSC3964 support: delegation of auth to OIDC server --- synapse/api/auth/oauth_delegated.py | 227 ++++++++++++++++++++++++++++ synapse/config/auth.py | 30 +++- synapse/server.py | 4 + 3 files changed, 260 insertions(+), 1 deletion(-) create mode 100644 synapse/api/auth/oauth_delegated.py diff --git a/synapse/api/auth/oauth_delegated.py b/synapse/api/auth/oauth_delegated.py new file mode 100644 index 0000000000..b3b5c29a94 --- /dev/null +++ b/synapse/api/auth/oauth_delegated.py @@ -0,0 +1,227 @@ +# Copyright 2023 The Matrix.org Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import logging +from typing import TYPE_CHECKING, Any, Dict, List, Optional +from urllib.parse import urlencode + +from authlib.oauth2 import ClientAuth +from authlib.oauth2.auth import encode_client_secret_basic, encode_client_secret_post +from authlib.oauth2.rfc7523 import ClientSecretJWT, PrivateKeyJWT, private_key_jwt_sign +from authlib.oauth2.rfc7662 import IntrospectionToken +from authlib.oidc.discovery import OpenIDProviderMetadata, get_well_known_url + +from twisted.web.client import readBody +from twisted.web.http_headers import Headers + +from synapse.api.auth.base import BaseAuth +from synapse.api.errors import AuthError, StoreError +from synapse.http.site import SynapseRequest +from synapse.logging.context import make_deferred_yieldable +from synapse.types import Requester, UserID, create_requester +from synapse.util import json_decoder +from synapse.util.caches.cached_call import RetryOnExceptionCachedCall + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +def scope_to_list(scope: str) -> List[str]: + """Convert a scope string to a list of scope tokens""" + return scope.strip().split(" ") + + +class PrivateKeyJWTWithKid(PrivateKeyJWT): + """An implementation of the private_key_jwt client auth method that includes a kid header. + + This is needed because some providers (Keycloak) require the kid header to figure + out which key to use to verify the signature. + """ + + def sign(self, auth: Any, token_endpoint: str) -> bytes: + return private_key_jwt_sign( + auth.client_secret, + client_id=auth.client_id, + token_endpoint=token_endpoint, + claims=self.claims, + header={"kid": auth.client_secret["kid"]}, + ) + + +class OAuthDelegatedAuth(BaseAuth): + AUTH_METHODS = { + "client_secret_post": encode_client_secret_post, + "client_secret_basic": encode_client_secret_basic, + "client_secret_jwt": ClientSecretJWT(), + "private_key_jwt": PrivateKeyJWTWithKid(), + } + + def __init__(self, hs: "HomeServer"): + super().__init__(hs) + + self._config = hs.config.auth + assert self._config.oauth_delegation_enabled, "OAuth delegation is not enabled" + assert self._config.oauth_delegation_issuer, "No issuer provided" + assert self._config.oauth_delegation_client_id, "No client_id provided" + assert self._config.oauth_delegation_client_secret, "No client_secret provided" + assert ( + self._config.oauth_delegation_client_auth_method + in OAuthDelegatedAuth.AUTH_METHODS + ), "Invalid client_auth_method" + + self._http_client = hs.get_proxied_http_client() + self._hostname = hs.hostname + + self._issuer_metadata = RetryOnExceptionCachedCall(self._load_metadata) + secret = self._config.oauth_delegation_client_secret + self._client_auth = ClientAuth( + self._config.oauth_delegation_client_id, + secret, + OAuthDelegatedAuth.AUTH_METHODS[ + self._config.oauth_delegation_client_auth_method + ], + ) + + async def _load_metadata(self) -> OpenIDProviderMetadata: + if self._config.oauth_delegation_issuer_metadata is not None: + return OpenIDProviderMetadata( + **self._config.oauth_delegation_issuer_metadata + ) + url = get_well_known_url(self._config.oauth_delegation_issuer, external=True) + response = await self._http_client.get_json(url) + metadata = OpenIDProviderMetadata(**response) + # metadata.validate_introspection_endpoint() + return metadata + + async def _introspect_token(self, token: str) -> IntrospectionToken: + metadata = await self._issuer_metadata.get() + introspection_endpoint = metadata.get("introspection_endpoint") + raw_headers: 
Dict[str, str] = {
+            "Content-Type": "application/x-www-form-urlencoded",
+            "User-Agent": str(self._http_client.user_agent, "utf-8"),
+            "Accept": "application/json",
+        }
+
+        args = {"token": token, "token_type_hint": "access_token"}
+        body = urlencode(args, True)
+
+        # Fill the body/headers with credentials
+        uri, raw_headers, body = self._client_auth.prepare(
+            method="POST", uri=introspection_endpoint, headers=raw_headers, body=body
+        )
+        headers = Headers({k: [v] for (k, v) in raw_headers.items()})
+
+        # Do the actual request
+        # We're not using the SimpleHttpClient util methods as we don't want to
+        # check the HTTP status code and we do the body encoding ourselves.
+        response = await self._http_client.request(
+            method="POST",
+            uri=uri,
+            data=body.encode("utf-8"),
+            headers=headers,
+        )
+
+        resp_body = await make_deferred_yieldable(readBody(response))
+        # TODO: Let's not worry about 5xx errors & co. for now and just try
+        # decoding that as JSON. We should also do some validation of the
+        # response
+        resp = json_decoder.decode(resp_body.decode("utf-8"))
+        return IntrospectionToken(**resp)
+
+    async def get_user_by_req(
+        self,
+        request: SynapseRequest,
+        allow_guest: bool = False,
+        allow_expired: bool = False,
+    ) -> Requester:
+        access_token = self.get_access_token_from_request(request)
+        return await self.get_user_by_access_token(access_token, allow_expired)
+
+    async def get_user_by_access_token(
+        self,
+        token: str,
+        allow_expired: bool = False,
+    ) -> Requester:
+        introspection_result = await self._introspect_token(token)
+
+        logger.info(f"Introspection result: {introspection_result!r}")
+
+        # TODO: introspection verification should be more extensive, especially:
+        # - verify the scopes
+        # - verify the audience
+        if not introspection_result.get("active"):
+            raise AuthError(
+                403,
+                "Invalid access token",
+            )
+
+        # TODO: claim mapping should be configurable
+        username: Optional[str] = introspection_result.get("username")
+        if username is None or not isinstance(username, str):
+            raise AuthError(
+                500,
+                "Invalid username claim in the introspection result",
+            )
+
+        # Let's look at the scope
+        scope: List[str] = scope_to_list(introspection_result.get("scope", ""))
+        device_id = None
+        # Find device_id in scope
+        for tok in scope:
+            if tok.startswith("urn:matrix:org.matrix.msc2967.client:device:"):
+                parts = tok.split(":")
+                if len(parts) == 5:
+                    device_id = parts[4]
+
+        user_id = UserID(username, self._hostname)
+        user_info = await self.store.get_userinfo_by_id(user_id=user_id.to_string())
+
+        # If the user does not exist, we should create it on the fly
+        # TODO: we could use SCIM to provision users ahead of time and listen
+        # for SCIM SET events if those ever become standard:
+        # https://datatracker.ietf.org/doc/html/draft-hunt-scim-notify-00
+        if not user_info:
+            await self.store.register_user(user_id=user_id.to_string())
+            user_info = await self.store.get_userinfo_by_id(user_id=user_id.to_string())
+            if not user_info:
+                raise AuthError(
+                    500,
+                    "Could not create user on the fly",
+                )
+
+        if device_id:
+            # Create the device on the fly if it does not exist
+            try:
+                await self.store.get_device(
+                    user_id=user_id.to_string(), device_id=device_id
+                )
+            except StoreError:
+                await self.store.store_device(
+                    user_id=user_id.to_string(),
+                    device_id=device_id,
+                    initial_device_display_name="OIDC-native client",
+                )
+
+        # TODO: there are a few things missing in the requester here, which still need
+        # to be figured out, like:
+        # - impersonation, with the `authenticated_entity`, 
which is used for + # rate-limiting, MAU limits, etc. + # - shadow-banning, with the `shadow_banned` flag + # - a proper solution for appservices, which still needs to be figured out in + # the context of MSC3861 + return create_requester( + user_id=user_id, + device_id=device_id, + ) diff --git a/synapse/config/auth.py b/synapse/config/auth.py index 35774962c0..25b5cc60dc 100644 --- a/synapse/config/auth.py +++ b/synapse/config/auth.py @@ -14,9 +14,11 @@ # limitations under the License. from typing import Any +from authlib.jose.rfc7517 import JsonWebKey + from synapse.types import JsonDict -from ._base import Config +from ._base import Config, ConfigError class AuthConfig(Config): @@ -53,3 +55,29 @@ class AuthConfig(Config): self.ui_auth_session_timeout = self.parse_duration( ui_auth.get("session_timeout", 0) ) + + oauth_delegation = config.get("oauth_delegation", {}) + self.oauth_delegation_enabled = oauth_delegation.get("enabled", False) + self.oauth_delegation_issuer = oauth_delegation.get("issuer", "") + self.oauth_delegation_issuer_metadata = oauth_delegation.get("issuer_metadata") + self.oauth_delegation_account = oauth_delegation.get("account", "") + self.oauth_delegation_client_id = oauth_delegation.get("client_id", "") + self.oauth_delegation_client_secret = oauth_delegation.get("client_secret", "") + self.oauth_delegation_client_auth_method = oauth_delegation.get( + "client_auth_method", "client_secret_post" + ) + + self.password_enabled = password_config.get( + "enabled", not self.oauth_delegation_enabled + ) + + if self.oauth_delegation_client_auth_method == "private_key_jwt": + self.oauth_delegation_client_secret = JsonWebKey.import_key( + self.oauth_delegation_client_secret + ) + + # If we are delegating via OAuth then password cannot be supported as well + if self.oauth_delegation_enabled and self.password_enabled: + raise ConfigError( + "Password auth cannot be enabled when OAuth delegation is enabled" + ) diff --git a/synapse/server.py b/synapse/server.py index df88af12a9..1c82500f30 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -428,6 +428,10 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_auth(self) -> Auth: + if self.config.auth.oauth_delegation_enabled: + from synapse.api.auth.oauth_delegated import OAuthDelegatedAuth + + return OAuthDelegatedAuth(self) return InternalAuth(self) @cache_in_self From 8f576aa462684e13b20dc380e759a76e6db821b6 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 16 May 2023 15:36:40 +0200 Subject: [PATCH 049/562] Expose the public keys used for client authentication on an endpoint --- synapse/rest/synapse/client/__init__.py | 6 +++ synapse/rest/synapse/client/jwks.py | 72 +++++++++++++++++++++++++ 2 files changed, 78 insertions(+) create mode 100644 synapse/rest/synapse/client/jwks.py diff --git a/synapse/rest/synapse/client/__init__.py b/synapse/rest/synapse/client/__init__.py index e55924f597..dcfd0ad6aa 100644 --- a/synapse/rest/synapse/client/__init__.py +++ b/synapse/rest/synapse/client/__init__.py @@ -46,6 +46,12 @@ def build_synapse_client_resource_tree(hs: "HomeServer") -> Mapping[str, Resourc "/_synapse/client/unsubscribe": UnsubscribeResource(hs), } + # Expose the JWKS endpoint if OAuth2 delegation is enabled + if hs.config.auth.oauth_delegation_enabled: + from synapse.rest.synapse.client.jwks import JwksResource + + resources["/_synapse/jwks"] = JwksResource(hs) + # provider-specific SSO bits. Only load these if they are enabled, since they # rely on optional dependencies. 
if hs.config.oidc.oidc_enabled: diff --git a/synapse/rest/synapse/client/jwks.py b/synapse/rest/synapse/client/jwks.py new file mode 100644 index 0000000000..818585843e --- /dev/null +++ b/synapse/rest/synapse/client/jwks.py @@ -0,0 +1,72 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging +from typing import TYPE_CHECKING, Tuple + +from synapse.http.server import DirectServeJsonResource +from synapse.http.site import SynapseRequest +from synapse.types import JsonDict + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class JwksResource(DirectServeJsonResource): + def __init__(self, hs: "HomeServer"): + from authlib.jose.rfc7517 import Key + + super().__init__(extract_context=True) + + # Parameters that are allowed to be exposed in the public key. + # This is done manually, because authlib's private to public key conversion + # is unreliable depending on the version. Instead, we just serialize the private + # key and only keep the public parameters. + # List from https://www.iana.org/assignments/jose/jose.xhtml#web-key-parameters + public_parameters = { + "kty", + "use", + "key_ops", + "alg", + "kid", + "x5u", + "x5c", + "x5t", + "x5t#S256", + "crv", + "x", + "y", + "n", + "e", + "ext", + } + + secret = hs.config.auth.oauth_delegation_client_secret + + if isinstance(secret, Key): + private_key = secret.as_dict() + public_key = { + k: v for k, v in private_key.items() if k in public_parameters + } + keys = [public_key] + else: + keys = [] + + self.res = { + "keys": keys, + } + + async def _async_render_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + return 200, self.res From e82ec6d00819253d15d22a41ba3b75ad77dce98f Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Thu, 18 Nov 2021 15:21:00 +0100 Subject: [PATCH 050/562] MSC2965: OIDC Provider discovery via well-known document --- synapse/rest/well_known.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py index e2174fdfea..fd3b17a5ad 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py @@ -44,6 +44,15 @@ class WellKnownBuilder: "base_url": self._config.registration.default_identity_server } + if self._config.auth.oauth_delegation_enabled: + result["org.matrix.msc2965.authentication"] = { + "issuer": self._config.auth.oauth_delegation_issuer + } + if self._config.auth.oauth_delegation_account != "": + result["org.matrix.msc2965.authentication"][ + "account" + ] = self._config.auth.oauth_delegation_account + if self._config.server.extra_well_known_client_content: for ( key, From c5cf1b421d8e0d765f812880ff41fe5d244a0919 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Fri, 17 Jun 2022 16:58:05 +0200 Subject: [PATCH 051/562] Save the scopes in the requester --- synapse/api/auth/oauth_delegated.py | 1 + synapse/types/__init__.py | 8 ++++++++ tests/api/test_auth.py | 2 ++ 3 files changed, 11 insertions(+) diff --git 
a/synapse/api/auth/oauth_delegated.py b/synapse/api/auth/oauth_delegated.py index b3b5c29a94..2715127e32 100644 --- a/synapse/api/auth/oauth_delegated.py +++ b/synapse/api/auth/oauth_delegated.py @@ -224,4 +224,5 @@ class OAuthDelegatedAuth(BaseAuth): return create_requester( user_id=user_id, device_id=device_id, + scope=scope, ) diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 42baf8ac6b..dfc95e8ebb 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -131,6 +131,7 @@ class Requester: user: "UserID" access_token_id: Optional[int] is_guest: bool + scope: Set[str] shadow_banned: bool device_id: Optional[str] app_service: Optional["ApplicationService"] @@ -147,6 +148,7 @@ class Requester: "user_id": self.user.to_string(), "access_token_id": self.access_token_id, "is_guest": self.is_guest, + "scope": list(self.scope), "shadow_banned": self.shadow_banned, "device_id": self.device_id, "app_server_id": self.app_service.id if self.app_service else None, @@ -175,6 +177,7 @@ class Requester: user=UserID.from_string(input["user_id"]), access_token_id=input["access_token_id"], is_guest=input["is_guest"], + scope=set(input["scope"]), shadow_banned=input["shadow_banned"], device_id=input["device_id"], app_service=appservice, @@ -186,6 +189,7 @@ def create_requester( user_id: Union[str, "UserID"], access_token_id: Optional[int] = None, is_guest: bool = False, + scope: StrCollection = (), shadow_banned: bool = False, device_id: Optional[str] = None, app_service: Optional["ApplicationService"] = None, @@ -199,6 +203,7 @@ def create_requester( access_token_id: *ID* of the access token used for this request, or None if it came via the appservice API or similar is_guest: True if the user making this request is a guest user + scope: the scope of the access token used for this request, if any shadow_banned: True if the user making this request is shadow-banned. 
device_id: device_id which was set at authentication time app_service: the AS requesting on behalf of the user @@ -215,10 +220,13 @@ def create_requester( if authenticated_entity is None: authenticated_entity = user_id.to_string() + scope = set(scope) + return Requester( user_id, access_token_id, is_guest, + scope, shadow_banned, device_id, app_service, diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 3dac52d178..cdb0048122 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -426,6 +426,7 @@ class AuthTestCase(unittest.HomeserverTestCase): access_token_id=None, device_id="FOOBAR", is_guest=False, + scope=set(), shadow_banned=False, app_service=appservice, authenticated_entity="@appservice:server", @@ -456,6 +457,7 @@ class AuthTestCase(unittest.HomeserverTestCase): access_token_id=None, device_id="FOOBAR", is_guest=False, + scope=set(), shadow_banned=False, app_service=appservice, authenticated_entity="@appservice:server", From 7628dbf4e9b48d9714ccbd0530af579d9c290fed Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Mon, 20 Jun 2022 11:17:48 +0200 Subject: [PATCH 052/562] Handle the Synapse admin scope --- synapse/api/auth/oauth_delegated.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/api/auth/oauth_delegated.py b/synapse/api/auth/oauth_delegated.py index 2715127e32..ff1f395e58 100644 --- a/synapse/api/auth/oauth_delegated.py +++ b/synapse/api/auth/oauth_delegated.py @@ -140,6 +140,9 @@ class OAuthDelegatedAuth(BaseAuth): resp = json_decoder.decode(resp_body.decode("utf-8")) return IntrospectionToken(**resp) + async def is_server_admin(self, requester: Requester) -> bool: + return "urn:synapse:admin:*" in requester.scope + async def get_user_by_req( self, request: SynapseRequest, From f9cd549f6485620381443f2b4b75a1bd0a88d39f Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 13 Sep 2022 16:13:20 +0200 Subject: [PATCH 053/562] Record the `sub` claims as an external_id --- synapse/api/auth/oauth_delegated.py | 59 ++++++++++++++++++----------- 1 file changed, 37 insertions(+), 22 deletions(-) diff --git a/synapse/api/auth/oauth_delegated.py b/synapse/api/auth/oauth_delegated.py index ff1f395e58..5565ef0a1a 100644 --- a/synapse/api/auth/oauth_delegated.py +++ b/synapse/api/auth/oauth_delegated.py @@ -68,6 +68,8 @@ class OAuthDelegatedAuth(BaseAuth): "private_key_jwt": PrivateKeyJWTWithKid(), } + EXTERNAL_ID_PROVIDER = "oauth-delegated" + def __init__(self, hs: "HomeServer"): super().__init__(hs) @@ -170,13 +172,42 @@ class OAuthDelegatedAuth(BaseAuth): "Invalid access token", ) - # TODO: claim mapping should be configurable - username: Optional[str] = introspection_result.get("username") - if username is None or not isinstance(username, str): - raise AuthError( - 500, - "Invalid username claim in the introspection result", + # Match via the sub claim + sub: Optional[str] = introspection_result.get("sub") + if sub is None: + raise AuthError(500, "Invalid sub claim in the introspection result") + + user_id_str = await self.store.get_user_by_external_id( + OAuthDelegatedAuth.EXTERNAL_ID_PROVIDER, sub + ) + if user_id_str is None: + # If we could not find a user via the external_id, it either does not exist, + # or the external_id was never recorded + + # TODO: claim mapping should be configurable + username: Optional[str] = introspection_result.get("username") + if username is None or not isinstance(username, str): + raise AuthError( + 500, + "Invalid username claim in the introspection result", + ) + user_id = UserID(username, 
self._hostname)
+
+            # First try to find a user from the username claim
+            user_info = await self.store.get_userinfo_by_id(user_id=user_id.to_string())
+            if user_info is None:
+                # If the user does not exist, we should create it on the fly
+                # TODO: we could use SCIM to provision users ahead of time and listen
+                # for SCIM SET events if those ever become standard:
+                # https://datatracker.ietf.org/doc/html/draft-hunt-scim-notify-00
+                await self.store.register_user(user_id=user_id.to_string())
+
+            # And record the sub as external_id
+            await self.store.record_user_external_id(
+                OAuthDelegatedAuth.EXTERNAL_ID_PROVIDER, sub, user_id.to_string()
             )
+        else:
+            user_id = UserID.from_string(user_id_str)
 
         # Let's look at the scope
         scope: List[str] = scope_to_list(introspection_result.get("scope", ""))
@@ -188,22 +219,6 @@ class OAuthDelegatedAuth(BaseAuth):
             if len(parts) == 5:
                 device_id = parts[4]
 
-        user_id = UserID(username, self._hostname)
-        user_info = await self.store.get_userinfo_by_id(user_id=user_id.to_string())
-
-        # If the user does not exist, we should create it on the fly
-        # TODO: we could use SCIM to provision users ahead of time and listen
-        # for SCIM SET events if those ever become standard:
-        # https://datatracker.ietf.org/doc/html/draft-hunt-scim-notify-00
-        if not user_info:
-            await self.store.register_user(user_id=user_id.to_string())
-            user_info = await self.store.get_userinfo_by_id(user_id=user_id.to_string())
-            if not user_info:
-                raise AuthError(
-                    500,
-                    "Could not create user on the fly",
-                )
-
         if device_id:
             # Create the device on the fly if it does not exist
             try:

From d20669971a5be17776a2991c77f5348662bb3902 Mon Sep 17 00:00:00 2001
From: Hugh Nimmo-Smith
Date: Tue, 20 Sep 2022 12:54:18 +0100
Subject: [PATCH 054/562] Use `name` claim as display name when registering users on the fly.

This makes it so that the `name` claim obtained when introspecting the token
is used as the display name when registering a user on the fly.
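
For example (a hypothetical introspection payload, for illustration only),
a response such as:

    {
        "active": true,
        "sub": "abc-def-ghi",
        "username": "alice",
        "scope": "urn:matrix:org.matrix.msc2967.client:api:*",
        "name": "Alice"
    }

results in `@alice:<server_name>` being registered on the fly with the
display name "Alice". If the `name` claim is absent, the profile is created
without a display name.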
--- synapse/api/auth/oauth_delegated.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/synapse/api/auth/oauth_delegated.py b/synapse/api/auth/oauth_delegated.py index 5565ef0a1a..9e01e3fadc 100644 --- a/synapse/api/auth/oauth_delegated.py +++ b/synapse/api/auth/oauth_delegated.py @@ -200,7 +200,14 @@ class OAuthDelegatedAuth(BaseAuth): # TODO: we could use SCIM to provision users ahead of time and listen # for SCIM SET events if those ever become standard: # https://datatracker.ietf.org/doc/html/draft-hunt-scim-notify-00 - await self.store.register_user(user_id=user_id.to_string()) + + # TODO: claim mapping should be configurable + # If present, use the name claim as the displayname + name: Optional[str] = introspection_result.get("name") + + await self.store.register_user( + user_id=user_id.to_string(), create_profile_with_displayname=name + ) # And record the sub as external_id await self.store.record_user_external_id( From a1374b5c70fc8520930a1777dc131403812d7967 Mon Sep 17 00:00:00 2001 From: Hugh Nimmo-Smith Date: Wed, 16 Nov 2022 11:05:05 +0000 Subject: [PATCH 055/562] MSC2967: Check access token scope for use as user and add guest support --- synapse/api/auth/oauth_delegated.py | 30 +++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/synapse/api/auth/oauth_delegated.py b/synapse/api/auth/oauth_delegated.py index 9e01e3fadc..cfa178218c 100644 --- a/synapse/api/auth/oauth_delegated.py +++ b/synapse/api/auth/oauth_delegated.py @@ -25,7 +25,7 @@ from twisted.web.client import readBody from twisted.web.http_headers import Headers from synapse.api.auth.base import BaseAuth -from synapse.api.errors import AuthError, StoreError +from synapse.api.errors import AuthError, InvalidClientTokenError, StoreError from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable from synapse.types import Requester, UserID, create_requester @@ -164,18 +164,29 @@ class OAuthDelegatedAuth(BaseAuth): logger.info(f"Introspection result: {introspection_result!r}") # TODO: introspection verification should be more extensive, especially: - # - verify the scopes # - verify the audience if not introspection_result.get("active"): - raise AuthError( - 403, - "Invalid access token", - ) + raise InvalidClientTokenError("Token is not active") + + # Let's look at the scope + scope: List[str] = scope_to_list(introspection_result.get("scope", "")) + + # Determine type of user based on presence of particular scopes + has_admin_scope = "urn:synapse:admin:*" in scope + has_user_scope = "urn:matrix:org.matrix.msc2967.client:api:*" in scope + has_guest_scope = "urn:matrix:org.matrix.msc2967.client:api:guest" in scope + is_user = has_user_scope or has_admin_scope + is_guest = has_guest_scope and not is_user + + if not is_user and not is_guest: + raise InvalidClientTokenError("No scope in token granting user rights") # Match via the sub claim sub: Optional[str] = introspection_result.get("sub") if sub is None: - raise AuthError(500, "Invalid sub claim in the introspection result") + raise InvalidClientTokenError( + "Invalid sub claim in the introspection result" + ) user_id_str = await self.store.get_user_by_external_id( OAuthDelegatedAuth.EXTERNAL_ID_PROVIDER, sub @@ -216,10 +227,8 @@ class OAuthDelegatedAuth(BaseAuth): else: user_id = UserID.from_string(user_id_str) - # Let's look at the scope - scope: List[str] = scope_to_list(introspection_result.get("scope", "")) - device_id = None # Find device_id in scope + 
device_id = None for tok in scope: if tok.startswith("urn:matrix:org.matrix.msc2967.client:device:"): parts = tok.split(":") @@ -250,4 +259,5 @@ class OAuthDelegatedAuth(BaseAuth): user_id=user_id, device_id=device_id, scope=scope, + is_guest=is_guest, ) From 28a9663bdf092541250ae1209f201e57b663dc81 Mon Sep 17 00:00:00 2001 From: Hugh Nimmo-Smith Date: Wed, 16 Nov 2022 17:44:13 +0000 Subject: [PATCH 056/562] Initial tests for OAuth delegation --- tests/handlers/test_oauth_delegation.py | 345 ++++++++++++++++++++++++ 1 file changed, 345 insertions(+) create mode 100644 tests/handlers/test_oauth_delegation.py diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py new file mode 100644 index 0000000000..54f4894819 --- /dev/null +++ b/tests/handlers/test_oauth_delegation.py @@ -0,0 +1,345 @@ +# Copyright 2022 Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Dict +from unittest.mock import ANY, Mock +from urllib.parse import parse_qs + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.api.errors import InvalidClientTokenError +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock + +from tests.test_utils import FakeResponse, get_awaitable_result, simple_async_mock +from tests.unittest import HomeserverTestCase, skip_unless +from tests.utils import mock_getRawHeaders + +try: + import authlib # noqa: F401 + + HAS_AUTHLIB = True +except ImportError: + HAS_AUTHLIB = False + + +# These are a few constants that are used as config parameters in the tests. 
+SERVER_NAME = "test" +ISSUER = "https://issuer/" +CLIENT_ID = "test-client-id" +CLIENT_SECRET = "test-client-secret" +BASE_URL = "https://synapse/" +SCOPES = ["openid"] + +AUTHORIZATION_ENDPOINT = ISSUER + "authorize" +TOKEN_ENDPOINT = ISSUER + "token" +USERINFO_ENDPOINT = ISSUER + "userinfo" +WELL_KNOWN = ISSUER + ".well-known/openid-configuration" +JWKS_URI = ISSUER + ".well-known/jwks.json" +INTROSPECTION_ENDPOINT = ISSUER + "introspect" + +SYNAPSE_ADMIN_SCOPE = "urn:synapse:admin:*" +MATRIX_USER_SCOPE = "urn:matrix:org.matrix.msc2967.client:api:*" +MATRIX_GUEST_SCOPE = "urn:matrix:org.matrix.msc2967.client:api:guest" +DEVICE = "AABBCCDD" +MATRIX_DEVICE_SCOPE = "urn:matrix:org.matrix.msc2967.client:device:" + DEVICE +SUBJECT = "abc-def-ghi" +USERNAME = "test-user" + + +async def get_json(url: str) -> JsonDict: + # Mock get_json calls to handle jwks & oidc discovery endpoints + if url == WELL_KNOWN: + # Minimal discovery document, as defined in OpenID.Discovery + # https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata + return { + "issuer": ISSUER, + "authorization_endpoint": AUTHORIZATION_ENDPOINT, + "token_endpoint": TOKEN_ENDPOINT, + "jwks_uri": JWKS_URI, + "userinfo_endpoint": USERINFO_ENDPOINT, + "introspection_endpoint": INTROSPECTION_ENDPOINT, + "response_types_supported": ["code"], + "subject_types_supported": ["public"], + "id_token_signing_alg_values_supported": ["RS256"], + } + elif url == JWKS_URI: + return {"keys": []} + + return {} + + +@skip_unless(HAS_AUTHLIB, "requires authlib") +class MSC3861OAuthDelegation(HomeserverTestCase): + def default_config(self) -> Dict[str, Any]: + config = super().default_config() + config["public_baseurl"] = BASE_URL + config["oauth_delegation"] = { + "enabled": True, + "issuer": ISSUER, + "client_id": CLIENT_ID, + "client_auth_method": "client_secret_post", + "client_secret": CLIENT_SECRET, + } + return config + + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + self.http_client = Mock(spec=["get_json"]) + self.http_client.get_json.side_effect = get_json + self.http_client.user_agent = b"Synapse Test" + + hs = self.setup_test_homeserver(proxied_http_client=self.http_client) + + self.auth = hs.get_auth() + + return hs + + def _assertParams(self) -> None: + """Assert that the request parameters are correct.""" + params = parse_qs(self.http_client.request.call_args[1]["data"].decode("utf-8")) + self.assertEqual(params["token"], ["mockAccessToken"]) + self.assertEqual(params["client_id"], [CLIENT_ID]) + self.assertEqual(params["client_secret"], [CLIENT_SECRET]) + + def test_inactive_token(self) -> None: + """The handler should return a 403 where the token is inactive.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={"active": False}, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + self.get_failure(self.auth.get_user_by_req(request), InvalidClientTokenError) + self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + self._assertParams() + + def test_active_no_scope(self) -> None: + """The handler should return a 403 where no scope is given.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={"active": True}, + ) + ) + request = Mock(args={}) + 
request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + self.get_failure(self.auth.get_user_by_req(request), InvalidClientTokenError) + self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + self._assertParams() + + def test_active_user_no_subject(self) -> None: + """The handler should return a 500 when no subject is present.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={"active": True, "scope": " ".join([MATRIX_USER_SCOPE])}, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + self.get_failure(self.auth.get_user_by_req(request), InvalidClientTokenError) + self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + self._assertParams() + + def test_active_no_user_scope(self) -> None: + """The handler should return a 500 when no subject is present.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([MATRIX_DEVICE_SCOPE]), + }, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + self.get_failure(self.auth.get_user_by_req(request), InvalidClientTokenError) + self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + self._assertParams() + + def test_active_admin(self) -> None: + """The handler should return a requester with admin rights.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([SYNAPSE_ADMIN_SCOPE]), + "username": USERNAME, + }, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + requester = self.get_success(self.auth.get_user_by_req(request)) + self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + self._assertParams() + self.assertEqual(requester.user.to_string(), "@%s:%s" % (USERNAME, SERVER_NAME)) + self.assertEqual(requester.is_guest, False) + self.assertEqual(requester.device_id, None) + self.assertEqual( + get_awaitable_result(self.auth.is_server_admin(requester)), True + ) + + def test_active_admin_highest_privilege(self) -> None: + """The handler should resolve to the most permissive scope.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join( + [SYNAPSE_ADMIN_SCOPE, MATRIX_USER_SCOPE, MATRIX_GUEST_SCOPE] + ), + "username": USERNAME, + }, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + requester = self.get_success(self.auth.get_user_by_req(request)) + 
self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + self._assertParams() + self.assertEqual(requester.user.to_string(), "@%s:%s" % (USERNAME, SERVER_NAME)) + self.assertEqual(requester.is_guest, False) + self.assertEqual(requester.device_id, None) + self.assertEqual( + get_awaitable_result(self.auth.is_server_admin(requester)), True + ) + + def test_active_user(self) -> None: + """The handler should return a requester with normal user rights.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([MATRIX_USER_SCOPE]), + "username": USERNAME, + }, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + requester = self.get_success(self.auth.get_user_by_req(request)) + self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + self._assertParams() + self.assertEqual(requester.user.to_string(), "@%s:%s" % (USERNAME, SERVER_NAME)) + self.assertEqual(requester.is_guest, False) + self.assertEqual(requester.device_id, None) + self.assertEqual( + get_awaitable_result(self.auth.is_server_admin(requester)), False + ) + + def test_active_user_with_device(self) -> None: + """The handler should return a requester with normal user rights and a device ID.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([MATRIX_USER_SCOPE, MATRIX_DEVICE_SCOPE]), + "username": USERNAME, + }, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + requester = self.get_success(self.auth.get_user_by_req(request)) + self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + self._assertParams() + self.assertEqual(requester.user.to_string(), "@%s:%s" % (USERNAME, SERVER_NAME)) + self.assertEqual(requester.is_guest, False) + self.assertEqual( + get_awaitable_result(self.auth.is_server_admin(requester)), False + ) + self.assertEqual(requester.device_id, DEVICE) + + def test_active_guest_with_device(self) -> None: + """The handler should return a requester with guest user rights and a device ID.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([MATRIX_GUEST_SCOPE, MATRIX_DEVICE_SCOPE]), + "username": USERNAME, + }, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + requester = self.get_success(self.auth.get_user_by_req(request)) + self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + self._assertParams() + self.assertEqual(requester.user.to_string(), "@%s:%s" % (USERNAME, SERVER_NAME)) + self.assertEqual(requester.is_guest, True) + self.assertEqual( + 
get_awaitable_result(self.auth.is_server_admin(requester)), False
+        )
+        self.assertEqual(requester.device_id, DEVICE)

From 5fe96082d09d1af3dc33b62b6a47a6baca02703c Mon Sep 17 00:00:00 2001
From: Hugh Nimmo-Smith
Date: Thu, 17 Nov 2022 14:34:11 +0000
Subject: [PATCH 057/562] Actually enforce guest + return www-authenticate header

---
 synapse/api/auth/oauth_delegated.py     | 18 +++++++++--
 synapse/api/errors.py                   | 28 ++++++++++++++--
 synapse/http/server.py                  |  6 ++++
 tests/handlers/test_oauth_delegation.py | 43 +++++++++++++++++++++++--
 4 files changed, 87 insertions(+), 8 deletions(-)

diff --git a/synapse/api/auth/oauth_delegated.py b/synapse/api/auth/oauth_delegated.py
index cfa178218c..9cb6eb7f79 100644
--- a/synapse/api/auth/oauth_delegated.py
+++ b/synapse/api/auth/oauth_delegated.py
@@ -25,7 +25,12 @@ from twisted.web.client import readBody
 from twisted.web.http_headers import Headers
 
 from synapse.api.auth.base import BaseAuth
-from synapse.api.errors import AuthError, InvalidClientTokenError, StoreError
+from synapse.api.errors import (
+    AuthError,
+    InvalidClientTokenError,
+    OAuthInsufficientScopeError,
+    StoreError,
+)
 from synapse.http.site import SynapseRequest
 from synapse.logging.context import make_deferred_yieldable
 from synapse.types import Requester, UserID, create_requester
@@ -152,7 +157,16 @@ class OAuthDelegatedAuth(BaseAuth):
         allow_expired: bool = False,
     ) -> Requester:
         access_token = self.get_access_token_from_request(request)
-        return await self.get_user_by_access_token(access_token, allow_expired)
+
+        # TODO: we probably want to assert the allow_guest inside this call so that we don't provision the user if they don't have enough permission:
+        requester = await self.get_user_by_access_token(access_token, allow_expired)
+
+        if not allow_guest and requester.is_guest:
+            raise OAuthInsufficientScopeError(
+                ["urn:matrix:org.matrix.msc2967.client:api:*"]
+            )
+
+        return requester
 
     async def get_user_by_access_token(
         self,
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 8c7c94b045..af894243f8 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -119,14 +119,20 @@ class Codes(str, Enum):
 
 
 class CodeMessageException(RuntimeError):
-    """An exception with integer code and message string attributes.
+    """An exception with an integer code, a message string, and optional headers.
 
     Attributes:
         code: HTTP error code
         msg: string describing the error
+        headers: optional response headers to send
     """
 
-    def __init__(self, code: Union[int, HTTPStatus], msg: str):
+    def __init__(
+        self,
+        code: Union[int, HTTPStatus],
+        msg: str,
+        headers: Optional[Dict[str, str]] = None,
+    ):
         super().__init__("%d: %s" % (code, msg))
 
         # Some calls to this method pass instances of http.HTTPStatus for `code`.
@@ -137,6 +143,7 @@ class CodeMessageException(RuntimeError):
         # To eliminate this behaviour, we convert them to their integer equivalents here.
         self.code = int(code)
         self.msg = msg
+        self.headers = headers
 
 
 class RedirectException(CodeMessageException):
@@ -182,6 +189,7 @@ class SynapseError(CodeMessageException):
         msg: str,
         errcode: str = Codes.UNKNOWN,
         additional_fields: Optional[Dict] = None,
+        headers: Optional[Dict[str, str]] = None,
     ):
         """Constructs a synapse error.
 
@@ -190,7 +198,7 @@
             msg: The human-readable error message.
errcode: The matrix error code e.g 'M_FORBIDDEN' """ - super().__init__(code, msg) + super().__init__(code, msg, headers) self.errcode = errcode if additional_fields is None: self._additional_fields: Dict = {} @@ -335,6 +343,20 @@ class AuthError(SynapseError): super().__init__(code, msg, errcode, additional_fields) +class OAuthInsufficientScopeError(SynapseError): + """An error raised when the caller does not have sufficient scope to perform the requested action""" + + def __init__( + self, + required_scopes: List[str], + ): + headers = { + "WWW-Authenticate": 'Bearer error="insufficient_scope", scope="%s"' + % (" ".join(required_scopes)) + } + super().__init__(401, "Insufficient scope", Codes.FORBIDDEN, None, headers) + + class UnstableSpecAuthError(AuthError): """An error raised when a new error code is being proposed to replace a previous one. This error will return a "org.matrix.unstable.errcode" property with the new error code, diff --git a/synapse/http/server.py b/synapse/http/server.py index 101dc2e747..04768c6a23 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -111,6 +111,9 @@ def return_json_error( exc: SynapseError = f.value # type: ignore error_code = exc.code error_dict = exc.error_dict(config) + if exc.headers is not None: + for header, value in exc.headers.items(): + request.setHeader(header, value) logger.info("%s SynapseError: %s - %s", request, error_code, exc.msg) elif f.check(CancelledError): error_code = HTTP_STATUS_REQUEST_CANCELLED @@ -172,6 +175,9 @@ def return_html_error( cme: CodeMessageException = f.value # type: ignore code = cme.code msg = cme.msg + if cme.headers is not None: + for header, value in cme.headers.items(): + request.setHeader(header, value) if isinstance(cme, RedirectException): logger.info("%s redirect to %s", request, cme.location) diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index 54f4894819..bca9db1626 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -17,7 +17,8 @@ from urllib.parse import parse_qs from twisted.test.proto_helpers import MemoryReactor -from synapse.api.errors import InvalidClientTokenError +from synapse.api.errors import InvalidClientTokenError, OAuthInsufficientScopeError +from synapse.rest.client import devices from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -82,6 +83,10 @@ async def get_json(url: str) -> JsonDict: @skip_unless(HAS_AUTHLIB, "requires authlib") class MSC3861OAuthDelegation(HomeserverTestCase): + servlets = [ + devices.register_servlets, + ] + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL @@ -314,7 +319,37 @@ class MSC3861OAuthDelegation(HomeserverTestCase): ) self.assertEqual(requester.device_id, DEVICE) - def test_active_guest_with_device(self) -> None: + def test_active_guest_not_allowed(self) -> None: + """The handler should return an insufficient scope error.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([MATRIX_GUEST_SCOPE, MATRIX_DEVICE_SCOPE]), + "username": USERNAME, + }, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + error = self.get_failure( + self.auth.get_user_by_req(request), OAuthInsufficientScopeError + ) + 
self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + self._assertParams() + self.assertEqual( + getattr(error.value, "headers", {})["WWW-Authenticate"], + 'Bearer error="insufficient_scope", scope="urn:matrix:org.matrix.msc2967.client:api:*"', + ) + + def test_active_guest_allowed(self) -> None: """The handler should return a requester with guest user rights and a device ID.""" self.http_client.request = simple_async_mock( @@ -331,7 +366,9 @@ class MSC3861OAuthDelegation(HomeserverTestCase): request = Mock(args={}) request.args[b"access_token"] = [b"mockAccessToken"] request.requestHeaders.getRawHeaders = mock_getRawHeaders() - requester = self.get_success(self.auth.get_user_by_req(request)) + requester = self.get_success( + self.auth.get_user_by_req(request, allow_guest=True) + ) self.http_client.get_json.assert_called_once_with(WELL_KNOWN) self.http_client.request.assert_called_once_with( method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY From 31691d61511d41286272d779727502e396ce86eb Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Wed, 10 May 2023 16:08:43 +0200 Subject: [PATCH 058/562] Disable account related endpoints when using OAuth delegation --- synapse/handlers/auth.py | 8 +- synapse/rest/client/account.py | 24 ++-- synapse/rest/client/devices.py | 11 +- synapse/rest/client/keys.py | 30 +++- synapse/rest/client/login.py | 3 + synapse/rest/client/logout.py | 3 + synapse/rest/client/register.py | 3 + tests/handlers/test_oauth_delegation.py | 180 +++++++++++++++++++++++- 8 files changed, 243 insertions(+), 19 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index d001f2fb2f..a53984be33 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -274,6 +274,8 @@ class AuthHandler: # response. self._extra_attributes: Dict[str, SsoLoginExtraAttributes] = {} + self.oauth_delegation_enabled = hs.config.auth.oauth_delegation_enabled + async def validate_user_via_ui_auth( self, requester: Requester, @@ -322,8 +324,12 @@ class AuthHandler: LimitExceededError if the ratelimiter's failed request count for this user is too high to proceed - """ + if self.oauth_delegation_enabled: + raise SynapseError( + HTTPStatus.INTERNAL_SERVER_ERROR, "UIA shouldn't be used with MSC3861" + ) + if not requester.access_token_id: raise ValueError("Cannot validate a user without an access token") if can_skip_ui_auth and self._ui_auth_session_timeout: diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 3d0c55daa0..ccd1f7509c 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -27,6 +27,7 @@ from synapse.api.constants import LoginType from synapse.api.errors import ( Codes, InteractiveAuthIncompleteError, + NotFoundError, SynapseError, ThreepidValidationError, ) @@ -600,6 +601,9 @@ class ThreepidRestServlet(RestServlet): # ThreePidBindRestServelet.PostBody with an `alias_generator` to handle # `threePidCreds` versus `three_pid_creds`. 
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + if self.hs.config.auth.oauth_delegation_enabled: + raise NotFoundError(errcode=Codes.UNRECOGNIZED) + if not self.hs.config.registration.enable_3pid_changes: raise SynapseError( 400, "3PID changes are disabled on this server", Codes.FORBIDDEN @@ -890,19 +894,21 @@ class AccountStatusRestServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: if hs.config.worker.worker_app is None: - EmailPasswordRequestTokenRestServlet(hs).register(http_server) - PasswordRestServlet(hs).register(http_server) - DeactivateAccountRestServlet(hs).register(http_server) - EmailThreepidRequestTokenRestServlet(hs).register(http_server) - MsisdnThreepidRequestTokenRestServlet(hs).register(http_server) - AddThreepidEmailSubmitTokenServlet(hs).register(http_server) - AddThreepidMsisdnSubmitTokenServlet(hs).register(http_server) + if not hs.config.auth.oauth_delegation_enabled: + EmailPasswordRequestTokenRestServlet(hs).register(http_server) + DeactivateAccountRestServlet(hs).register(http_server) + PasswordRestServlet(hs).register(http_server) + EmailThreepidRequestTokenRestServlet(hs).register(http_server) + MsisdnThreepidRequestTokenRestServlet(hs).register(http_server) + AddThreepidEmailSubmitTokenServlet(hs).register(http_server) + AddThreepidMsisdnSubmitTokenServlet(hs).register(http_server) ThreepidRestServlet(hs).register(http_server) if hs.config.worker.worker_app is None: - ThreepidAddRestServlet(hs).register(http_server) ThreepidBindRestServlet(hs).register(http_server) ThreepidUnbindRestServlet(hs).register(http_server) - ThreepidDeleteRestServlet(hs).register(http_server) + if not hs.config.auth.oauth_delegation_enabled: + ThreepidAddRestServlet(hs).register(http_server) + ThreepidDeleteRestServlet(hs).register(http_server) WhoamiRestServlet(hs).register(http_server) if hs.config.worker.worker_app is None and hs.config.experimental.msc3720_enabled: diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index e97d0bf475..00e9bff43f 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -19,7 +19,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple from pydantic import Extra, StrictStr from synapse.api import errors -from synapse.api.errors import NotFoundError +from synapse.api.errors import NotFoundError, UnrecognizedRequestError from synapse.handlers.device import DeviceHandler from synapse.http.server import HttpServer from synapse.http.servlet import ( @@ -135,6 +135,7 @@ class DeviceRestServlet(RestServlet): self.device_handler = handler self.auth_handler = hs.get_auth_handler() self._msc3852_enabled = hs.config.experimental.msc3852_enabled + self.oauth_delegation_enabled = hs.config.auth.oauth_delegation_enabled async def on_GET( self, request: SynapseRequest, device_id: str @@ -166,6 +167,9 @@ class DeviceRestServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, device_id: str ) -> Tuple[int, JsonDict]: + if self.oauth_delegation_enabled: + raise UnrecognizedRequestError(code=404) + requester = await self.auth.get_user_by_req(request) try: @@ -344,7 +348,10 @@ class ClaimDehydratedDeviceServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - if hs.config.worker.worker_app is None: + if ( + hs.config.worker.worker_app is None + and not hs.config.auth.oauth_delegation_enabled + ): DeleteDevicesRestServlet(hs).register(http_server) 
DevicesRestServlet(hs).register(http_server) if hs.config.worker.worker_app is None: diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 413edd8a4d..c3ca83c0c8 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -17,9 +17,10 @@ import logging import re from collections import Counter +from http import HTTPStatus from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple -from synapse.api.errors import InvalidAPICallError, SynapseError +from synapse.api.errors import Codes, InvalidAPICallError, SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import ( RestServlet, @@ -375,9 +376,29 @@ class SigningKeyUploadServlet(RestServlet): user_id = requester.user.to_string() body = parse_json_object_from_request(request) - if self.hs.config.experimental.msc3967_enabled: - if await self.e2e_keys_handler.is_cross_signing_set_up_for_user(user_id): - # If we already have a master key then cross signing is set up and we require UIA to reset + is_cross_signing_setup = ( + await self.e2e_keys_handler.is_cross_signing_set_up_for_user(user_id) + ) + + # Before MSC3967 we required UIA both when setting up cross signing for the + # first time and when resetting the device signing key. With MSC3967 we only + # require UIA when resetting cross-signing, and not when setting up the first + # time. Because there is no UIA in MSC3861, for now we throw an error if the + # user tries to reset the device signing key when MSC3861 is enabled, but allow + # first-time setup. + if self.hs.config.auth.oauth_delegation_enabled: + # There is no way to reset the device signing key with MSC3861 + if is_cross_signing_setup: + raise SynapseError( + HTTPStatus.NOT_IMPLEMENTED, + "Resetting cross signing keys is not yet supported with MSC3861", + Codes.UNRECOGNIZED, + ) + # But first-time setup is fine + + elif self.hs.config.experimental.msc3967_enabled: + # If we already have a master key then cross signing is set up and we require UIA to reset + if is_cross_signing_setup: await self.auth_handler.validate_user_via_ui_auth( requester, request, @@ -387,6 +408,7 @@ class SigningKeyUploadServlet(RestServlet): can_skip_ui_auth=False, ) # Otherwise we don't require UIA since we are setting up cross signing for first time + else: # Previous behaviour is to always require UIA but allow it to be skipped await self.auth_handler.validate_user_via_ui_auth( diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index 6ca61ffbd0..4d0eabcb84 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -633,6 +633,9 @@ class CasTicketServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: + if hs.config.auth.oauth_delegation_enabled: + return + LoginRestServlet(hs).register(http_server) if ( hs.config.worker.worker_app is None diff --git a/synapse/rest/client/logout.py b/synapse/rest/client/logout.py index 6d34625ad5..b64a6d5961 100644 --- a/synapse/rest/client/logout.py +++ b/synapse/rest/client/logout.py @@ -80,5 +80,8 @@ class LogoutAllRestServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: + if hs.config.auth.oauth_delegation_enabled: + return + LogoutRestServlet(hs).register(http_server) LogoutAllRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 7f84a17e29..6866988c38 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -955,6 +955,9 
@@ def _calculate_registration_flows( def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: + if hs.config.auth.oauth_delegation_enabled: + return + if hs.config.worker.worker_app is None: EmailRegisterRequestTokenRestServlet(hs).register(http_server) MsisdnRegisterRequestTokenRestServlet(hs).register(http_server) diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index bca9db1626..ee1bc5ca7a 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -11,14 +11,27 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict + +from http import HTTPStatus +from typing import Any, Dict, Union from unittest.mock import ANY, Mock from urllib.parse import parse_qs +from signedjson.key import ( + encode_verify_key_base64, + generate_signing_key, + get_verify_key, +) +from signedjson.sign import sign_json + from twisted.test.proto_helpers import MemoryReactor -from synapse.api.errors import InvalidClientTokenError, OAuthInsufficientScopeError -from synapse.rest.client import devices +from synapse.api.errors import ( + Codes, + InvalidClientTokenError, + OAuthInsufficientScopeError, +) +from synapse.rest.client import account, devices, keys, login, logout, register from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -57,6 +70,7 @@ DEVICE = "AABBCCDD" MATRIX_DEVICE_SCOPE = "urn:matrix:org.matrix.msc2967.client:device:" + DEVICE SUBJECT = "abc-def-ghi" USERNAME = "test-user" +USER_ID = "@" + USERNAME + ":" + SERVER_NAME async def get_json(url: str) -> JsonDict: @@ -84,7 +98,12 @@ async def get_json(url: str) -> JsonDict: @skip_unless(HAS_AUTHLIB, "requires authlib") class MSC3861OAuthDelegation(HomeserverTestCase): servlets = [ + account.register_servlets, devices.register_servlets, + keys.register_servlets, + register.register_servlets, + login.register_servlets, + logout.register_servlets, ] def default_config(self) -> Dict[str, Any]: @@ -380,3 +399,158 @@ class MSC3861OAuthDelegation(HomeserverTestCase): get_awaitable_result(self.auth.is_server_admin(requester)), False ) self.assertEqual(requester.device_id, DEVICE) + + def make_device_keys(self, user_id: str, device_id: str) -> JsonDict: + # We only generate a master key to simplify the test. 
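+        # (A full cross-signing setup would also include self-signing and
+        # user-signing keys, each signed by the master key.)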
+ master_signing_key = generate_signing_key(device_id) + master_verify_key = encode_verify_key_base64(get_verify_key(master_signing_key)) + + return { + "master_key": sign_json( + { + "user_id": user_id, + "usage": ["master"], + "keys": {"ed25519:" + master_verify_key: master_verify_key}, + }, + user_id, + master_signing_key, + ), + } + + def test_cross_signing(self) -> None: + """Try uploading device keys with OAuth delegation enabled.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([MATRIX_USER_SCOPE, MATRIX_DEVICE_SCOPE]), + "username": USERNAME, + }, + ) + ) + keys_upload_body = self.make_device_keys(USER_ID, DEVICE) + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + keys_upload_body, + access_token="mockAccessToken", + ) + + self.assertEqual(channel.code, 200, channel.json_body) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + keys_upload_body, + access_token="mockAccessToken", + ) + + self.assertEqual(channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body) + + def expect_unauthorized( + self, method: str, path: str, content: Union[bytes, str, JsonDict] = "" + ) -> None: + channel = self.make_request(method, path, content, shorthand=False) + + self.assertEqual(channel.code, 401, channel.json_body) + + def expect_unrecognized( + self, method: str, path: str, content: Union[bytes, str, JsonDict] = "" + ) -> None: + channel = self.make_request(method, path, content) + + self.assertEqual(channel.code, 404, channel.json_body) + self.assertEqual( + channel.json_body["errcode"], Codes.UNRECOGNIZED, channel.json_body + ) + + def test_uia_endpoints(self) -> None: + """Test that endpoints that were removed in MSC2964 are no longer available.""" + + # This is just an endpoint that should remain visible (but requires auth): + self.expect_unauthorized("GET", "/_matrix/client/v3/devices") + + # This remains usable, but will require a uia scope: + self.expect_unauthorized( + "POST", "/_matrix/client/v3/keys/device_signing/upload" + ) + + def test_3pid_endpoints(self) -> None: + """Test that 3pid account management endpoints that were removed in MSC2964 are no longer available.""" + + # Remains and requires auth: + self.expect_unauthorized("GET", "/_matrix/client/v3/account/3pid") + self.expect_unauthorized( + "POST", + "/_matrix/client/v3/account/3pid/bind", + { + "client_secret": "foo", + "id_access_token": "bar", + "id_server": "foo", + "sid": "bar", + }, + ) + self.expect_unauthorized("POST", "/_matrix/client/v3/account/3pid/unbind", {}) + + # These are gone: + self.expect_unrecognized( + "POST", "/_matrix/client/v3/account/3pid" + ) # deprecated + self.expect_unrecognized("POST", "/_matrix/client/v3/account/3pid/add") + self.expect_unrecognized("POST", "/_matrix/client/v3/account/3pid/delete") + self.expect_unrecognized( + "POST", "/_matrix/client/v3/account/3pid/email/requestToken" + ) + self.expect_unrecognized( + "POST", "/_matrix/client/v3/account/3pid/msisdn/requestToken" + ) + + def test_account_management_endpoints_removed(self) -> None: + """Test that account management endpoints that were removed in MSC2964 are no longer available.""" + self.expect_unrecognized("POST", "/_matrix/client/v3/account/deactivate") + self.expect_unrecognized("POST", "/_matrix/client/v3/account/password") + self.expect_unrecognized( + "POST", "/_matrix/client/v3/account/password/email/requestToken" + 
+        )
+        self.expect_unrecognized(
+            "POST", "/_matrix/client/v3/account/password/msisdn/requestToken"
+        )
+
+    def test_registration_endpoints_removed(self) -> None:
+        """Test that registration endpoints that were removed in MSC2964 are no longer available."""
+        self.expect_unrecognized(
+            "GET", "/_matrix/client/v1/register/m.login.registration_token/validity"
+        )
+        self.expect_unrecognized("POST", "/_matrix/client/v3/register")
+        self.expect_unrecognized("GET", "/_matrix/client/v3/register")
+        self.expect_unrecognized("GET", "/_matrix/client/v3/register/available")
+        self.expect_unrecognized(
+            "POST", "/_matrix/client/v3/register/email/requestToken"
+        )
+        self.expect_unrecognized(
+            "POST", "/_matrix/client/v3/register/msisdn/requestToken"
+        )
+
+    def test_session_management_endpoints_removed(self) -> None:
+        """Test that session management endpoints that were removed in MSC2964 are no longer available."""
+        self.expect_unrecognized("GET", "/_matrix/client/v3/login")
+        self.expect_unrecognized("POST", "/_matrix/client/v3/login")
+        self.expect_unrecognized("GET", "/_matrix/client/v3/login/sso/redirect")
+        self.expect_unrecognized("POST", "/_matrix/client/v3/logout")
+        self.expect_unrecognized("POST", "/_matrix/client/v3/logout/all")
+        self.expect_unrecognized("POST", "/_matrix/client/v3/refresh")
+        self.expect_unrecognized("GET", "/_matrix/static/client/login")
+
+    def test_device_management_endpoints_removed(self) -> None:
+        """Test that device management endpoints that were removed in MSC2964 are no longer available."""
+        self.expect_unrecognized("POST", "/_matrix/client/v3/delete_devices")
+        self.expect_unrecognized("DELETE", f"/_matrix/client/v3/devices/{DEVICE}")
+
+    def test_openid_endpoints_removed(self) -> None:
+        """Test that OpenID id_token endpoints that were removed in MSC2964 are no longer available."""
+        self.expect_unrecognized(
+            "POST", f"/_matrix/client/v3/user/{USERNAME}/openid/request_token"
+        )
From 03920bdd4e9390d74762ecd923ddf0d6c75d222e Mon Sep 17 00:00:00 2001
From: Hugh Nimmo-Smith
Date: Mon, 6 Feb 2023 17:12:42 +0000
Subject: [PATCH 059/562] Test MSC2965 implementation: well-known discovery
 document

---
 tests/rest/test_well_known.py | 38 +++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py
index 2091b08d89..34333d88df 100644
--- a/tests/rest/test_well_known.py
+++ b/tests/rest/test_well_known.py
@@ -17,6 +17,13 @@
 from synapse.rest.well_known import well_known_resource

 from tests import unittest

+try:
+    import authlib  # noqa: F401
+
+    HAS_AUTHLIB = True
+except ImportError:
+    HAS_AUTHLIB = False
+

 class WellKnownTests(unittest.HomeserverTestCase):
     def create_test_resource(self) -> Resource:
@@ -96,3 +103,34 @@ class WellKnownTests(unittest.HomeserverTestCase):
             "GET", "/.well-known/matrix/server", shorthand=False
         )
         self.assertEqual(channel.code, 404)
+
+    @unittest.skip_unless(HAS_AUTHLIB, "requires authlib")
+    @unittest.override_config(
+        {
+            "public_baseurl": "https://homeserver",  # this is only required so that client well known is served
+            "oauth_delegation": {
+                "enabled": True,
+                "issuer": "https://issuer",
+                "account": "https://my-account.issuer",
+                "client_id": "id",
+                "client_auth_method": "client_secret_post",
+                "client_secret": "secret",
+            },
+        }
+    )
+    def test_client_well_known_msc3861_oauth_delegation(self) -> None:
+        channel = self.make_request(
+            "GET", "/.well-known/matrix/client", shorthand=False
+        )
+
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(
channel.json_body, + { + "m.homeserver": {"base_url": "https://homeserver/"}, + "org.matrix.msc2965.authentication": { + "issuer": "https://issuer", + "account": "https://my-account.issuer", + }, + }, + ) From 249f4a338dde0c1bcde5e14121d8d9fa156f185f Mon Sep 17 00:00:00 2001 From: Hugh Nimmo-Smith Date: Tue, 9 May 2023 16:20:04 +0200 Subject: [PATCH 060/562] Refactor config to be an experimental feature Also enforce you can't combine it with incompatible config options --- ...auth_delegated.py => msc3861_delegated.py} | 53 ++--- synapse/config/auth.py | 39 +--- synapse/config/experimental.py | 193 ++++++++++++++++- synapse/handlers/auth.py | 4 +- synapse/module_api/__init__.py | 7 + synapse/rest/client/account.py | 6 +- synapse/rest/client/devices.py | 6 +- synapse/rest/client/keys.py | 2 +- synapse/rest/client/login.py | 2 +- synapse/rest/client/logout.py | 2 +- synapse/rest/client/register.py | 2 +- synapse/rest/synapse/client/__init__.py | 2 +- synapse/rest/synapse/client/jwks.py | 8 +- synapse/rest/well_known.py | 9 +- synapse/server.py | 6 +- tests/config/test_oauth_delegation.py | 202 ++++++++++++++++++ tests/handlers/test_oauth_delegation.py | 15 +- tests/rest/test_well_known.py | 17 +- 18 files changed, 479 insertions(+), 96 deletions(-) rename synapse/api/auth/{oauth_delegated.py => msc3861_delegated.py} (87%) create mode 100644 tests/config/test_oauth_delegation.py diff --git a/synapse/api/auth/oauth_delegated.py b/synapse/api/auth/msc3861_delegated.py similarity index 87% rename from synapse/api/auth/oauth_delegated.py rename to synapse/api/auth/msc3861_delegated.py index 9cb6eb7f79..4ca3280bd3 100644 --- a/synapse/api/auth/oauth_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -65,7 +65,7 @@ class PrivateKeyJWTWithKid(PrivateKeyJWT): ) -class OAuthDelegatedAuth(BaseAuth): +class MSC3861DelegatedAuth(BaseAuth): AUTH_METHODS = { "client_secret_post": encode_client_secret_post, "client_secret_basic": encode_client_secret_basic, @@ -78,35 +78,38 @@ class OAuthDelegatedAuth(BaseAuth): def __init__(self, hs: "HomeServer"): super().__init__(hs) - self._config = hs.config.auth - assert self._config.oauth_delegation_enabled, "OAuth delegation is not enabled" - assert self._config.oauth_delegation_issuer, "No issuer provided" - assert self._config.oauth_delegation_client_id, "No client_id provided" - assert self._config.oauth_delegation_client_secret, "No client_secret provided" - assert ( - self._config.oauth_delegation_client_auth_method - in OAuthDelegatedAuth.AUTH_METHODS - ), "Invalid client_auth_method" + self._config = hs.config.experimental.msc3861 + auth_method = MSC3861DelegatedAuth.AUTH_METHODS.get( + self._config.client_auth_method.value, None + ) + # Those assertions are already checked when parsing the config + assert self._config.enabled, "OAuth delegation is not enabled" + assert self._config.issuer, "No issuer provided" + assert self._config.client_id, "No client_id provided" + assert auth_method is not None, "Invalid client_auth_method provided" self._http_client = hs.get_proxied_http_client() self._hostname = hs.hostname self._issuer_metadata = RetryOnExceptionCachedCall(self._load_metadata) - secret = self._config.oauth_delegation_client_secret - self._client_auth = ClientAuth( - self._config.oauth_delegation_client_id, - secret, - OAuthDelegatedAuth.AUTH_METHODS[ - self._config.oauth_delegation_client_auth_method - ], - ) + + if isinstance(auth_method, PrivateKeyJWTWithKid): + # Use the JWK as the client secret when using the private_key_jwt method + 
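+            # (the config layer has already parsed this into a JsonWebKey via its converter)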
assert self._config.jwk, "No JWK provided" + self._client_auth = ClientAuth( + self._config.client_id, self._config.jwk, auth_method + ) + else: + # Else use the client secret + assert self._config.client_secret, "No client_secret provided" + self._client_auth = ClientAuth( + self._config.client_id, self._config.client_secret, auth_method + ) async def _load_metadata(self) -> OpenIDProviderMetadata: - if self._config.oauth_delegation_issuer_metadata is not None: - return OpenIDProviderMetadata( - **self._config.oauth_delegation_issuer_metadata - ) - url = get_well_known_url(self._config.oauth_delegation_issuer, external=True) + if self._config.issuer_metadata is not None: + return OpenIDProviderMetadata(**self._config.issuer_metadata) + url = get_well_known_url(self._config.issuer, external=True) response = await self._http_client.get_json(url) metadata = OpenIDProviderMetadata(**response) # metadata.validate_introspection_endpoint() @@ -203,7 +206,7 @@ class OAuthDelegatedAuth(BaseAuth): ) user_id_str = await self.store.get_user_by_external_id( - OAuthDelegatedAuth.EXTERNAL_ID_PROVIDER, sub + MSC3861DelegatedAuth.EXTERNAL_ID_PROVIDER, sub ) if user_id_str is None: # If we could not find a user via the external_id, it either does not exist, @@ -236,7 +239,7 @@ class OAuthDelegatedAuth(BaseAuth): # And record the sub as external_id await self.store.record_user_external_id( - OAuthDelegatedAuth.EXTERNAL_ID_PROVIDER, sub, user_id.to_string() + MSC3861DelegatedAuth.EXTERNAL_ID_PROVIDER, sub, user_id.to_string() ) else: user_id = UserID.from_string(user_id_str) diff --git a/synapse/config/auth.py b/synapse/config/auth.py index 25b5cc60dc..12e853980e 100644 --- a/synapse/config/auth.py +++ b/synapse/config/auth.py @@ -14,11 +14,9 @@ # limitations under the License. from typing import Any -from authlib.jose.rfc7517 import JsonWebKey - from synapse.types import JsonDict -from ._base import Config, ConfigError +from ._base import Config class AuthConfig(Config): @@ -31,7 +29,14 @@ class AuthConfig(Config): if password_config is None: password_config = {} - passwords_enabled = password_config.get("enabled", True) + # The default value of password_config.enabled is True, unless msc3861 is enabled. + msc3861_enabled = ( + config.get("experimental_features", {}) + .get("msc3861", {}) + .get("enabled", False) + ) + passwords_enabled = password_config.get("enabled", not msc3861_enabled) + # 'only_for_reauth' allows users who have previously set a password to use it, # even though passwords would otherwise be disabled. 
passwords_for_reauth_only = passwords_enabled == "only_for_reauth" @@ -55,29 +60,3 @@ class AuthConfig(Config): self.ui_auth_session_timeout = self.parse_duration( ui_auth.get("session_timeout", 0) ) - - oauth_delegation = config.get("oauth_delegation", {}) - self.oauth_delegation_enabled = oauth_delegation.get("enabled", False) - self.oauth_delegation_issuer = oauth_delegation.get("issuer", "") - self.oauth_delegation_issuer_metadata = oauth_delegation.get("issuer_metadata") - self.oauth_delegation_account = oauth_delegation.get("account", "") - self.oauth_delegation_client_id = oauth_delegation.get("client_id", "") - self.oauth_delegation_client_secret = oauth_delegation.get("client_secret", "") - self.oauth_delegation_client_auth_method = oauth_delegation.get( - "client_auth_method", "client_secret_post" - ) - - self.password_enabled = password_config.get( - "enabled", not self.oauth_delegation_enabled - ) - - if self.oauth_delegation_client_auth_method == "private_key_jwt": - self.oauth_delegation_client_secret = JsonWebKey.import_key( - self.oauth_delegation_client_secret - ) - - # If we are delegating via OAuth then password cannot be supported as well - if self.oauth_delegation_enabled and self.password_enabled: - raise ConfigError( - "Password auth cannot be enabled when OAuth delegation is enabled" - ) diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index d769b7f668..b9607975f9 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -12,15 +12,196 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Optional +import enum +from typing import TYPE_CHECKING, Any, Optional import attr +import attr.validators from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions from synapse.config import ConfigError -from synapse.config._base import Config +from synapse.config._base import Config, RootConfig from synapse.types import JsonDict +# Determine whether authlib is installed. +try: + import authlib # noqa: F401 + + HAS_AUTHLIB = True +except ImportError: + HAS_AUTHLIB = False + +if TYPE_CHECKING: + # Only import this if we're type checking, as it might not be installed at runtime. + from authlib.jose.rfc7517 import JsonWebKey + + +class ClientAuthMethod(enum.Enum): + """List of supported client auth methods.""" + + CLIENT_SECRET_POST = "client_secret_post" + CLIENT_SECRET_BASIC = "client_secret_basic" + CLIENT_SECRET_JWT = "client_secret_jwt" + PRIVATE_KEY_JWT = "private_key_jwt" + + +def _parse_jwks(jwks: Optional[JsonDict]) -> Optional["JsonWebKey"]: + """A helper function to parse a JWK dict into a JsonWebKey.""" + + if jwks is None: + return None + + from authlib.jose.rfc7517 import JsonWebKey + + return JsonWebKey.import_key(jwks) + + +@attr.s(slots=True, frozen=True) +class MSC3861: + """Configuration for MSC3861: Matrix architecture change to delegate authentication via OIDC""" + + enabled: bool = attr.ib(default=False, validator=attr.validators.instance_of(bool)) + """Whether to enable MSC3861 auth delegation.""" + + @enabled.validator + def _check_enabled(self, attribute: attr.Attribute, value: bool) -> None: + # Only allow enabling MSC3861 if authlib is installed + if value and not HAS_AUTHLIB: + raise ConfigError( + "MSC3861 is enabled but authlib is not installed. " + "Please install authlib to use MSC3861." 
+ ) + + issuer: str = attr.ib(default="", validator=attr.validators.instance_of(str)) + """The URL of the OIDC Provider.""" + + issuer_metadata: Optional[JsonDict] = attr.ib(default=None) + """The issuer metadata to use, otherwise discovered from /.well-known/openid-configuration as per MSC2965.""" + + client_id: str = attr.ib( + default="", + validator=attr.validators.instance_of(str), + ) + """The client ID to use when calling the introspection endpoint.""" + + client_auth_method: ClientAuthMethod = attr.ib( + default=ClientAuthMethod.CLIENT_SECRET_POST, converter=ClientAuthMethod + ) + """The auth method used when calling the introspection endpoint.""" + + client_secret: Optional[str] = attr.ib( + default=None, + validator=attr.validators.optional(attr.validators.instance_of(str)), + ) + """ + The client secret to use when calling the introspection endpoint, + when using any of the client_secret_* client auth methods. + """ + + jwk: Optional["JsonWebKey"] = attr.ib(default=None, converter=_parse_jwks) + """ + The JWKS to use when calling the introspection endpoint, + when using the private_key_jwt client auth method. + """ + + @client_auth_method.validator + def _check_client_auth_method( + self, attribute: attr.Attribute, value: ClientAuthMethod + ) -> None: + # Check that the right client credentials are provided for the client auth method. + if not self.enabled: + return + + if value == ClientAuthMethod.PRIVATE_KEY_JWT and self.jwk is None: + raise ConfigError( + "A JWKS must be provided when using the private_key_jwt client auth method" + ) + + if ( + value + in ( + ClientAuthMethod.CLIENT_SECRET_POST, + ClientAuthMethod.CLIENT_SECRET_BASIC, + ClientAuthMethod.CLIENT_SECRET_JWT, + ) + and self.client_secret is None + ): + raise ConfigError( + f"A client secret must be provided when using the {value} client auth method" + ) + + account_management_url: Optional[str] = attr.ib( + default=None, + validator=attr.validators.optional(attr.validators.instance_of(str)), + ) + """The URL of the My Account page on the OIDC Provider as per MSC2965.""" + + def check_config_conflicts(self, root: RootConfig) -> None: + """Checks for any configuration conflicts with other parts of Synapse. + + Raises: + ConfigError: If there are any configuration conflicts. 
+ """ + + if not self.enabled: + return + + if ( + root.auth.password_enabled_for_reauth + or root.auth.password_enabled_for_login + ): + raise ConfigError( + "Password auth cannot be enabled when OAuth delegation is enabled" + ) + + if root.registration.enable_registration: + raise ConfigError( + "Registration cannot be enabled when OAuth delegation is enabled" + ) + + if ( + root.oidc.oidc_enabled + or root.saml2.saml2_enabled + or root.cas.cas_enabled + or root.jwt.jwt_enabled + ): + raise ConfigError("SSO cannot be enabled when OAuth delegation is enabled") + + if bool(root.authproviders.password_providers): + raise ConfigError( + "Password auth providers cannot be enabled when OAuth delegation is enabled" + ) + + if root.captcha.enable_registration_captcha: + raise ConfigError( + "CAPTCHA cannot be enabled when OAuth delegation is enabled" + ) + + if root.experimental.msc3882_enabled: + raise ConfigError( + "MSC3882 cannot be enabled when OAuth delegation is enabled" + ) + + if root.registration.refresh_token_lifetime: + raise ConfigError( + "refresh_token_lifetime cannot be set when OAuth delegation is enabled" + ) + + if root.registration.nonrefreshable_access_token_lifetime: + raise ConfigError( + "nonrefreshable_access_token_lifetime cannot be set when OAuth delegation is enabled" + ) + + if root.registration.session_lifetime: + raise ConfigError( + "session_lifetime cannot be set when OAuth delegation is enabled" + ) + + if not root.experimental.msc3970_enabled: + raise ConfigError( + "experimental_features.msc3970_enabled must be 'true' when OAuth delegation is enabled" + ) + @attr.s(auto_attribs=True, frozen=True, slots=True) class MSC3866Config: @@ -182,8 +363,14 @@ class ExperimentalConfig(Config): "msc3981_recurse_relations", False ) + # MSC3861: Matrix architecture change to delegate authentication via OIDC + self.msc3861 = MSC3861(**experimental.get("msc3861", {})) + # MSC3970: Scope transaction IDs to devices - self.msc3970_enabled = experimental.get("msc3970_enabled", False) + self.msc3970_enabled = experimental.get("msc3970_enabled", self.msc3861.enabled) + + # Check that none of the other config options conflict with MSC3861 when enabled + self.msc3861.check_config_conflicts(self.root) # MSC4009: E.164 Matrix IDs self.msc4009_e164_mxids = experimental.get("msc4009_e164_mxids", False) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index a53984be33..4f986d90cb 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -274,7 +274,7 @@ class AuthHandler: # response. 
self._extra_attributes: Dict[str, SsoLoginExtraAttributes] = {} - self.oauth_delegation_enabled = hs.config.auth.oauth_delegation_enabled + self.msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled async def validate_user_via_ui_auth( self, @@ -325,7 +325,7 @@ class AuthHandler: LimitExceededError if the ratelimiter's failed request count for this user is too high to proceed """ - if self.oauth_delegation_enabled: + if self.msc3861_oauth_delegation_enabled: raise SynapseError( HTTPStatus.INTERNAL_SERVER_ERROR, "UIA shouldn't be used with MSC3861" ) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 0e9f366cba..134bd2e620 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -38,6 +38,7 @@ from twisted.web.resource import Resource from synapse.api import errors from synapse.api.errors import SynapseError +from synapse.config import ConfigError from synapse.events import EventBase from synapse.events.presence_router import ( GET_INTERESTED_USERS_CALLBACK, @@ -252,6 +253,7 @@ class ModuleApi: self._device_handler = hs.get_device_handler() self.custom_template_dir = hs.config.server.custom_template_directory self._callbacks = hs.get_module_api_callbacks() + self.msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled try: app_name = self._hs.config.email.email_app_name @@ -419,6 +421,11 @@ class ModuleApi: Added in Synapse v1.46.0. """ + if self.msc3861_oauth_delegation_enabled: + raise ConfigError( + "Cannot use password auth provider callbacks when OAuth delegation is enabled" + ) + return self._password_auth_provider.register_password_auth_provider_callbacks( check_3pid_auth=check_3pid_auth, on_logged_out=on_logged_out, diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index ccd1f7509c..679ab9f266 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -601,7 +601,7 @@ class ThreepidRestServlet(RestServlet): # ThreePidBindRestServelet.PostBody with an `alias_generator` to handle # `threePidCreds` versus `three_pid_creds`. 
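+    # With MSC3861, threepid management is expected to move to the OIDC provider,
+    # which is why on_POST below answers 404/M_UNRECOGNIZED when delegation is enabled.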
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if self.hs.config.auth.oauth_delegation_enabled: + if self.hs.config.experimental.msc3861.enabled: raise NotFoundError(errcode=Codes.UNRECOGNIZED) if not self.hs.config.registration.enable_3pid_changes: @@ -894,7 +894,7 @@ class AccountStatusRestServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: if hs.config.worker.worker_app is None: - if not hs.config.auth.oauth_delegation_enabled: + if not hs.config.experimental.msc3861.enabled: EmailPasswordRequestTokenRestServlet(hs).register(http_server) DeactivateAccountRestServlet(hs).register(http_server) PasswordRestServlet(hs).register(http_server) @@ -906,7 +906,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: if hs.config.worker.worker_app is None: ThreepidBindRestServlet(hs).register(http_server) ThreepidUnbindRestServlet(hs).register(http_server) - if not hs.config.auth.oauth_delegation_enabled: + if not hs.config.experimental.msc3861.enabled: ThreepidAddRestServlet(hs).register(http_server) ThreepidDeleteRestServlet(hs).register(http_server) WhoamiRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index 00e9bff43f..38dff9703f 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -135,7 +135,7 @@ class DeviceRestServlet(RestServlet): self.device_handler = handler self.auth_handler = hs.get_auth_handler() self._msc3852_enabled = hs.config.experimental.msc3852_enabled - self.oauth_delegation_enabled = hs.config.auth.oauth_delegation_enabled + self._msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled async def on_GET( self, request: SynapseRequest, device_id: str @@ -167,7 +167,7 @@ class DeviceRestServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, device_id: str ) -> Tuple[int, JsonDict]: - if self.oauth_delegation_enabled: + if self._msc3861_oauth_delegation_enabled: raise UnrecognizedRequestError(code=404) requester = await self.auth.get_user_by_req(request) @@ -350,7 +350,7 @@ class ClaimDehydratedDeviceServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: if ( hs.config.worker.worker_app is None - and not hs.config.auth.oauth_delegation_enabled + and not hs.config.experimental.msc3861.enabled ): DeleteDevicesRestServlet(hs).register(http_server) DevicesRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index c3ca83c0c8..70b8be1aa2 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -386,7 +386,7 @@ class SigningKeyUploadServlet(RestServlet): # time. Because there is no UIA in MSC3861, for now we throw an error if the # user tries to reset the device signing key when MSC3861 is enabled, but allow # first-time setup. 
- if self.hs.config.auth.oauth_delegation_enabled: + if self.hs.config.experimental.msc3861.enabled: # There is no way to reset the device signing key with MSC3861 if is_cross_signing_setup: raise SynapseError( diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index 4d0eabcb84..d4dc2462b9 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -633,7 +633,7 @@ class CasTicketServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - if hs.config.auth.oauth_delegation_enabled: + if hs.config.experimental.msc3861.enabled: return LoginRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/logout.py b/synapse/rest/client/logout.py index b64a6d5961..94ad90942f 100644 --- a/synapse/rest/client/logout.py +++ b/synapse/rest/client/logout.py @@ -80,7 +80,7 @@ class LogoutAllRestServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - if hs.config.auth.oauth_delegation_enabled: + if hs.config.experimental.msc3861.enabled: return LogoutRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 6866988c38..f8fb0e1dee 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -955,7 +955,7 @@ def _calculate_registration_flows( def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - if hs.config.auth.oauth_delegation_enabled: + if hs.config.experimental.msc3861.enabled: return if hs.config.worker.worker_app is None: diff --git a/synapse/rest/synapse/client/__init__.py b/synapse/rest/synapse/client/__init__.py index dcfd0ad6aa..57335fb913 100644 --- a/synapse/rest/synapse/client/__init__.py +++ b/synapse/rest/synapse/client/__init__.py @@ -47,7 +47,7 @@ def build_synapse_client_resource_tree(hs: "HomeServer") -> Mapping[str, Resourc } # Expose the JWKS endpoint if OAuth2 delegation is enabled - if hs.config.auth.oauth_delegation_enabled: + if hs.config.experimental.msc3861.enabled: from synapse.rest.synapse.client.jwks import JwksResource resources["/_synapse/jwks"] = JwksResource(hs) diff --git a/synapse/rest/synapse/client/jwks.py b/synapse/rest/synapse/client/jwks.py index 818585843e..7c0a1223fb 100644 --- a/synapse/rest/synapse/client/jwks.py +++ b/synapse/rest/synapse/client/jwks.py @@ -26,8 +26,6 @@ logger = logging.getLogger(__name__) class JwksResource(DirectServeJsonResource): def __init__(self, hs: "HomeServer"): - from authlib.jose.rfc7517 import Key - super().__init__(extract_context=True) # Parameters that are allowed to be exposed in the public key. 
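+        # (private JWK members such as "d", "p" and "q" are deliberately absent from this allow-list)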
@@ -53,10 +51,10 @@ class JwksResource(DirectServeJsonResource): "ext", } - secret = hs.config.auth.oauth_delegation_client_secret + key = hs.config.experimental.msc3861.jwk - if isinstance(secret, Key): - private_key = secret.as_dict() + if key is not None: + private_key = key.as_dict() public_key = { k: v for k, v in private_key.items() if k in public_parameters } diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py index fd3b17a5ad..b8b4b5379b 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py @@ -44,14 +44,15 @@ class WellKnownBuilder: "base_url": self._config.registration.default_identity_server } - if self._config.auth.oauth_delegation_enabled: + # We use the MSC3861 values as they are used by multiple MSCs + if self._config.experimental.msc3861.enabled: result["org.matrix.msc2965.authentication"] = { - "issuer": self._config.auth.oauth_delegation_issuer + "issuer": self._config.experimental.msc3861.issuer } - if self._config.auth.oauth_delegation_account != "": + if self._config.experimental.msc3861.account_management_url is not None: result["org.matrix.msc2965.authentication"][ "account" - ] = self._config.auth.oauth_delegation_account + ] = self._config.experimental.msc3861.account_management_url if self._config.server.extra_well_known_client_content: for ( diff --git a/synapse/server.py b/synapse/server.py index 1c82500f30..0f36ef69cb 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -428,10 +428,10 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_auth(self) -> Auth: - if self.config.auth.oauth_delegation_enabled: - from synapse.api.auth.oauth_delegated import OAuthDelegatedAuth + if self.config.experimental.msc3861.enabled: + from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth - return OAuthDelegatedAuth(self) + return MSC3861DelegatedAuth(self) return InternalAuth(self) @cache_in_self diff --git a/tests/config/test_oauth_delegation.py b/tests/config/test_oauth_delegation.py new file mode 100644 index 0000000000..c5fc6d6ebb --- /dev/null +++ b/tests/config/test_oauth_delegation.py @@ -0,0 +1,202 @@ +# Copyright 2023 Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Dict +from unittest.mock import Mock + +from synapse.config import ConfigError +from synapse.module_api import ModuleApi +from synapse.types import JsonDict + +from tests.server import get_clock +from tests.unittest import HomeserverTestCase, override_config, skip_unless + +try: + import authlib # noqa: F401 + + HAS_AUTHLIB = True +except ImportError: + HAS_AUTHLIB = False + + +# These are a few constants that are used as config parameters in the tests. 
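+# They are deliberately dummy values: these tests only exercise config parsing and
+# never reach out to a real issuer.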
+SERVER_NAME = "test" +ISSUER = "https://issuer/" +CLIENT_ID = "test-client-id" +CLIENT_SECRET = "test-client-secret" +BASE_URL = "https://synapse/" + + +class CustomAuthModule: + """A module which registers a password auth provider.""" + + @staticmethod + def parse_config(config: JsonDict) -> None: + pass + + def __init__(self, config: None, api: ModuleApi): + api.register_password_auth_provider_callbacks( + auth_checkers={("m.login.password", ("password",)): Mock()}, + ) + + +@skip_unless(HAS_AUTHLIB, "requires authlib") +class MSC3861OAuthDelegation(HomeserverTestCase): + """Test that the Homeserver fails to initialize if the config is invalid.""" + + def setUp(self) -> None: + self.reactor, self.clock = get_clock() + self._hs_args = {"clock": self.clock, "reactor": self.reactor} + + def default_config(self) -> Dict[str, Any]: + config = super().default_config() + config["public_baseurl"] = BASE_URL + if "experimental_features" not in config: + config["experimental_features"] = {} + config["experimental_features"]["msc3861"] = { + "enabled": True, + "issuer": ISSUER, + "client_id": CLIENT_ID, + "client_auth_method": "client_secret_post", + "client_secret": CLIENT_SECRET, + } + return config + + def test_registration_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "password_config": { + "enabled": True, + }, + } + ) + def test_password_config_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "oidc_providers": [ + { + "idp_id": "microsoft", + "idp_name": "Microsoft", + "issuer": "https://login.microsoftonline.com//v2.0", + "client_id": "", + "client_secret": "", + "scopes": ["openid", "profile"], + "authorization_endpoint": "https://login.microsoftonline.com//oauth2/v2.0/authorize", + "token_endpoint": "https://login.microsoftonline.com//oauth2/v2.0/token", + "userinfo_endpoint": "https://graph.microsoft.com/oidc/userinfo", + } + ], + } + ) + def test_oidc_sso_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "cas_config": { + "enabled": True, + "server_url": "https://cas-server.com", + "displayname_attribute": "name", + "required_attributes": {"userGroup": "staff", "department": "None"}, + }, + } + ) + def test_cas_sso_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "modules": [ + { + "module": f"{__name__}.{CustomAuthModule.__qualname__}", + "config": {}, + } + ], + } + ) + def test_auth_providers_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "jwt_config": { + "enabled": True, + "secret": "my-secret-token", + "algorithm": "HS256", + }, + } + ) + def test_jwt_auth_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "experimental_features": { + "msc3882_enabled": True, + }, + } + ) + def test_msc3882_auth_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "recaptcha_public_key": "test", + 
"recaptcha_private_key": "test", + "enable_registration_captcha": True, + } + ) + def test_captcha_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "refresh_token_lifetime": "24h", + "refreshable_access_token_lifetime": "10m", + "nonrefreshable_access_token_lifetime": "24h", + } + ) + def test_refreshable_tokens_cannot_be_enabled(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "session_lifetime": "24h", + } + ) + def test_session_lifetime_cannot_be_set(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index ee1bc5ca7a..081fef51ec 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -109,12 +109,15 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def default_config(self) -> Dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL - config["oauth_delegation"] = { - "enabled": True, - "issuer": ISSUER, - "client_id": CLIENT_ID, - "client_auth_method": "client_secret_post", - "client_secret": CLIENT_SECRET, + config["disable_registration"] = True + config["experimental_features"] = { + "msc3861": { + "enabled": True, + "issuer": ISSUER, + "client_id": CLIENT_ID, + "client_auth_method": "client_secret_post", + "client_secret": CLIENT_SECRET, + } } return config diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py index 34333d88df..377243a170 100644 --- a/tests/rest/test_well_known.py +++ b/tests/rest/test_well_known.py @@ -108,14 +108,17 @@ class WellKnownTests(unittest.HomeserverTestCase): @unittest.override_config( { "public_baseurl": "https://homeserver", # this is only required so that client well known is served - "oauth_delegation": { - "enabled": True, - "issuer": "https://issuer", - "account": "https://my-account.issuer", - "client_id": "id", - "client_auth_method": "client_secret_post", - "client_secret": "secret", + "experimental_features": { + "msc3861": { + "enabled": True, + "issuer": "https://issuer", + "account_management_url": "https://my-account.issuer", + "client_id": "id", + "client_auth_method": "client_secret_post", + "client_secret": "secret", + }, }, + "disable_registration": True, } ) def test_client_well_known_msc3861_oauth_delegation(self) -> None: From bad1f2cd3558d908b579b6c191bcd7bebecd32be Mon Sep 17 00:00:00 2001 From: Hugh Nimmo-Smith Date: Tue, 7 Feb 2023 12:55:54 +0000 Subject: [PATCH 061/562] Tests for JWKS endpoint --- tests/config/test_oauth_delegation.py | 117 +++++++++++++++++++++++--- tests/rest/admin/test_jwks.py | 106 +++++++++++++++++++++++ 2 files changed, 212 insertions(+), 11 deletions(-) create mode 100644 tests/rest/admin/test_jwks.py diff --git a/tests/config/test_oauth_delegation.py b/tests/config/test_oauth_delegation.py index c5fc6d6ebb..6d294e0144 100644 --- a/tests/config/test_oauth_delegation.py +++ b/tests/config/test_oauth_delegation.py @@ -51,6 +51,34 @@ class CustomAuthModule: ) +def _dict_merge(merge_dict: dict, into_dict: dict) -> None: + """Do a deep merge of two dicts + + Recursively merges `merge_dict` into `into_dict`: + * For keys where both `merge_dict` and `into_dict` have a dict value, the values + are recursively merged + * For all other keys, the values in `into_dict` (if any) are 
overwritten with + the value from `merge_dict`. + + Args: + merge_dict: dict to merge + into_dict: target dict to be modified + """ + for k, v in merge_dict.items(): + if k not in into_dict: + into_dict[k] = v + continue + + current_val = into_dict[k] + + if isinstance(v, dict) and isinstance(current_val, dict): + _dict_merge(v, current_val) + continue + + # otherwise we just overwrite + into_dict[k] = v + + @skip_unless(HAS_AUTHLIB, "requires authlib") class MSC3861OAuthDelegation(HomeserverTestCase): """Test that the Homeserver fails to initialize if the config is invalid.""" @@ -60,18 +88,85 @@ class MSC3861OAuthDelegation(HomeserverTestCase): self._hs_args = {"clock": self.clock, "reactor": self.reactor} def default_config(self) -> Dict[str, Any]: - config = super().default_config() - config["public_baseurl"] = BASE_URL - if "experimental_features" not in config: - config["experimental_features"] = {} - config["experimental_features"]["msc3861"] = { - "enabled": True, - "issuer": ISSUER, - "client_id": CLIENT_ID, - "client_auth_method": "client_secret_post", - "client_secret": CLIENT_SECRET, + default_extra_config = { + "public_baseurl": BASE_URL, + "experimental_features": { + "msc3861": { + "enabled": True, + "issuer": ISSUER, + "client_id": CLIENT_ID, + "client_auth_method": "client_secret_post", + "client_secret": CLIENT_SECRET, + } + }, } - return config + _dict_merge( + {} if self._extra_config is None else self._extra_config, + default_extra_config, + ) + self._extra_config = default_extra_config + return super().default_config() + + @override_config( + { + "enable_registration": False, + } + ) + def test_client_secret_post_works(self) -> None: + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "experimental_features": { + "msc3861": { + "client_auth_method": "invalid", + } + }, + } + ) + def test_invalid_client_auth_method(self) -> None: + with self.assertRaises(ValueError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "experimental_features": { + "msc3861": { + "client_auth_method": "private_key_jwt", + } + }, + } + ) + def test_invalid_private_key_jwt(self) -> None: + with self.assertRaises(ConfigError): + self.setup_test_homeserver() + + @override_config( + { + "enable_registration": False, + "experimental_features": { + "msc3861": { + "client_auth_method": "private_key_jwt", + "jwk": { + "p": "-frVdP_tZ-J_nIR6HNMDq1N7aunwm51nAqNnhqIyuA8ikx7LlQED1tt2LD3YEvYyW8nxE2V95HlCRZXQPMiRJBFOsbmYkzl2t-MpavTaObB_fct_JqcRtdXddg4-_ihdjRDwUOreq_dpWh6MIKsC3UyekfkHmeEJg5YpOTL15j8", + "kty": "RSA", + "q": "oFw-Enr_YozQB1ab-kawn4jY3yHi8B1nSmYT0s8oTCflrmps5BFJfCkHL5ij3iY15z0o2m0N-jjB1oSJ98O4RayEEYNQlHnTNTl0kRIWzpoqblHUIxVcahIpP_xTovBJzwi8XXoLGqHOOMA-r40LSyVgP2Ut8D9qBwV6_UfT0LU", + "d": "WFkDPYo4b4LIS64D_QtQfGGuAObPvc3HFfp9VZXyq3SJR58XZRHE0jqtlEMNHhOTgbMYS3w8nxPQ_qVzY-5hs4fIanwvB64mAoOGl0qMHO65DTD_WsGFwzYClJPBVniavkLE2Hmpu8IGe6lGliN8vREC6_4t69liY-XcN_ECboVtC2behKkLOEASOIMuS7YcKAhTJFJwkl1dqDlliEn5A4u4xy7nuWQz3juB1OFdKlwGA5dfhDNglhoLIwNnkLsUPPFO-WB5ZNEW35xxHOToxj4bShvDuanVA6mJPtTKjz0XibjB36bj_nF_j7EtbE2PdGJ2KevAVgElR4lqS4ISgQ", + "e": "AQAB", + "kid": "test", + "qi": "cPfNk8l8W5exVNNea4d7QZZ8Qr8LgHghypYAxz8PQh1fNa8Ya1SNUDVzC2iHHhszxxA0vB9C7jGze8dBrvnzWYF1XvQcqNIVVgHhD57R1Nm3dj2NoHIKe0Cu4bCUtP8xnZQUN4KX7y4IIcgRcBWG1hT6DEYZ4BxqicnBXXNXAUI", + "dp": 
"dKlMHvslV1sMBQaKWpNb3gPq0B13TZhqr3-E2_8sPlvJ3fD8P4CmwwnOn50JDuhY3h9jY5L06sBwXjspYISVv8hX-ndMLkEeF3lrJeA5S70D8rgakfZcPIkffm3tlf1Ok3v5OzoxSv3-67Df4osMniyYwDUBCB5Oq1tTx77xpU8", + "dq": "S4ooU1xNYYcjl9FcuJEEMqKsRrAXzzSKq6laPTwIp5dDwt2vXeAm1a4eDHXC-6rUSZGt5PbqVqzV4s-cjnJMI8YYkIdjNg4NSE1Ac_YpeDl3M3Colb5CQlU7yUB7xY2bt0NOOFp9UJZYJrOo09mFMGjy5eorsbitoZEbVqS3SuE", + "n": "nJbYKqFwnURKimaviyDFrNLD3gaKR1JW343Qem25VeZxoMq1665RHVoO8n1oBm4ClZdjIiZiVdpyqzD5-Ow12YQgQEf1ZHP3CCcOQQhU57Rh5XvScTe5IxYVkEW32IW2mp_CJ6WfjYpfeL4azarVk8H3Vr59d1rSrKTVVinVdZer9YLQyC_rWAQNtHafPBMrf6RYiNGV9EiYn72wFIXlLlBYQ9Fx7bfe1PaL6qrQSsZP3_rSpuvVdLh1lqGeCLR0pyclA9uo5m2tMyCXuuGQLbA_QJm5xEc7zd-WFdux2eXF045oxnSZ_kgQt-pdN7AxGWOVvwoTf9am6mSkEdv6iw", + }, + } + }, + } + ) + def test_private_key_jwt_works(self) -> None: + self.setup_test_homeserver() def test_registration_cannot_be_enabled(self) -> None: with self.assertRaises(ConfigError): diff --git a/tests/rest/admin/test_jwks.py b/tests/rest/admin/test_jwks.py new file mode 100644 index 0000000000..a9a6191c73 --- /dev/null +++ b/tests/rest/admin/test_jwks.py @@ -0,0 +1,106 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict + +from twisted.web.resource import Resource + +from synapse.rest.synapse.client import build_synapse_client_resource_tree + +from tests.unittest import HomeserverTestCase, override_config, skip_unless + +try: + import authlib # noqa: F401 + + HAS_AUTHLIB = True +except ImportError: + HAS_AUTHLIB = False + + +@skip_unless(HAS_AUTHLIB, "requires authlib") +class JWKSTestCase(HomeserverTestCase): + """Test /_synapse/jwks JWKS data.""" + + def create_resource_dict(self) -> Dict[str, Resource]: + d = super().create_resource_dict() + d.update(build_synapse_client_resource_tree(self.hs)) + return d + + def test_empty_jwks(self) -> None: + """Test that the JWKS endpoint is not present by default.""" + channel = self.make_request("GET", "/_synapse/jwks") + self.assertEqual(404, channel.code, channel.result) + + @override_config( + { + "disable_registration": True, + "experimental_features": { + "msc3861": { + "enabled": True, + "issuer": "https://issuer/", + "client_id": "test-client-id", + "client_auth_method": "client_secret_post", + "client_secret": "secret", + }, + }, + } + ) + def test_empty_jwks_for_msc3861_client_secret_post(self) -> None: + """Test that the JWKS endpoint is empty when plain auth is used.""" + channel = self.make_request("GET", "/_synapse/jwks") + self.assertEqual(200, channel.code, channel.result) + self.assertEqual({"keys": []}, channel.json_body) + + @override_config( + { + "disable_registration": True, + "experimental_features": { + "msc3861": { + "enabled": True, + "issuer": "https://issuer/", + "client_id": "test-client-id", + "client_auth_method": "private_key_jwt", + "jwk": { + "p": "-frVdP_tZ-J_nIR6HNMDq1N7aunwm51nAqNnhqIyuA8ikx7LlQED1tt2LD3YEvYyW8nxE2V95HlCRZXQPMiRJBFOsbmYkzl2t-MpavTaObB_fct_JqcRtdXddg4-_ihdjRDwUOreq_dpWh6MIKsC3UyekfkHmeEJg5YpOTL15j8", + "kty": "RSA", 
+ "q": "oFw-Enr_YozQB1ab-kawn4jY3yHi8B1nSmYT0s8oTCflrmps5BFJfCkHL5ij3iY15z0o2m0N-jjB1oSJ98O4RayEEYNQlHnTNTl0kRIWzpoqblHUIxVcahIpP_xTovBJzwi8XXoLGqHOOMA-r40LSyVgP2Ut8D9qBwV6_UfT0LU", + "d": "WFkDPYo4b4LIS64D_QtQfGGuAObPvc3HFfp9VZXyq3SJR58XZRHE0jqtlEMNHhOTgbMYS3w8nxPQ_qVzY-5hs4fIanwvB64mAoOGl0qMHO65DTD_WsGFwzYClJPBVniavkLE2Hmpu8IGe6lGliN8vREC6_4t69liY-XcN_ECboVtC2behKkLOEASOIMuS7YcKAhTJFJwkl1dqDlliEn5A4u4xy7nuWQz3juB1OFdKlwGA5dfhDNglhoLIwNnkLsUPPFO-WB5ZNEW35xxHOToxj4bShvDuanVA6mJPtTKjz0XibjB36bj_nF_j7EtbE2PdGJ2KevAVgElR4lqS4ISgQ", + "e": "AQAB", + "kid": "test", + "qi": "cPfNk8l8W5exVNNea4d7QZZ8Qr8LgHghypYAxz8PQh1fNa8Ya1SNUDVzC2iHHhszxxA0vB9C7jGze8dBrvnzWYF1XvQcqNIVVgHhD57R1Nm3dj2NoHIKe0Cu4bCUtP8xnZQUN4KX7y4IIcgRcBWG1hT6DEYZ4BxqicnBXXNXAUI", + "dp": "dKlMHvslV1sMBQaKWpNb3gPq0B13TZhqr3-E2_8sPlvJ3fD8P4CmwwnOn50JDuhY3h9jY5L06sBwXjspYISVv8hX-ndMLkEeF3lrJeA5S70D8rgakfZcPIkffm3tlf1Ok3v5OzoxSv3-67Df4osMniyYwDUBCB5Oq1tTx77xpU8", + "dq": "S4ooU1xNYYcjl9FcuJEEMqKsRrAXzzSKq6laPTwIp5dDwt2vXeAm1a4eDHXC-6rUSZGt5PbqVqzV4s-cjnJMI8YYkIdjNg4NSE1Ac_YpeDl3M3Colb5CQlU7yUB7xY2bt0NOOFp9UJZYJrOo09mFMGjy5eorsbitoZEbVqS3SuE", + "n": "nJbYKqFwnURKimaviyDFrNLD3gaKR1JW343Qem25VeZxoMq1665RHVoO8n1oBm4ClZdjIiZiVdpyqzD5-Ow12YQgQEf1ZHP3CCcOQQhU57Rh5XvScTe5IxYVkEW32IW2mp_CJ6WfjYpfeL4azarVk8H3Vr59d1rSrKTVVinVdZer9YLQyC_rWAQNtHafPBMrf6RYiNGV9EiYn72wFIXlLlBYQ9Fx7bfe1PaL6qrQSsZP3_rSpuvVdLh1lqGeCLR0pyclA9uo5m2tMyCXuuGQLbA_QJm5xEc7zd-WFdux2eXF045oxnSZ_kgQt-pdN7AxGWOVvwoTf9am6mSkEdv6iw", + }, + }, + }, + } + ) + def test_key_returned_for_msc3861_client_secret_post(self) -> None: + """Test that the JWKS includes public part of JWK for private_key_jwt auth is used.""" + channel = self.make_request("GET", "/_synapse/jwks") + self.assertEqual(200, channel.code, channel.result) + self.assertEqual( + { + "keys": [ + { + "kty": "RSA", + "e": "AQAB", + "kid": "test", + "n": "nJbYKqFwnURKimaviyDFrNLD3gaKR1JW343Qem25VeZxoMq1665RHVoO8n1oBm4ClZdjIiZiVdpyqzD5-Ow12YQgQEf1ZHP3CCcOQQhU57Rh5XvScTe5IxYVkEW32IW2mp_CJ6WfjYpfeL4azarVk8H3Vr59d1rSrKTVVinVdZer9YLQyC_rWAQNtHafPBMrf6RYiNGV9EiYn72wFIXlLlBYQ9Fx7bfe1PaL6qrQSsZP3_rSpuvVdLh1lqGeCLR0pyclA9uo5m2tMyCXuuGQLbA_QJm5xEc7zd-WFdux2eXF045oxnSZ_kgQt-pdN7AxGWOVvwoTf9am6mSkEdv6iw", + } + ] + }, + channel.json_body, + ) From c008b44b4f7bb3604be77709c62e6ec78389f8ed Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 4 Apr 2023 18:11:17 +0200 Subject: [PATCH 062/562] Add an admin token for MAS -> Synapse calls --- synapse/api/auth/msc3861_delegated.py | 15 +++++++++++++++ synapse/config/experimental.py | 9 +++++++++ 2 files changed, 24 insertions(+) diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 4ca3280bd3..a84b7730b3 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -90,6 +90,7 @@ class MSC3861DelegatedAuth(BaseAuth): self._http_client = hs.get_proxied_http_client() self._hostname = hs.hostname + self._admin_token = self._config.admin_token self._issuer_metadata = RetryOnExceptionCachedCall(self._load_metadata) @@ -176,6 +177,20 @@ class MSC3861DelegatedAuth(BaseAuth): token: str, allow_expired: bool = False, ) -> Requester: + if self._admin_token is not None and token == self._admin_token: + # XXX: This is a temporary solution so that the admin API can be called by + # the OIDC provider. This will be removed once we have OIDC client + # credentials grant support in matrix-authentication-service. + logging.info("Admin toked used") + # XXX: that user doesn't exist and won't be provisioned. 
+ # This is mostly fine for admin calls, but we should also think about doing + # requesters without a user_id. + admin_user = UserID("__oidc_admin", self._hostname) + return create_requester( + user_id=admin_user, + scope=["urn:synapse:admin:*"], + ) + introspection_result = await self._introspect_token(token) logger.info(f"Introspection result: {introspection_result!r}") diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index b9607975f9..d4dff22b0b 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -136,6 +136,15 @@ class MSC3861: ) """The URL of the My Account page on the OIDC Provider as per MSC2965.""" + admin_token: Optional[str] = attr.ib( + default=None, + validator=attr.validators.optional(attr.validators.instance_of(str)), + ) + """ + A token that should be considered as an admin token. + This is used by the OIDC provider, to make admin calls to Synapse. + """ + def check_config_conflicts(self, root: RootConfig) -> None: """Checks for any configuration conflicts with other parts of Synapse. From 4d0231b3648d5d70a8e0f4d99a0c040f12f15669 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 16 May 2023 10:52:37 +0200 Subject: [PATCH 063/562] Make AS tokens work & allow ASes to /register --- synapse/api/auth/base.py | 80 +++++++++++++++++++++++- synapse/api/auth/internal.py | 82 +------------------------ synapse/api/auth/msc3861_delegated.py | 9 ++- synapse/rest/client/register.py | 69 +++++++++++++++++++++ tests/handlers/test_oauth_delegation.py | 4 +- 5 files changed, 159 insertions(+), 85 deletions(-) diff --git a/synapse/api/auth/base.py b/synapse/api/auth/base.py index 240f2b90de..9321d6f186 100644 --- a/synapse/api/auth/base.py +++ b/synapse/api/auth/base.py @@ -14,6 +14,8 @@ import logging from typing import TYPE_CHECKING, Optional, Tuple +from netaddr import IPAddress + from twisted.web.server import Request from synapse import event_auth @@ -26,7 +28,8 @@ from synapse.api.errors import ( ) from synapse.appservice import ApplicationService from synapse.logging.opentracing import trace -from synapse.types import Requester +from synapse.types import Requester, create_requester +from synapse.util.cancellation import cancellable if TYPE_CHECKING: from synapse.server import HomeServer @@ -271,3 +274,78 @@ class BaseAuth: raise MissingClientTokenError() return query_params[0].decode("ascii") + + @cancellable + async def get_appservice_user( + self, request: Request, access_token: str + ) -> Optional[Requester]: + """ + Given a request, reads the request parameters to determine: + - whether it's an application service that's making this request + - what user the application service should be treated as controlling + (the user_id URI parameter allows an application service to masquerade + any applicable user in its namespace) + - what device the application service should be treated as controlling + (the device_id[^1] URI parameter allows an application service to masquerade + as any device that exists for the relevant user) + + [^1] Unstable and provided by MSC3202. + Must use `org.matrix.msc3202.device_id` in place of `device_id` for now. + + Returns: + the application service `Requester` of that request + + Postconditions: + - The `app_service` field in the returned `Requester` is set + - The `user_id` field in the returned `Requester` is either the application + service sender or the controlled user set by the `user_id` URI parameter + - The returned application service is permitted to control the returned user ID. 
+ - The returned device ID, if present, has been checked to be a valid device ID + for the returned user ID. + """ + DEVICE_ID_ARG_NAME = b"org.matrix.msc3202.device_id" + + app_service = self.store.get_app_service_by_token(access_token) + if app_service is None: + return None + + if app_service.ip_range_whitelist: + ip_address = IPAddress(request.getClientAddress().host) + if ip_address not in app_service.ip_range_whitelist: + return None + + # This will always be set by the time Twisted calls us. + assert request.args is not None + + if b"user_id" in request.args: + effective_user_id = request.args[b"user_id"][0].decode("utf8") + await self.validate_appservice_can_control_user_id( + app_service, effective_user_id + ) + else: + effective_user_id = app_service.sender + + effective_device_id: Optional[str] = None + + if ( + self.hs.config.experimental.msc3202_device_masquerading_enabled + and DEVICE_ID_ARG_NAME in request.args + ): + effective_device_id = request.args[DEVICE_ID_ARG_NAME][0].decode("utf8") + # We only just set this so it can't be None! + assert effective_device_id is not None + device_opt = await self.store.get_device( + effective_user_id, effective_device_id + ) + if device_opt is None: + # For now, use 400 M_EXCLUSIVE if the device doesn't exist. + # This is an open thread of discussion on MSC3202 as of 2021-12-09. + raise AuthError( + 400, + f"Application service trying to use a device that doesn't exist ('{effective_device_id}' for {effective_user_id})", + Codes.EXCLUSIVE, + ) + + return create_requester( + effective_user_id, app_service=app_service, device_id=effective_device_id + ) diff --git a/synapse/api/auth/internal.py b/synapse/api/auth/internal.py index 813d537e53..e2ae198b19 100644 --- a/synapse/api/auth/internal.py +++ b/synapse/api/auth/internal.py @@ -12,12 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING import pymacaroons -from netaddr import IPAddress - -from twisted.web.server import Request from synapse.api.errors import ( AuthError, @@ -122,7 +119,7 @@ class InternalAuth(BaseAuth): access_token = self.get_access_token_from_request(request) # First check if it could be a request from an appservice - requester = await self._get_appservice_user(request) + requester = await self.get_appservice_user(request, access_token) if not requester: # If not, it should be from a regular user requester = await self.get_user_by_access_token( @@ -189,81 +186,6 @@ class InternalAuth(BaseAuth): except KeyError: raise MissingClientTokenError() - @cancellable - async def _get_appservice_user(self, request: Request) -> Optional[Requester]: - """ - Given a request, reads the request parameters to determine: - - whether it's an application service that's making this request - - what user the application service should be treated as controlling - (the user_id URI parameter allows an application service to masquerade - any applicable user in its namespace) - - what device the application service should be treated as controlling - (the device_id[^1] URI parameter allows an application service to masquerade - as any device that exists for the relevant user) - - [^1] Unstable and provided by MSC3202. - Must use `org.matrix.msc3202.device_id` in place of `device_id` for now. 
- - Returns: - the application service `Requester` of that request - - Postconditions: - - The `app_service` field in the returned `Requester` is set - - The `user_id` field in the returned `Requester` is either the application - service sender or the controlled user set by the `user_id` URI parameter - - The returned application service is permitted to control the returned user ID. - - The returned device ID, if present, has been checked to be a valid device ID - for the returned user ID. - """ - DEVICE_ID_ARG_NAME = b"org.matrix.msc3202.device_id" - - app_service = self.store.get_app_service_by_token( - self.get_access_token_from_request(request) - ) - if app_service is None: - return None - - if app_service.ip_range_whitelist: - ip_address = IPAddress(request.getClientAddress().host) - if ip_address not in app_service.ip_range_whitelist: - return None - - # This will always be set by the time Twisted calls us. - assert request.args is not None - - if b"user_id" in request.args: - effective_user_id = request.args[b"user_id"][0].decode("utf8") - await self.validate_appservice_can_control_user_id( - app_service, effective_user_id - ) - else: - effective_user_id = app_service.sender - - effective_device_id: Optional[str] = None - - if ( - self.hs.config.experimental.msc3202_device_masquerading_enabled - and DEVICE_ID_ARG_NAME in request.args - ): - effective_device_id = request.args[DEVICE_ID_ARG_NAME][0].decode("utf8") - # We only just set this so it can't be None! - assert effective_device_id is not None - device_opt = await self.store.get_device( - effective_user_id, effective_device_id - ) - if device_opt is None: - # For now, use 400 M_EXCLUSIVE if the device doesn't exist. - # This is an open thread of discussion on MSC3202 as of 2021-12-09. - raise AuthError( - 400, - f"Application service trying to use a device that doesn't exist ('{effective_device_id}' for {effective_user_id})", - Codes.EXCLUSIVE, - ) - - return create_requester( - effective_user_id, app_service=app_service, device_id=effective_device_id - ) - async def get_user_by_access_token( self, token: str, diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index a84b7730b3..b84dce2563 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -162,14 +162,19 @@ class MSC3861DelegatedAuth(BaseAuth): ) -> Requester: access_token = self.get_access_token_from_request(request) - # TODO: we probably want to assert the allow_guest inside this call so that we don't provision the user if they don't have enough permission: - requester = await self.get_user_by_access_token(access_token, allow_expired) + requester = await self.get_appservice_user(request, access_token) + if not requester: + # TODO: we probably want to assert the allow_guest inside this call + # so that we don't provision the user if they don't have enough permission: + requester = await self.get_user_by_access_token(access_token, allow_expired) if not allow_guest and requester.is_guest: raise OAuthInsufficientScopeError( ["urn:matrix:org.matrix.msc2967.client:api:*"] ) + request.requester = requester + return requester async def get_user_by_access_token( diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index f8fb0e1dee..d59669f0b6 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -869,6 +869,74 @@ class RegisterRestServlet(RestServlet): return 200, result +class RegisterAppServiceOnlyRestServlet(RestServlet): + """An 
alternative registration API endpoint that only allows ASes to register + + This replaces the regular /register endpoint if MSC3861. There are two notable + differences with the regular /register endpoint: + - It only allows the `m.login.application_service` login type + - It does not create a device or access token for the just-registered user + + Note that the exact behaviour of this endpoint is not yet finalised. It should be + just good enough to make most ASes work. + """ + + PATTERNS = client_patterns("/register$") + CATEGORY = "Registration/login requests" + + def __init__(self, hs: "HomeServer"): + super().__init__() + + self.auth = hs.get_auth() + self.registration_handler = hs.get_registration_handler() + self.ratelimiter = hs.get_registration_ratelimiter() + + @interactive_auth_handler + async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + body = parse_json_object_from_request(request) + + client_addr = request.getClientAddress().host + + await self.ratelimiter.ratelimit(None, client_addr, update=False) + + kind = parse_string(request, "kind", default="user") + + if kind == "guest": + raise SynapseError(403, "Guest access is disabled") + elif kind != "user": + raise UnrecognizedRequestError( + f"Do not understand membership kind: {kind}", + ) + + # Pull out the provided username and do basic sanity checks early since + # the auth layer will store these in sessions. + desired_username = body.get("username") + if not isinstance(desired_username, str) or len(desired_username) > 512: + raise SynapseError(400, "Invalid username") + + # Allow only ASes to use this API. + if body.get("type") != APP_SERVICE_REGISTRATION_TYPE: + raise SynapseError(403, "Non-application service registration type") + + if not self.auth.has_access_token(request): + raise SynapseError( + 400, + "Appservice token must be provided when using a type of m.login.application_service", + ) + + # XXX we should check that desired_username is valid. Currently + # we give appservices carte blanche for any insanity in mxids, + # because the IRC bridges rely on being able to register stupid + # IDs. 
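+        #
+        # As a rough illustration (the values here are hypothetical), a
+        # request to this endpoint looks like:
+        #
+        #     POST /_matrix/client/v3/register
+        #     {"type": "m.login.application_service", "username": "_irc_alice"}
+        #
+        # with the appservice token supplied as the access token, and, unlike
+        # the regular /register endpoint, the response carries only the new
+        # user ID (no device_id and no access_token):
+        #
+        #     {"user_id": "@_irc_alice:example.org"}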
+ + as_token = self.auth.get_access_token_from_request(request) + + user_id = await self.registration_handler.appservice_register( + desired_username, as_token + ) + return 200, {"user_id": user_id} + + def _calculate_registration_flows( config: HomeServerConfig, auth_handler: AuthHandler ) -> List[List[str]]: @@ -956,6 +1024,7 @@ def _calculate_registration_flows( def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: if hs.config.experimental.msc3861.enabled: + RegisterAppServiceOnlyRestServlet(hs).register(http_server) return if hs.config.worker.worker_app is None: diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index 081fef51ec..e53020a58a 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -527,8 +527,8 @@ class MSC3861OAuthDelegation(HomeserverTestCase): self.expect_unrecognized( "GET", "/_matrix/client/v1/register/m.login.registration_token/validity" ) - self.expect_unrecognized("POST", "/_matrix/client/v3/register") - self.expect_unrecognized("GET", "/_matrix/client/v3/register") + # This is still available for AS registrations + # self.expect_unrecognized("POST", "/_matrix/client/v3/register") self.expect_unrecognized("GET", "/_matrix/client/v3/register/available") self.expect_unrecognized( "POST", "/_matrix/client/v3/register/email/requestToken" From e343125b3880bfc55223735a784eb1894db5e9be Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Wed, 10 May 2023 18:05:06 +0200 Subject: [PATCH 064/562] Disable incompatible Admin API endpoints --- synapse/rest/admin/__init__.py | 21 +++++++++++++-------- synapse/rest/admin/users.py | 8 ++++++++ tests/handlers/test_oauth_delegation.py | 19 +++++++++++++++++++ 3 files changed, 40 insertions(+), 8 deletions(-) diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index c729364839..fe8177ed4d 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -257,9 +257,11 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: DeleteRoomStatusByRoomIdRestServlet(hs).register(http_server) JoinRoomAliasServlet(hs).register(http_server) VersionServlet(hs).register(http_server) - UserAdminServlet(hs).register(http_server) + if not hs.config.experimental.msc3861.enabled: + UserAdminServlet(hs).register(http_server) UserMembershipRestServlet(hs).register(http_server) - UserTokenRestServlet(hs).register(http_server) + if not hs.config.experimental.msc3861.enabled: + UserTokenRestServlet(hs).register(http_server) UserRestServletV2(hs).register(http_server) UsersRestServletV2(hs).register(http_server) UserMediaStatisticsRestServlet(hs).register(http_server) @@ -274,9 +276,10 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: RoomEventContextServlet(hs).register(http_server) RateLimitRestServlet(hs).register(http_server) UsernameAvailableRestServlet(hs).register(http_server) - ListRegistrationTokensRestServlet(hs).register(http_server) - NewRegistrationTokenRestServlet(hs).register(http_server) - RegistrationTokenRestServlet(hs).register(http_server) + if not hs.config.experimental.msc3861.enabled: + ListRegistrationTokensRestServlet(hs).register(http_server) + NewRegistrationTokenRestServlet(hs).register(http_server) + RegistrationTokenRestServlet(hs).register(http_server) DestinationMembershipRestServlet(hs).register(http_server) DestinationResetConnectionRestServlet(hs).register(http_server) DestinationRestServlet(hs).register(http_server) @@ 
-306,10 +309,12 @@ def register_servlets_for_client_rest_resource( # The following resources can only be run on the main process. if hs.config.worker.worker_app is None: DeactivateAccountRestServlet(hs).register(http_server) - ResetPasswordRestServlet(hs).register(http_server) + if not hs.config.experimental.msc3861.enabled: + ResetPasswordRestServlet(hs).register(http_server) SearchUsersRestServlet(hs).register(http_server) - UserRegisterServlet(hs).register(http_server) - AccountValidityRenewServlet(hs).register(http_server) + if not hs.config.experimental.msc3861.enabled: + UserRegisterServlet(hs).register(http_server) + AccountValidityRenewServlet(hs).register(http_server) # Load the media repo ones if we're using them. Otherwise load the servlets which # don't need a media repo (typically readonly admin APIs). diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 932333ae57..407fe9c804 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -71,6 +71,7 @@ class UsersRestServletV2(RestServlet): self.auth = hs.get_auth() self.admin_handler = hs.get_admin_handler() self._msc3866_enabled = hs.config.experimental.msc3866.enabled + self._msc3861_enabled = hs.config.experimental.msc3861.enabled async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) @@ -94,7 +95,14 @@ class UsersRestServletV2(RestServlet): user_id = parse_string(request, "user_id") name = parse_string(request, "name") + guests = parse_boolean(request, "guests", default=True) + if self._msc3861_enabled and guests: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "The guests parameter is not supported when MSC3861 is enabled.", + errcode=Codes.INVALID_PARAM, + ) deactivated = parse_boolean(request, "deactivated", default=False) # If support for MSC3866 is not enabled, apply no filtering based on the diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index e53020a58a..b79c43a424 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -31,6 +31,7 @@ from synapse.api.errors import ( InvalidClientTokenError, OAuthInsufficientScopeError, ) +from synapse.rest import admin from synapse.rest.client import account, devices, keys, login, logout, register from synapse.server import HomeServer from synapse.types import JsonDict @@ -104,6 +105,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): register.register_servlets, login.register_servlets, logout.register_servlets, + admin.register_servlets, ] def default_config(self) -> Dict[str, Any]: @@ -557,3 +559,20 @@ class MSC3861OAuthDelegation(HomeserverTestCase): self.expect_unrecognized( "POST", "/_matrix/client/v3/user/{USERNAME}/openid/request_token" ) + + def test_admin_api_endpoints_removed(self) -> None: + """Test that admin API endpoints that were removed in MSC2964 are no longer available.""" + self.expect_unrecognized("GET", "/_synapse/admin/v1/registration_tokens") + self.expect_unrecognized("POST", "/_synapse/admin/v1/registration_tokens/new") + self.expect_unrecognized("GET", "/_synapse/admin/v1/registration_tokens/abcd") + self.expect_unrecognized("PUT", "/_synapse/admin/v1/registration_tokens/abcd") + self.expect_unrecognized( + "DELETE", "/_synapse/admin/v1/registration_tokens/abcd" + ) + self.expect_unrecognized("POST", "/_synapse/admin/v1/reset_password/foo") + self.expect_unrecognized("POST", "/_synapse/admin/v1/users/foo/login") + self.expect_unrecognized("GET", 
"/_synapse/admin/v1/register") + self.expect_unrecognized("POST", "/_synapse/admin/v1/register") + self.expect_unrecognized("GET", "/_synapse/admin/v1/users/foo/admin") + self.expect_unrecognized("PUT", "/_synapse/admin/v1/users/foo/admin") + self.expect_unrecognized("POST", "/_synapse/admin/v1/account_validity/validity") From ec9379d7e298c24f3530cf48ee34c30aa038feb2 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Fri, 12 May 2023 15:22:46 +0200 Subject: [PATCH 065/562] Newsfile. --- changelog.d/15582.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/15582.feature diff --git a/changelog.d/15582.feature b/changelog.d/15582.feature new file mode 100644 index 0000000000..00959500a5 --- /dev/null +++ b/changelog.d/15582.feature @@ -0,0 +1 @@ +Experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support: delegate auth to an OIDC provider. From 14a5be9c4d69b5669792f2cdc658c266847a8c4a Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Mon, 22 May 2023 15:48:57 +0200 Subject: [PATCH 066/562] Handle errors when introspecting tokens This returns a proper 503 when the introspection endpoint is not working for some reason, which should avoid logging out clients in those cases. --- synapse/api/auth/msc3861_delegated.py | 42 ++++++++++++++++++++++--- tests/handlers/test_oauth_delegation.py | 35 +++++++++++++++++++++ tests/test_utils/__init__.py | 4 +-- 3 files changed, 74 insertions(+), 7 deletions(-) diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index b84dce2563..82c66691da 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -27,9 +27,11 @@ from twisted.web.http_headers import Headers from synapse.api.auth.base import BaseAuth from synapse.api.errors import ( AuthError, + HttpResponseException, InvalidClientTokenError, OAuthInsufficientScopeError, StoreError, + SynapseError, ) from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable @@ -117,6 +119,21 @@ class MSC3861DelegatedAuth(BaseAuth): return metadata async def _introspect_token(self, token: str) -> IntrospectionToken: + """ + Send a token to the introspection endpoint and returns the introspection response + + Parameters: + token: The token to introspect + + Raises: + HttpResponseException: If the introspection endpoint returns a non-2xx response + ValueError: If the introspection endpoint returns an invalid JSON response + JSONDecodeError: If the introspection endpoint returns a non-JSON response + Exception: If the HTTP request fails + + Returns: + The introspection response + """ metadata = await self._issuer_metadata.get() introspection_endpoint = metadata.get("introspection_endpoint") raw_headers: Dict[str, str] = { @@ -136,7 +153,7 @@ class MSC3861DelegatedAuth(BaseAuth): # Do the actual request # We're not using the SimpleHttpClient util methods as we don't want to - # check the HTTP status code and we do the body encoding ourself. + # check the HTTP status code, and we do the body encoding ourselves. response = await self._http_client.request( method="POST", uri=uri, @@ -145,10 +162,21 @@ class MSC3861DelegatedAuth(BaseAuth): ) resp_body = await make_deferred_yieldable(readBody(response)) - # TODO: Let's not worry about 5xx errors & co. for now and just try - # decoding that as JSON. 
We should also do some validation of the - # response + + if response.code < 200 or response.code >= 300: + raise HttpResponseException( + response.code, + response.phrase.decode("ascii", errors="replace"), + resp_body, + ) + resp = json_decoder.decode(resp_body.decode("utf-8")) + + if not isinstance(resp, dict): + raise ValueError( + "The introspection endpoint returned an invalid JSON response." + ) + return IntrospectionToken(**resp) async def is_server_admin(self, requester: Requester) -> bool: @@ -196,7 +224,11 @@ class MSC3861DelegatedAuth(BaseAuth): scope=["urn:synapse:admin:*"], ) - introspection_result = await self._introspect_token(token) + try: + introspection_result = await self._introspect_token(token) + except Exception: + logger.exception("Failed to introspect token") + raise SynapseError(503, "Unable to introspect the access token") logger.info(f"Introspection result: {introspection_result!r}") diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index b79c43a424..16ce2c069d 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -30,6 +30,7 @@ from synapse.api.errors import ( Codes, InvalidClientTokenError, OAuthInsufficientScopeError, + SynapseError, ) from synapse.rest import admin from synapse.rest.client import account, devices, keys, login, logout, register @@ -405,6 +406,40 @@ class MSC3861OAuthDelegation(HomeserverTestCase): ) self.assertEqual(requester.device_id, DEVICE) + def test_unavailable_introspection_endpoint(self) -> None: + """The handler should return an internal server error.""" + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + + # The introspection endpoint is returning an error. + self.http_client.request = simple_async_mock( + return_value=FakeResponse(code=500, body=b"Internal Server Error") + ) + error = self.get_failure(self.auth.get_user_by_req(request), SynapseError) + self.assertEqual(error.value.code, 503) + + # The introspection endpoint request fails. + self.http_client.request = simple_async_mock(raises=Exception()) + error = self.get_failure(self.auth.get_user_by_req(request), SynapseError) + self.assertEqual(error.value.code, 503) + + # The introspection endpoint does not return a JSON object. + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, payload=["this is an array", "not an object"] + ) + ) + error = self.get_failure(self.auth.get_user_by_req(request), SynapseError) + self.assertEqual(error.value.code, 503) + + # The introspection endpoint does not return valid JSON. + self.http_client.request = simple_async_mock( + return_value=FakeResponse(code=200, body=b"this is not valid JSON") + ) + error = self.get_failure(self.auth.get_user_by_req(request), SynapseError) + self.assertEqual(error.value.code, 503) + def make_device_keys(self, user_id: str, device_id: str) -> JsonDict: # We only generate a master key to simplify the test. 
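        # As a sketch (field values elided), the payload this helper builds is:
        #
        #     {"master_key": {"user_id": ..., "usage": ["master"],
        #                     "keys": {"ed25519:<base64 key>": "<base64 key>"},
        #                     "signatures": {...}}}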
master_signing_key = generate_signing_key(device_id) diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py index e5dae670a7..c8cc841d95 100644 --- a/tests/test_utils/__init__.py +++ b/tests/test_utils/__init__.py @@ -33,7 +33,7 @@ from twisted.web.http import RESPONSES from twisted.web.http_headers import Headers from twisted.web.iweb import IResponse -from synapse.types import JsonDict +from synapse.types import JsonSerializable if TYPE_CHECKING: from sys import UnraisableHookArgs @@ -145,7 +145,7 @@ class FakeResponse: # type: ignore[misc] protocol.connectionLost(Failure(ResponseDone())) @classmethod - def json(cls, *, code: int = 200, payload: JsonDict) -> "FakeResponse": + def json(cls, *, code: int = 200, payload: JsonSerializable) -> "FakeResponse": headers = Headers({"Content-Type": ["application/json"]}) body = json.dumps(payload).encode("utf-8") return cls(code=code, body=body, headers=headers) From 98afc57d59df118a13f894fc66f206bc7409e14a Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Mon, 22 May 2023 17:17:49 +0200 Subject: [PATCH 067/562] Make OIDC scope constants --- synapse/api/auth/msc3861_delegated.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 82c66691da..5b0e678c0f 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -44,6 +44,15 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +# Scope as defined by MSC2967 +# https://github.com/matrix-org/matrix-spec-proposals/pull/2967 +SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*" +SCOPE_MATRIX_GUEST = "urn:matrix:org.matrix.msc2967.client:api:guest" +SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:" + +# Scope which allows access to the Synapse admin API +SCOPE_SYNAPSE_ADMIN = "urn:synapse:admin:*" + def scope_to_list(scope: str) -> List[str]: """Convert a scope string to a list of scope tokens""" @@ -197,9 +206,7 @@ class MSC3861DelegatedAuth(BaseAuth): requester = await self.get_user_by_access_token(access_token, allow_expired) if not allow_guest and requester.is_guest: - raise OAuthInsufficientScopeError( - ["urn:matrix:org.matrix.msc2967.client:api:*"] - ) + raise OAuthInsufficientScopeError([SCOPE_MATRIX_API]) request.requester = requester @@ -241,9 +248,9 @@ class MSC3861DelegatedAuth(BaseAuth): scope: List[str] = scope_to_list(introspection_result.get("scope", "")) # Determine type of user based on presence of particular scopes - has_admin_scope = "urn:synapse:admin:*" in scope - has_user_scope = "urn:matrix:org.matrix.msc2967.client:api:*" in scope - has_guest_scope = "urn:matrix:org.matrix.msc2967.client:api:guest" in scope + has_admin_scope = SCOPE_SYNAPSE_ADMIN in scope + has_user_scope = SCOPE_MATRIX_API in scope + has_guest_scope = SCOPE_MATRIX_GUEST in scope is_user = has_user_scope or has_admin_scope is_guest = has_guest_scope and not is_user @@ -299,10 +306,8 @@ class MSC3861DelegatedAuth(BaseAuth): # Find device_id in scope device_id = None for tok in scope: - if tok.startswith("urn:matrix:org.matrix.msc2967.client:device:"): - parts = tok.split(":") - if len(parts) == 5: - device_id = parts[4] + if tok.startswith(SCOPE_MATRIX_DEVICE_PREFIX): + device_id = tok[len(SCOPE_MATRIX_DEVICE_PREFIX) :] if device_id: # Create the device on the fly if it does not exist From f739bde962daa9bc425c8343f35993ae889dbc67 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 23 
May 2023 16:59:53 +0200 Subject: [PATCH 068/562] Reject tokens with multiple device scopes --- synapse/api/auth/msc3861_delegated.py | 30 ++++++++++++++++++++----- tests/handlers/test_oauth_delegation.py | 29 +++++++++++++++++++++++- 2 files changed, 52 insertions(+), 7 deletions(-) diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 5b0e678c0f..e4b16c0b5c 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -303,13 +303,31 @@ class MSC3861DelegatedAuth(BaseAuth): else: user_id = UserID.from_string(user_id_str) - # Find device_id in scope - device_id = None - for tok in scope: - if tok.startswith(SCOPE_MATRIX_DEVICE_PREFIX): - device_id = tok[len(SCOPE_MATRIX_DEVICE_PREFIX) :] + # Find device_ids in scope + # We only allow a single device_id in the scope, so we find them all in the + # scope list, and raise if there are more than one. The OIDC server should be + # the one enforcing valid scopes, so we raise a 500 if we find an invalid scope. + device_ids = [ + tok[len(SCOPE_MATRIX_DEVICE_PREFIX) :] + for tok in scope + if tok.startswith(SCOPE_MATRIX_DEVICE_PREFIX) + ] + + if len(device_ids) > 1: + raise AuthError( + 500, + "Multiple device IDs in scope", + ) + + device_id = device_ids[0] if device_ids else None + if device_id is not None: + # Sanity check the device_id + if len(device_id) > 255 or len(device_id) < 1: + raise AuthError( + 500, + "Invalid device ID in scope", + ) - if device_id: # Create the device on the fly if it does not exist try: await self.store.get_device( diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index 16ce2c069d..0641535512 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -27,6 +27,7 @@ from signedjson.sign import sign_json from twisted.test.proto_helpers import MemoryReactor from synapse.api.errors import ( + AuthError, Codes, InvalidClientTokenError, OAuthInsufficientScopeError, @@ -68,8 +69,9 @@ INTROSPECTION_ENDPOINT = ISSUER + "introspect" SYNAPSE_ADMIN_SCOPE = "urn:synapse:admin:*" MATRIX_USER_SCOPE = "urn:matrix:org.matrix.msc2967.client:api:*" MATRIX_GUEST_SCOPE = "urn:matrix:org.matrix.msc2967.client:api:guest" +MATRIX_DEVICE_SCOPE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:" DEVICE = "AABBCCDD" -MATRIX_DEVICE_SCOPE = "urn:matrix:org.matrix.msc2967.client:device:" + DEVICE +MATRIX_DEVICE_SCOPE = MATRIX_DEVICE_SCOPE_PREFIX + DEVICE SUBJECT = "abc-def-ghi" USERNAME = "test-user" USER_ID = "@" + USERNAME + ":" + SERVER_NAME @@ -344,6 +346,31 @@ class MSC3861OAuthDelegation(HomeserverTestCase): ) self.assertEqual(requester.device_id, DEVICE) + def test_multiple_devices(self) -> None: + """The handler should raise an error if multiple devices are found in the scope.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join( + [ + MATRIX_USER_SCOPE, + f"{MATRIX_DEVICE_SCOPE_PREFIX}AABBCC", + f"{MATRIX_DEVICE_SCOPE_PREFIX}DDEEFF", + ] + ), + "username": USERNAME, + }, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + self.get_failure(self.auth.get_user_by_req(request), AuthError) + def test_active_guest_not_allowed(self) -> None: """The handler should return an insufficient scope error.""" From 32a2f050042531ad4673b42789e833e9cd307740 Mon Sep 17 
00:00:00 2001 From: Quentin Gliech Date: Fri, 26 May 2023 14:50:19 +0200 Subject: [PATCH 069/562] Make the config tests spawn the homeserver only when needed --- synapse/config/experimental.py | 40 ++- tests/config/test_oauth_delegation.py | 358 ++++++++++++-------------- 2 files changed, 187 insertions(+), 211 deletions(-) diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index d4dff22b0b..1d189b2e26 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -69,7 +69,8 @@ class MSC3861: if value and not HAS_AUTHLIB: raise ConfigError( "MSC3861 is enabled but authlib is not installed. " - "Please install authlib to use MSC3861." + "Please install authlib to use MSC3861.", + ("experimental", "msc3861", "enabled"), ) issuer: str = attr.ib(default="", validator=attr.validators.instance_of(str)) @@ -114,7 +115,8 @@ class MSC3861: if value == ClientAuthMethod.PRIVATE_KEY_JWT and self.jwk is None: raise ConfigError( - "A JWKS must be provided when using the private_key_jwt client auth method" + "A JWKS must be provided when using the private_key_jwt client auth method", + ("experimental", "msc3861", "client_auth_method"), ) if ( @@ -127,7 +129,8 @@ class MSC3861: and self.client_secret is None ): raise ConfigError( - f"A client secret must be provided when using the {value} client auth method" + f"A client secret must be provided when using the {value} client auth method", + ("experimental", "msc3861", "client_auth_method"), ) account_management_url: Optional[str] = attr.ib( @@ -160,12 +163,14 @@ class MSC3861: or root.auth.password_enabled_for_login ): raise ConfigError( - "Password auth cannot be enabled when OAuth delegation is enabled" + "Password auth cannot be enabled when OAuth delegation is enabled", + ("password_config", "enabled"), ) if root.registration.enable_registration: raise ConfigError( - "Registration cannot be enabled when OAuth delegation is enabled" + "Registration cannot be enabled when OAuth delegation is enabled", + ("enable_registration",), ) if ( @@ -183,32 +188,38 @@ class MSC3861: if root.captcha.enable_registration_captcha: raise ConfigError( - "CAPTCHA cannot be enabled when OAuth delegation is enabled" + "CAPTCHA cannot be enabled when OAuth delegation is enabled", + ("captcha", "enable_registration_captcha"), ) if root.experimental.msc3882_enabled: raise ConfigError( - "MSC3882 cannot be enabled when OAuth delegation is enabled" + "MSC3882 cannot be enabled when OAuth delegation is enabled", + ("experimental_features", "msc3882_enabled"), ) if root.registration.refresh_token_lifetime: raise ConfigError( - "refresh_token_lifetime cannot be set when OAuth delegation is enabled" + "refresh_token_lifetime cannot be set when OAuth delegation is enabled", + ("refresh_token_lifetime",), ) if root.registration.nonrefreshable_access_token_lifetime: raise ConfigError( - "nonrefreshable_access_token_lifetime cannot be set when OAuth delegation is enabled" + "nonrefreshable_access_token_lifetime cannot be set when OAuth delegation is enabled", + ("nonrefreshable_access_token_lifetime",), ) if root.registration.session_lifetime: raise ConfigError( - "session_lifetime cannot be set when OAuth delegation is enabled" + "session_lifetime cannot be set when OAuth delegation is enabled", + ("session_lifetime",), ) if not root.experimental.msc3970_enabled: raise ConfigError( - "experimental_features.msc3970_enabled must be 'true' when OAuth delegation is enabled" + "experimental_features.msc3970_enabled must be 'true' when OAuth 
delegation is enabled", + ("experimental_features", "msc3970_enabled"), ) @@ -373,7 +384,12 @@ class ExperimentalConfig(Config): ) # MSC3861: Matrix architecture change to delegate authentication via OIDC - self.msc3861 = MSC3861(**experimental.get("msc3861", {})) + try: + self.msc3861 = MSC3861(**experimental.get("msc3861", {})) + except ValueError as exc: + raise ConfigError( + "Invalid MSC3861 configuration", ("experimental", "msc3861") + ) from exc # MSC3970: Scope transaction IDs to devices self.msc3970_enabled = experimental.get("msc3970_enabled", self.msc3861.enabled) diff --git a/tests/config/test_oauth_delegation.py b/tests/config/test_oauth_delegation.py index 6d294e0144..2ead721b00 100644 --- a/tests/config/test_oauth_delegation.py +++ b/tests/config/test_oauth_delegation.py @@ -12,15 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict from unittest.mock import Mock from synapse.config import ConfigError +from synapse.config.homeserver import HomeServerConfig from synapse.module_api import ModuleApi from synapse.types import JsonDict -from tests.server import get_clock -from tests.unittest import HomeserverTestCase, override_config, skip_unless +from tests.server import get_clock, setup_test_homeserver +from tests.unittest import TestCase, skip_unless +from tests.utils import default_config try: import authlib # noqa: F401 @@ -51,45 +52,15 @@ class CustomAuthModule: ) -def _dict_merge(merge_dict: dict, into_dict: dict) -> None: - """Do a deep merge of two dicts - - Recursively merges `merge_dict` into `into_dict`: - * For keys where both `merge_dict` and `into_dict` have a dict value, the values - are recursively merged - * For all other keys, the values in `into_dict` (if any) are overwritten with - the value from `merge_dict`. 
- - Args: - merge_dict: dict to merge - into_dict: target dict to be modified - """ - for k, v in merge_dict.items(): - if k not in into_dict: - into_dict[k] = v - continue - - current_val = into_dict[k] - - if isinstance(v, dict) and isinstance(current_val, dict): - _dict_merge(v, current_val) - continue - - # otherwise we just overwrite - into_dict[k] = v - - @skip_unless(HAS_AUTHLIB, "requires authlib") -class MSC3861OAuthDelegation(HomeserverTestCase): +class MSC3861OAuthDelegation(TestCase): """Test that the Homeserver fails to initialize if the config is invalid.""" def setUp(self) -> None: - self.reactor, self.clock = get_clock() - self._hs_args = {"clock": self.clock, "reactor": self.reactor} - - def default_config(self) -> Dict[str, Any]: - default_extra_config = { + self.config_dict: JsonDict = { + **default_config("test"), "public_baseurl": BASE_URL, + "enable_registration": False, "experimental_features": { "msc3861": { "enabled": True, @@ -100,198 +71,187 @@ class MSC3861OAuthDelegation(HomeserverTestCase): } }, } - _dict_merge( - {} if self._extra_config is None else self._extra_config, - default_extra_config, - ) - self._extra_config = default_extra_config - return super().default_config() - @override_config( - { - "enable_registration": False, - } - ) + def parse_config(self) -> HomeServerConfig: + config = HomeServerConfig() + config.parse_config_dict(self.config_dict, "", "") + return config + def test_client_secret_post_works(self) -> None: - self.setup_test_homeserver() + self.config_dict["experimental_features"]["msc3861"].update( + client_auth_method="client_secret_post", + client_secret=CLIENT_SECRET, + ) - @override_config( - { - "enable_registration": False, - "experimental_features": { - "msc3861": { - "client_auth_method": "invalid", - } - }, - } - ) - def test_invalid_client_auth_method(self) -> None: - with self.assertRaises(ValueError): - self.setup_test_homeserver() + self.parse_config() + + def test_client_secret_post_requires_client_secret(self) -> None: + self.config_dict["experimental_features"]["msc3861"].update( + client_auth_method="client_secret_post", + client_secret=None, + ) - @override_config( - { - "enable_registration": False, - "experimental_features": { - "msc3861": { - "client_auth_method": "private_key_jwt", - } - }, - } - ) - def test_invalid_private_key_jwt(self) -> None: with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.parse_config() + + def test_client_secret_basic_works(self) -> None: + self.config_dict["experimental_features"]["msc3861"].update( + client_auth_method="client_secret_basic", + client_secret=CLIENT_SECRET, + ) + + self.parse_config() + + def test_client_secret_basic_requires_client_secret(self) -> None: + self.config_dict["experimental_features"]["msc3861"].update( + client_auth_method="client_secret_basic", + client_secret=None, + ) + + with self.assertRaises(ConfigError): + self.parse_config() + + def test_client_secret_jwt_works(self) -> None: + self.config_dict["experimental_features"]["msc3861"].update( + client_auth_method="client_secret_jwt", + client_secret=CLIENT_SECRET, + ) + + self.parse_config() + + def test_client_secret_jwt_requires_client_secret(self) -> None: + self.config_dict["experimental_features"]["msc3861"].update( + client_auth_method="client_secret_jwt", + client_secret=None, + ) + + with self.assertRaises(ConfigError): + self.parse_config() + + def test_invalid_client_auth_method(self) -> None: + self.config_dict["experimental_features"]["msc3861"].update( + 
client_auth_method="invalid", + ) + + with self.assertRaises(ConfigError): + self.parse_config() + + def test_private_key_jwt_requires_jwk(self) -> None: + self.config_dict["experimental_features"]["msc3861"].update( + client_auth_method="private_key_jwt", + ) + + with self.assertRaises(ConfigError): + self.parse_config() - @override_config( - { - "enable_registration": False, - "experimental_features": { - "msc3861": { - "client_auth_method": "private_key_jwt", - "jwk": { - "p": "-frVdP_tZ-J_nIR6HNMDq1N7aunwm51nAqNnhqIyuA8ikx7LlQED1tt2LD3YEvYyW8nxE2V95HlCRZXQPMiRJBFOsbmYkzl2t-MpavTaObB_fct_JqcRtdXddg4-_ihdjRDwUOreq_dpWh6MIKsC3UyekfkHmeEJg5YpOTL15j8", - "kty": "RSA", - "q": "oFw-Enr_YozQB1ab-kawn4jY3yHi8B1nSmYT0s8oTCflrmps5BFJfCkHL5ij3iY15z0o2m0N-jjB1oSJ98O4RayEEYNQlHnTNTl0kRIWzpoqblHUIxVcahIpP_xTovBJzwi8XXoLGqHOOMA-r40LSyVgP2Ut8D9qBwV6_UfT0LU", - "d": "WFkDPYo4b4LIS64D_QtQfGGuAObPvc3HFfp9VZXyq3SJR58XZRHE0jqtlEMNHhOTgbMYS3w8nxPQ_qVzY-5hs4fIanwvB64mAoOGl0qMHO65DTD_WsGFwzYClJPBVniavkLE2Hmpu8IGe6lGliN8vREC6_4t69liY-XcN_ECboVtC2behKkLOEASOIMuS7YcKAhTJFJwkl1dqDlliEn5A4u4xy7nuWQz3juB1OFdKlwGA5dfhDNglhoLIwNnkLsUPPFO-WB5ZNEW35xxHOToxj4bShvDuanVA6mJPtTKjz0XibjB36bj_nF_j7EtbE2PdGJ2KevAVgElR4lqS4ISgQ", - "e": "AQAB", - "kid": "test", - "qi": "cPfNk8l8W5exVNNea4d7QZZ8Qr8LgHghypYAxz8PQh1fNa8Ya1SNUDVzC2iHHhszxxA0vB9C7jGze8dBrvnzWYF1XvQcqNIVVgHhD57R1Nm3dj2NoHIKe0Cu4bCUtP8xnZQUN4KX7y4IIcgRcBWG1hT6DEYZ4BxqicnBXXNXAUI", - "dp": "dKlMHvslV1sMBQaKWpNb3gPq0B13TZhqr3-E2_8sPlvJ3fD8P4CmwwnOn50JDuhY3h9jY5L06sBwXjspYISVv8hX-ndMLkEeF3lrJeA5S70D8rgakfZcPIkffm3tlf1Ok3v5OzoxSv3-67Df4osMniyYwDUBCB5Oq1tTx77xpU8", - "dq": "S4ooU1xNYYcjl9FcuJEEMqKsRrAXzzSKq6laPTwIp5dDwt2vXeAm1a4eDHXC-6rUSZGt5PbqVqzV4s-cjnJMI8YYkIdjNg4NSE1Ac_YpeDl3M3Colb5CQlU7yUB7xY2bt0NOOFp9UJZYJrOo09mFMGjy5eorsbitoZEbVqS3SuE", - "n": "nJbYKqFwnURKimaviyDFrNLD3gaKR1JW343Qem25VeZxoMq1665RHVoO8n1oBm4ClZdjIiZiVdpyqzD5-Ow12YQgQEf1ZHP3CCcOQQhU57Rh5XvScTe5IxYVkEW32IW2mp_CJ6WfjYpfeL4azarVk8H3Vr59d1rSrKTVVinVdZer9YLQyC_rWAQNtHafPBMrf6RYiNGV9EiYn72wFIXlLlBYQ9Fx7bfe1PaL6qrQSsZP3_rSpuvVdLh1lqGeCLR0pyclA9uo5m2tMyCXuuGQLbA_QJm5xEc7zd-WFdux2eXF045oxnSZ_kgQt-pdN7AxGWOVvwoTf9am6mSkEdv6iw", - }, - } - }, - } - ) def test_private_key_jwt_works(self) -> None: - self.setup_test_homeserver() + self.config_dict["experimental_features"]["msc3861"].update( + client_auth_method="private_key_jwt", + jwk={ + "p": "-frVdP_tZ-J_nIR6HNMDq1N7aunwm51nAqNnhqIyuA8ikx7LlQED1tt2LD3YEvYyW8nxE2V95HlCRZXQPMiRJBFOsbmYkzl2t-MpavTaObB_fct_JqcRtdXddg4-_ihdjRDwUOreq_dpWh6MIKsC3UyekfkHmeEJg5YpOTL15j8", + "kty": "RSA", + "q": "oFw-Enr_YozQB1ab-kawn4jY3yHi8B1nSmYT0s8oTCflrmps5BFJfCkHL5ij3iY15z0o2m0N-jjB1oSJ98O4RayEEYNQlHnTNTl0kRIWzpoqblHUIxVcahIpP_xTovBJzwi8XXoLGqHOOMA-r40LSyVgP2Ut8D9qBwV6_UfT0LU", + "d": "WFkDPYo4b4LIS64D_QtQfGGuAObPvc3HFfp9VZXyq3SJR58XZRHE0jqtlEMNHhOTgbMYS3w8nxPQ_qVzY-5hs4fIanwvB64mAoOGl0qMHO65DTD_WsGFwzYClJPBVniavkLE2Hmpu8IGe6lGliN8vREC6_4t69liY-XcN_ECboVtC2behKkLOEASOIMuS7YcKAhTJFJwkl1dqDlliEn5A4u4xy7nuWQz3juB1OFdKlwGA5dfhDNglhoLIwNnkLsUPPFO-WB5ZNEW35xxHOToxj4bShvDuanVA6mJPtTKjz0XibjB36bj_nF_j7EtbE2PdGJ2KevAVgElR4lqS4ISgQ", + "e": "AQAB", + "kid": "test", + "qi": "cPfNk8l8W5exVNNea4d7QZZ8Qr8LgHghypYAxz8PQh1fNa8Ya1SNUDVzC2iHHhszxxA0vB9C7jGze8dBrvnzWYF1XvQcqNIVVgHhD57R1Nm3dj2NoHIKe0Cu4bCUtP8xnZQUN4KX7y4IIcgRcBWG1hT6DEYZ4BxqicnBXXNXAUI", + "dp": "dKlMHvslV1sMBQaKWpNb3gPq0B13TZhqr3-E2_8sPlvJ3fD8P4CmwwnOn50JDuhY3h9jY5L06sBwXjspYISVv8hX-ndMLkEeF3lrJeA5S70D8rgakfZcPIkffm3tlf1Ok3v5OzoxSv3-67Df4osMniyYwDUBCB5Oq1tTx77xpU8", + "dq": 
"S4ooU1xNYYcjl9FcuJEEMqKsRrAXzzSKq6laPTwIp5dDwt2vXeAm1a4eDHXC-6rUSZGt5PbqVqzV4s-cjnJMI8YYkIdjNg4NSE1Ac_YpeDl3M3Colb5CQlU7yUB7xY2bt0NOOFp9UJZYJrOo09mFMGjy5eorsbitoZEbVqS3SuE", + "n": "nJbYKqFwnURKimaviyDFrNLD3gaKR1JW343Qem25VeZxoMq1665RHVoO8n1oBm4ClZdjIiZiVdpyqzD5-Ow12YQgQEf1ZHP3CCcOQQhU57Rh5XvScTe5IxYVkEW32IW2mp_CJ6WfjYpfeL4azarVk8H3Vr59d1rSrKTVVinVdZer9YLQyC_rWAQNtHafPBMrf6RYiNGV9EiYn72wFIXlLlBYQ9Fx7bfe1PaL6qrQSsZP3_rSpuvVdLh1lqGeCLR0pyclA9uo5m2tMyCXuuGQLbA_QJm5xEc7zd-WFdux2eXF045oxnSZ_kgQt-pdN7AxGWOVvwoTf9am6mSkEdv6iw", + }, + ) + self.parse_config() def test_registration_cannot_be_enabled(self) -> None: + self.config_dict["enable_registration"] = True with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.parse_config() - @override_config( - { - "enable_registration": False, - "password_config": { - "enabled": True, - }, - } - ) def test_password_config_cannot_be_enabled(self) -> None: + self.config_dict["password_config"] = {"enabled": True} with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.parse_config() - @override_config( - { - "enable_registration": False, - "oidc_providers": [ - { - "idp_id": "microsoft", - "idp_name": "Microsoft", - "issuer": "https://login.microsoftonline.com//v2.0", - "client_id": "", - "client_secret": "", - "scopes": ["openid", "profile"], - "authorization_endpoint": "https://login.microsoftonline.com//oauth2/v2.0/authorize", - "token_endpoint": "https://login.microsoftonline.com//oauth2/v2.0/token", - "userinfo_endpoint": "https://graph.microsoft.com/oidc/userinfo", - } - ], - } - ) def test_oidc_sso_cannot_be_enabled(self) -> None: - with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.config_dict["oidc_providers"] = [ + { + "idp_id": "microsoft", + "idp_name": "Microsoft", + "issuer": "https://login.microsoftonline.com//v2.0", + "client_id": "", + "client_secret": "", + "scopes": ["openid", "profile"], + "authorization_endpoint": "https://login.microsoftonline.com//oauth2/v2.0/authorize", + "token_endpoint": "https://login.microsoftonline.com//oauth2/v2.0/token", + "userinfo_endpoint": "https://graph.microsoft.com/oidc/userinfo", + } + ] + + with self.assertRaises(ConfigError): + self.parse_config() - @override_config( - { - "enable_registration": False, - "cas_config": { - "enabled": True, - "server_url": "https://cas-server.com", - "displayname_attribute": "name", - "required_attributes": {"userGroup": "staff", "department": "None"}, - }, - } - ) def test_cas_sso_cannot_be_enabled(self) -> None: - with self.assertRaises(ConfigError): - self.setup_test_homeserver() - - @override_config( - { - "enable_registration": False, - "modules": [ - { - "module": f"{__name__}.{CustomAuthModule.__qualname__}", - "config": {}, - } - ], + self.config_dict["cas_config"] = { + "enabled": True, + "server_url": "https://cas-server.com", + "displayname_attribute": "name", + "required_attributes": {"userGroup": "staff", "department": "None"}, } - ) + + with self.assertRaises(ConfigError): + self.parse_config() + def test_auth_providers_cannot_be_enabled(self) -> None: - with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.config_dict["modules"] = [ + { + "module": f"{__name__}.{CustomAuthModule.__qualname__}", + "config": {}, + } + ] + + # This requires actually setting up an HS, as the module will be run on setup, + # which should raise as the module tries to register an auth provider + config = self.parse_config() + reactor, clock = get_clock() + with self.assertRaises(ConfigError): + 
setup_test_homeserver( + self.addCleanup, reactor=reactor, clock=clock, config=config + ) - @override_config( - { - "enable_registration": False, - "jwt_config": { - "enabled": True, - "secret": "my-secret-token", - "algorithm": "HS256", - }, - } - ) def test_jwt_auth_cannot_be_enabled(self) -> None: - with self.assertRaises(ConfigError): - self.setup_test_homeserver() - - @override_config( - { - "enable_registration": False, - "experimental_features": { - "msc3882_enabled": True, - }, + self.config_dict["jwt_config"] = { + "enabled": True, + "secret": "my-secret-token", + "algorithm": "HS256", } - ) + + with self.assertRaises(ConfigError): + self.parse_config() + def test_msc3882_auth_cannot_be_enabled(self) -> None: + self.config_dict["experimental_features"]["msc3882_enabled"] = True with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.parse_config() - @override_config( - { - "enable_registration": False, - "recaptcha_public_key": "test", - "recaptcha_private_key": "test", - "enable_registration_captcha": True, - } - ) def test_captcha_cannot_be_enabled(self) -> None: + self.config_dict.update( + enable_registration_captcha=True, + recaptcha_public_key="test", + recaptcha_private_key="test", + ) with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.parse_config() - @override_config( - { - "enable_registration": False, - "refresh_token_lifetime": "24h", - "refreshable_access_token_lifetime": "10m", - "nonrefreshable_access_token_lifetime": "24h", - } - ) def test_refreshable_tokens_cannot_be_enabled(self) -> None: + self.config_dict.update( + refresh_token_lifetime="24h", + refreshable_access_token_lifetime="10m", + nonrefreshable_access_token_lifetime="24h", + ) with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.parse_config() - @override_config( - { - "enable_registration": False, - "session_lifetime": "24h", - } - ) def test_session_lifetime_cannot_be_set(self) -> None: + self.config_dict["session_lifetime"] = "24h" with self.assertRaises(ConfigError): - self.setup_test_homeserver() + self.parse_config() From ceb3dd77db0d3ce992d40175c3f53f6b6ddfa168 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Fri, 26 May 2023 15:16:34 +0200 Subject: [PATCH 070/562] Enforce that an admin token also has the basic Matrix API scope --- synapse/api/auth/msc3861_delegated.py | 7 ++----- tests/handlers/test_oauth_delegation.py | 26 ++++++++++++++++++++++++- 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index e4b16c0b5c..31c1de0119 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -248,13 +248,10 @@ class MSC3861DelegatedAuth(BaseAuth): scope: List[str] = scope_to_list(introspection_result.get("scope", "")) # Determine type of user based on presence of particular scopes - has_admin_scope = SCOPE_SYNAPSE_ADMIN in scope has_user_scope = SCOPE_MATRIX_API in scope has_guest_scope = SCOPE_MATRIX_GUEST in scope - is_user = has_user_scope or has_admin_scope - is_guest = has_guest_scope and not is_user - if not is_user and not is_guest: + if not has_user_scope and not has_guest_scope: raise InvalidClientTokenError("No scope in token granting user rights") # Match via the sub claim @@ -351,5 +348,5 @@ class MSC3861DelegatedAuth(BaseAuth): user_id=user_id, device_id=device_id, scope=scope, - is_guest=is_guest, + is_guest=(has_guest_scope and not has_user_scope), ) diff --git 
a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index 0641535512..6309d7b36e 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -224,6 +224,30 @@ class MSC3861OAuthDelegation(HomeserverTestCase): ) self._assertParams() + def test_active_admin_not_user(self) -> None: + """The handler should raise when the scope has admin right but not user.""" + + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([SYNAPSE_ADMIN_SCOPE]), + "username": USERNAME, + }, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + self.get_failure(self.auth.get_user_by_req(request), InvalidClientTokenError) + self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + self._assertParams() + def test_active_admin(self) -> None: """The handler should return a requester with admin rights.""" @@ -233,7 +257,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): payload={ "active": True, "sub": SUBJECT, - "scope": " ".join([SYNAPSE_ADMIN_SCOPE]), + "scope": " ".join([SYNAPSE_ADMIN_SCOPE, MATRIX_USER_SCOPE]), "username": USERNAME, }, ) From c01343de43b86eb4a6c055547369d07c198a435f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 31 May 2023 07:18:29 -0400 Subject: [PATCH 071/562] Add stricter mypy options (#15694) Enable warn_unused_configs, strict_concatenate, disallow_subclassing_any, and disallow_incomplete_defs. --- changelog.d/15694.misc | 1 + mypy.ini | 23 ++++++++++++++++++++--- synapse/api/auth/msc3861_delegated.py | 2 +- synapse/federation/federation_server.py | 4 ++-- synapse/handlers/oidc.py | 2 +- synapse/handlers/pagination.py | 4 ++-- synapse/http/server.py | 14 +++++++------- synapse/util/__init__.py | 4 ++-- synapse/util/async_helpers.py | 2 +- synapse/util/caches/lrucache.py | 6 ++---- tests/server.py | 2 +- 11 files changed, 40 insertions(+), 24 deletions(-) create mode 100644 changelog.d/15694.misc diff --git a/changelog.d/15694.misc b/changelog.d/15694.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/15694.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index a7ec66196d..56cd1d560e 100644 --- a/mypy.ini +++ b/mypy.ini @@ -2,17 +2,29 @@ namespace_packages = True plugins = pydantic.mypy, mypy_zope:plugin, scripts-dev/mypy_synapse_plugin.py follow_imports = normal -check_untyped_defs = True show_error_codes = True show_traceback = True mypy_path = stubs warn_unreachable = True -warn_unused_ignores = True local_partial_types = True no_implicit_optional = True + +# Strict checks, see mypy --help +warn_unused_configs = True +# disallow_any_generics = True +disallow_subclassing_any = True +# disallow_untyped_calls = True disallow_untyped_defs = True -strict_equality = True +disallow_incomplete_defs = True +# check_untyped_defs = True +# disallow_untyped_decorators = True warn_redundant_casts = True +warn_unused_ignores = True +# warn_return_any = True +# no_implicit_reexport = True +strict_equality = True +strict_concatenate = True + # Run mypy type checking with the minimum supported Python version to catch new usage # that isn't backwards-compatible (types, overloads, etc). 
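# For example, with the pin below, a use of `str.removeprefix` (added in
# Python 3.9) is flagged even when mypy itself runs under a newer interpreter.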
python_version = 3.8 @@ -31,6 +43,7 @@ warn_unused_ignores = False [mypy-synapse.util.caches.treecache] disallow_untyped_defs = False +disallow_incomplete_defs = False ;; Dependencies without annotations ;; Before ignoring a module, check to see if type stubs are available. @@ -40,6 +53,7 @@ disallow_untyped_defs = False ;; which we can pull in as a dev dependency by adding to `pyproject.toml`'s ;; `[tool.poetry.dev-dependencies]` list. +# https://github.com/lepture/authlib/issues/460 [mypy-authlib.*] ignore_missing_imports = True @@ -49,9 +63,11 @@ ignore_missing_imports = True [mypy-lxml] ignore_missing_imports = True +# https://github.com/msgpack/msgpack-python/issues/448 [mypy-msgpack] ignore_missing_imports = True +# https://github.com/wolever/parameterized/issues/143 [mypy-parameterized.*] ignore_missing_imports = True @@ -73,6 +89,7 @@ ignore_missing_imports = True [mypy-srvlookup.*] ignore_missing_imports = True +# https://github.com/twisted/treq/pull/366 [mypy-treq.*] ignore_missing_imports = True diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 31c1de0119..bd4fc9c0ee 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -59,7 +59,7 @@ def scope_to_list(scope: str) -> List[str]: return scope.strip().split(" ") -class PrivateKeyJWTWithKid(PrivateKeyJWT): +class PrivateKeyJWTWithKid(PrivateKeyJWT): # type: ignore[misc] """An implementation of the private_key_jwt client auth method that includes a kid header. This is needed because some providers (Keycloak) require the kid header to figure diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index e17cb840de..149351dda0 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -515,7 +515,7 @@ class FederationServer(FederationBase): logger.error( "Failed to handle PDU %s", event_id, - exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore + exc_info=(f.type, f.value, f.getTracebackObject()), ) return {"error": str(e)} @@ -1247,7 +1247,7 @@ class FederationServer(FederationBase): logger.error( "Failed to handle PDU %s", event.event_id, - exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore + exc_info=(f.type, f.value, f.getTracebackObject()), ) received_ts = await self.store.remove_received_event_from_staging( diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index e7e0b5e049..24b68e0301 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -1354,7 +1354,7 @@ class OidcProvider: finish_request(request) -class LogoutToken(JWTClaims): +class LogoutToken(JWTClaims): # type: ignore[misc] """ Holds and verify claims of a logout token, as per https://openid.net/specs/openid-connect-backchannel-1_0.html#LogoutToken diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 63b35c8d62..d5257acb7d 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -360,7 +360,7 @@ class PaginationHandler: except Exception: f = Failure() logger.error( - "[purge] failed", exc_info=(f.type, f.value, f.getTracebackObject()) # type: ignore + "[purge] failed", exc_info=(f.type, f.value, f.getTracebackObject()) ) self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED self._purges_by_id[purge_id].error = f.getErrorMessage() @@ -689,7 +689,7 @@ class PaginationHandler: f = Failure() logger.error( "failed", - exc_info=(f.type, f.value, f.getTracebackObject()), # type: 
ignore + exc_info=(f.type, f.value, f.getTracebackObject()), ) self._delete_by_id[delete_id].status = DeleteStatus.STATUS_FAILED self._delete_by_id[delete_id].error = f.getErrorMessage() diff --git a/synapse/http/server.py b/synapse/http/server.py index 04768c6a23..933172c873 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -108,7 +108,7 @@ def return_json_error( if f.check(SynapseError): # mypy doesn't understand that f.check asserts the type. - exc: SynapseError = f.value # type: ignore + exc: SynapseError = f.value error_code = exc.code error_dict = exc.error_dict(config) if exc.headers is not None: @@ -124,7 +124,7 @@ def return_json_error( "Got cancellation before client disconnection from %r: %r", request.request_metrics.name, request, - exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type] + exc_info=(f.type, f.value, f.getTracebackObject()), ) else: error_code = 500 @@ -134,7 +134,7 @@ def return_json_error( "Failed handle request via %r: %r", request.request_metrics.name, request, - exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type] + exc_info=(f.type, f.value, f.getTracebackObject()), ) # Only respond with an error response if we haven't already started writing, @@ -172,7 +172,7 @@ def return_html_error( """ if f.check(CodeMessageException): # mypy doesn't understand that f.check asserts the type. - cme: CodeMessageException = f.value # type: ignore + cme: CodeMessageException = f.value code = cme.code msg = cme.msg if cme.headers is not None: @@ -189,7 +189,7 @@ def return_html_error( logger.error( "Failed handle request %r", request, - exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type] + exc_info=(f.type, f.value, f.getTracebackObject()), ) elif f.check(CancelledError): code = HTTP_STATUS_REQUEST_CANCELLED @@ -199,7 +199,7 @@ def return_html_error( logger.error( "Got cancellation before client disconnection when handling request %r", request, - exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type] + exc_info=(f.type, f.value, f.getTracebackObject()), ) else: code = HTTPStatus.INTERNAL_SERVER_ERROR @@ -208,7 +208,7 @@ def return_html_error( logger.error( "Failed handle request %r", request, - exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type] + exc_info=(f.type, f.value, f.getTracebackObject()), ) if isinstance(error_template, str): diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 9ddd26ccaa..7ea0c4c36b 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -76,7 +76,7 @@ def unwrapFirstError(failure: Failure) -> Failure: # the subFailure's value, which will do a better job of preserving stacktraces. 
# (actually, you probably want to use yieldable_gather_results anyway) failure.trap(defer.FirstError) - return failure.value.subFailure # type: ignore[union-attr] # Issue in Twisted's annotations + return failure.value.subFailure P = ParamSpec("P") @@ -178,7 +178,7 @@ def log_failure( """ logger.error( - msg, exc_info=(failure.type, failure.value, failure.getTracebackObject()) # type: ignore[arg-type] + msg, exc_info=(failure.type, failure.value, failure.getTracebackObject()) ) if not consumeErrors: diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 01e3cd46f6..4041e49e71 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -138,7 +138,7 @@ class ObservableDeferred(Generic[_T], AbstractObservableDeferred[_T]): for observer in observers: # This is a little bit of magic to correctly propagate stack # traces when we `await` on one of the observer deferreds. - f.value.__failure__ = f # type: ignore[union-attr] + f.value.__failure__ = f try: observer.errback(f) except Exception as e: diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 452d5d04c1..ed0da17227 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -93,10 +93,8 @@ VT = TypeVar("VT") # a general type var, distinct from either KT or VT T = TypeVar("T") -P = TypeVar("P") - -class _TimedListNode(ListNode[P]): +class _TimedListNode(ListNode[T]): """A `ListNode` that tracks last access time.""" __slots__ = ["last_access_ts_secs"] @@ -821,7 +819,7 @@ class AsyncLruCache(Generic[KT, VT]): utilize external cache systems that require await behaviour to be created. """ - def __init__(self, *args, **kwargs): # type: ignore + def __init__(self, *args: Any, **kwargs: Any): self._lru_cache: LruCache[KT, VT] = LruCache(*args, **kwargs) async def get( diff --git a/tests/server.py b/tests/server.py index 7296f0a552..a12c3e3b9a 100644 --- a/tests/server.py +++ b/tests/server.py @@ -642,7 +642,7 @@ def _make_test_homeserver_synchronous(server: HomeServer) -> None: pool.runWithConnection = runWithConnection # type: ignore[assignment] pool.runInteraction = runInteraction # type: ignore[assignment] # Replace the thread pool with a threadless 'thread' pool - pool.threadpool = ThreadPool(clock._reactor) # type: ignore[assignment] + pool.threadpool = ThreadPool(clock._reactor) pool.running = True # We've just changed the Databases to run DB transactions on the same From daf3a679089770e00d1b70d8ed2f91ab108b73e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gabriel=20F=C3=A9ron?= Date: Wed, 31 May 2023 15:18:37 +0200 Subject: [PATCH 072/562] Add get_canonical_room_alias to module API (#15450) Co-authored-by: Boxdot --- changelog.d/15450.feature | 1 + synapse/module_api/__init__.py | 27 +++++++++++++++++++++++++++ synapse/storage/controllers/state.py | 2 +- 3 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15450.feature diff --git a/changelog.d/15450.feature b/changelog.d/15450.feature new file mode 100644 index 0000000000..2102381143 --- /dev/null +++ b/changelog.d/15450.feature @@ -0,0 +1 @@ +Support resolving a room's [canonical alias](https://spec.matrix.org/v1.7/client-server-api/#mroomcanonical_alias) via the module API. 
\ No newline at end of file diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 134bd2e620..a8d6224a45 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -122,6 +122,7 @@ from synapse.types import ( JsonMapping, Requester, RoomAlias, + RoomID, StateMap, UserID, UserInfo, @@ -1570,6 +1571,32 @@ class ModuleApi: start_timestamp, end_timestamp ) + async def get_canonical_room_alias(self, room_id: RoomID) -> Optional[RoomAlias]: + """ + Retrieve the given room's current canonical alias. + + A room may declare an alias as "canonical", meaning that it is the + preferred alias to use when referring to the room. This function + retrieves that alias from the room's state. + + Added in Synapse v1.86.0. + + Args: + room_id: The Room ID to find the alias of. + + Returns: + None if the room ID does not exist, or if the room exists but has no canonical alias. + Otherwise, the parsed room alias. + """ + room_alias_str = ( + await self._storage_controllers.state.get_canonical_alias_for_room( + room_id.to_string() + ) + ) + if room_alias_str: + return RoomAlias.from_string(room_alias_str) + return None + async def lookup_room_alias(self, room_alias: str) -> Tuple[str, List[str]]: """ Get the room ID associated with a room alias. diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 7089b0a1d8..233df7cce2 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -485,7 +485,7 @@ class StateStorageController: if not event: return None - return event.content.get("canonical_alias") + return event.content.get("alias") @trace @tag_args From 11e15d79b8a0af593fd9467e0cc7f8a9dfcb6c4f Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 31 May 2023 13:59:56 +0000 Subject: [PATCH 073/562] Fix a performance issue introduced in Synapse v1.83.0 which meant that purging rooms was very slow and database-intensive. (#15693) * Add indices required to efficiently validate new foreign key constraints on stream_ordering * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/15693.bugfix | 1 + synapse/storage/databases/state/bg_updates.py | 31 +++++++++++++++++++ .../77/14bg_indices_event_stream_ordering.sql | 20 ++++++++++++ 3 files changed, 52 insertions(+) create mode 100644 changelog.d/15693.bugfix create mode 100644 synapse/storage/schema/main/delta/77/14bg_indices_event_stream_ordering.sql diff --git a/changelog.d/15693.bugfix b/changelog.d/15693.bugfix new file mode 100644 index 0000000000..d0325de007 --- /dev/null +++ b/changelog.d/15693.bugfix @@ -0,0 +1 @@ +Fix a performance issue introduced in Synapse v1.83.0 which meant that purging rooms was very slow and database-intensive. 
\ No newline at end of file diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 86eb1a8a08..5b8ba436d4 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -261,6 +261,16 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore): STATE_GROUPS_ROOM_INDEX_UPDATE_NAME = "state_groups_room_id_idx" STATE_GROUP_EDGES_UNIQUE_INDEX_UPDATE_NAME = "state_group_edges_unique_idx" + CURRENT_STATE_EVENTS_STREAM_ORDERING_INDEX_UPDATE_NAME = ( + "current_state_events_stream_ordering_idx" + ) + ROOM_MEMBERSHIPS_STREAM_ORDERING_INDEX_UPDATE_NAME = ( + "room_memberships_stream_ordering_idx" + ) + LOCAL_CURRENT_MEMBERSHIP_STREAM_ORDERING_INDEX_UPDATE_NAME = ( + "local_current_membership_stream_ordering_idx" + ) + def __init__( self, database: DatabasePool, @@ -297,6 +307,27 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore): replaces_index="state_group_edges_idx", ) + # These indices are needed to validate the foreign key constraint + # when events are deleted. + self.db_pool.updates.register_background_index_update( + self.CURRENT_STATE_EVENTS_STREAM_ORDERING_INDEX_UPDATE_NAME, + index_name="current_state_events_stream_ordering_idx", + table="current_state_events", + columns=["event_stream_ordering"], + ) + self.db_pool.updates.register_background_index_update( + self.ROOM_MEMBERSHIPS_STREAM_ORDERING_INDEX_UPDATE_NAME, + index_name="room_memberships_stream_ordering_idx", + table="room_memberships", + columns=["event_stream_ordering"], + ) + self.db_pool.updates.register_background_index_update( + self.LOCAL_CURRENT_MEMBERSHIP_STREAM_ORDERING_INDEX_UPDATE_NAME, + index_name="local_current_membership_stream_ordering_idx", + table="local_current_membership", + columns=["event_stream_ordering"], + ) + async def _background_deduplicate_state( self, progress: dict, batch_size: int ) -> int: diff --git a/synapse/storage/schema/main/delta/77/14bg_indices_event_stream_ordering.sql b/synapse/storage/schema/main/delta/77/14bg_indices_event_stream_ordering.sql new file mode 100644 index 0000000000..ec8cd522ec --- /dev/null +++ b/synapse/storage/schema/main/delta/77/14bg_indices_event_stream_ordering.sql @@ -0,0 +1,20 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (ordering, update_name, progress_json) + VALUES + (7714, 'current_state_events_stream_ordering_idx', '{}'), + (7714, 'local_current_membership_stream_ordering_idx', '{}'), + (7714, 'room_memberships_stream_ordering_idx', '{}'); From 874378c0523bb82314434f1f0f2c5e1462a34a5b Mon Sep 17 00:00:00 2001 From: Jason Little Date: Wed, 31 May 2023 10:13:31 -0500 Subject: [PATCH 074/562] Docker fully qualified image names (#15689) * Fully qualified docker image names for the main Dockerfile and Complement related. * Fully qualified docker image names for Dockerfiles associated with building Debian release artifacts. 
This one is harder, and is kept separate from the other commit in case it turns out to be incorrect or unwanted. I decided to expand the docker image names in the Dockerfile itself, rather than in the various places that select which distribution to build, since those scripts break the string apart for tagging and similar purposes and changing them would have been more invasive. This change is untested. * Changelog * Update docker/Dockerfile-workers * Update docker/complement/Dockerfile --------- Co-authored-by: reivilibre --- .github/workflows/release-artifacts.yml | 1 + changelog.d/15689.misc | 1 + docker/Dockerfile | 6 +++--- docker/Dockerfile-dhvirtualenv | 4 ++-- docker/Dockerfile-workers | 4 ++-- docker/complement/Dockerfile | 5 +++-- docker/editable.Dockerfile | 2 +- scripts-dev/build_debian_packages.py | 2 ++ 8 files changed, 15 insertions(+), 10 deletions(-) create mode 100644 changelog.d/15689.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index ebd7d298a9..0981200401 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -34,6 +34,7 @@ jobs: - id: set-distros run: | # if we're running from a tag, get the full list of distros; otherwise just use debian:sid + # NOTE: inside the actual Dockerfile-dhvirtualenv, the image name is expanded into its full image path dists='["debian:sid"]' if [[ $GITHUB_REF == refs/tags/* ]]; then dists=$(scripts-dev/build_debian_packages.py --show-dists-json) diff --git a/changelog.d/15689.misc b/changelog.d/15689.misc new file mode 100644 index 0000000000..4262cc9515 --- /dev/null +++ b/changelog.d/15689.misc @@ -0,0 +1 @@ +Add fully qualified docker image names to Dockerfiles. diff --git a/docker/Dockerfile b/docker/Dockerfile index 6107dced43..12cff84131 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -27,7 +27,7 @@ ARG PYTHON_VERSION=3.11 ### # We hardcode the use of Debian bullseye here because this could change upstream # and other Dockerfiles used for testing are expecting bullseye. -FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements +FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye as requirements # RUN --mount is specific to buildkit and is documented at # https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount. @@ -87,7 +87,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \ ### ### Stage 1: builder ### -FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder +FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye as builder # install the OS build deps RUN \ @@ -158,7 +158,7 @@ RUN --mount=type=cache,target=/synapse/target,sharing=locked \ ### Stage 2: runtime ### -FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye +FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse' LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md' diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv index 2013732422..861129ebc2 100644 --- a/docker/Dockerfile-dhvirtualenv +++ b/docker/Dockerfile-dhvirtualenv @@ -24,7 +24,7 @@ ARG distro="" # https://launchpad.net/~jyrki-pulliainen/+archive/ubuntu/dh-virtualenv, but # it's not obviously easier to use that than to build our own.)
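For background on the renames in these Dockerfiles: Docker resolves a bare image name by defaulting the registry to docker.io and, for official images, the namespace to library/; the patch writes that expansion out explicitly so builds no longer depend on the daemon's short-name resolution. An illustrative model of the rule (an assumption based on standard Docker Hub defaulting, not Synapse code):

def qualify(image: str) -> str:
    first = image.split("/", 1)[0]
    if "/" not in image:
        # Bare official image, e.g. "debian:bullseye".
        return f"docker.io/library/{image}"
    if "." in first or ":" in first or first == "localhost":
        # The first component already names a registry host.
        return image
    # Namespaced image on the default registry, e.g. "matrixdotorg/synapse".
    return f"docker.io/{image}"


assert qualify("debian:bullseye") == "docker.io/library/debian:bullseye"
assert qualify("redis:6-bullseye") == "docker.io/library/redis:6-bullseye"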
-FROM ${distro} as builder +FROM docker.io/library/${distro} as builder RUN apt-get update -qq -o Acquire::Languages=none RUN env DEBIAN_FRONTEND=noninteractive apt-get install \ @@ -55,7 +55,7 @@ RUN cd /dh-virtualenv && DEB_BUILD_OPTIONS=nodoc dpkg-buildpackage -us -uc -b ### ### Stage 1 ### -FROM ${distro} +FROM docker.io/library/${distro} # Get the distro we want to pull from as a dynamic build variable # (We need to define it in each build stage) diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers index faf7f2cef8..adb9a725e3 100644 --- a/docker/Dockerfile-workers +++ b/docker/Dockerfile-workers @@ -7,7 +7,7 @@ ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION # target image. For repeated rebuilds, this is much faster than apt installing # each time. -FROM debian:bullseye-slim AS deps_base +FROM docker.io/library/debian:bullseye-slim AS deps_base RUN \ --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ @@ -21,7 +21,7 @@ FROM debian:bullseye-slim AS deps_base # which makes it much easier to copy (but we need to make sure we use an image # based on the same debian version as the synapse image, to make sure we get # the expected version of libc. -FROM redis:6-bullseye AS redis_base +FROM docker.io/library/redis:6-bullseye AS redis_base # now build the final image, based on the the regular Synapse docker image FROM $FROM diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile index be1aa1c55e..5103068a49 100644 --- a/docker/complement/Dockerfile +++ b/docker/complement/Dockerfile @@ -7,6 +7,7 @@ # https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse ARG SYNAPSE_VERSION=latest +# This is an intermediate image, to be built locally (not pulled from a registry). ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION FROM $FROM @@ -19,8 +20,8 @@ FROM $FROM # the same debian version as Synapse's docker image (so the versions of the # shared libraries match). RUN adduser --system --uid 999 postgres --home /var/lib/postgresql - COPY --from=postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql - COPY --from=postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql + COPY --from=docker.io/library/postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql + COPY --from=docker.io/library/postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql ENV PATH="${PATH}:/usr/lib/postgresql/13/bin" ENV PGDATA=/var/lib/postgresql/data diff --git a/docker/editable.Dockerfile b/docker/editable.Dockerfile index 0e8cf2e712..c53ce1c718 100644 --- a/docker/editable.Dockerfile +++ b/docker/editable.Dockerfile @@ -10,7 +10,7 @@ ARG PYTHON_VERSION=3.9 ### # We hardcode the use of Debian bullseye here because this could change upstream # and other Dockerfiles used for testing are expecting bullseye. 
-FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye +FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye # Install Rust and other dependencies (stolen from normal Dockerfile) # install the OS build deps diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py index ede7665011..4c9f134ddd 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py @@ -20,6 +20,8 @@ from concurrent.futures import ThreadPoolExecutor from types import FrameType from typing import Collection, Optional, Sequence, Set +# These are expanded inside the dockerfile to be a fully qualified image name. +# e.g. docker.io/library/debian:bullseye DISTS = ( "debian:buster", # oldstable: EOL 2022-08 "debian:bullseye", From 6f18812bb044a2959fdc9881c328578adb7b33f2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 31 May 2023 13:06:57 -0400 Subject: [PATCH 075/562] Add stubs package for lxml. (#15697) The stubs have some issues, so this includes some generous casts and ignores, but that is better than not having stubs. Note that, confusingly, Element is a function which creates _Element instances (and similarly for Comment). --- changelog.d/15697.misc | 1 + mypy.ini | 3 -- poetry.lock | 25 +++++++-- pyproject.toml | 1 + synapse/media/oembed.py | 32 ++++++----- synapse/media/preview_html.py | 79 ++++++++++++++++++++-------- tests/media/test_html_preview.py | 18 ++++++- tests/media/test_oembed.py | 2 +- tests/media/test_url_previewer.py | 2 +- tests/rest/media/test_url_preview.py | 2 +- 10 files changed, 117 insertions(+), 48 deletions(-) create mode 100644 changelog.d/15697.misc diff --git a/changelog.d/15697.misc b/changelog.d/15697.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/15697.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index 56cd1d560e..1038b7d8c7 100644 --- a/mypy.ini +++ b/mypy.ini @@ -60,9 +60,6 @@ ignore_missing_imports = True [mypy-ijson.*] ignore_missing_imports = True -[mypy-lxml] -ignore_missing_imports = True - # https://github.com/msgpack/msgpack-python/issues/448 [mypy-msgpack] ignore_missing_imports = True diff --git a/poetry.lock b/poetry.lock index 0879e64cf1..d8964f5719 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry and should not be changed by hand. +# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand.
[[package]] name = "alabaster" @@ -1215,6 +1215,21 @@ html5 = ["html5lib"] htmlsoup = ["BeautifulSoup4"] source = ["Cython (>=0.29.7)"] +[[package]] +name = "lxml-stubs" +version = "0.4.0" +description = "Type annotations for the lxml package" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "lxml-stubs-0.4.0.tar.gz", hash = "sha256:184877b42127256abc2b932ba8bd0ab5ea80bd0b0fee618d16daa40e0b71abee"}, + {file = "lxml_stubs-0.4.0-py3-none-any.whl", hash = "sha256:3b381e9e82397c64ea3cc4d6f79d1255d015f7b114806d4826218805c10ec003"}, +] + +[package.extras] +test = ["coverage[toml] (==5.2)", "pytest (>=6.0.0)", "pytest-mypy-plugins (==1.9.3)"] + [[package]] name = "markdown-it-py" version = "2.2.0" @@ -3409,22 +3424,22 @@ docs = ["Sphinx", "repoze.sphinx.autointerface"] test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"] [extras] -all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "txredisapi", "hiredis", "Pympler", "pyicu"] +all = ["Pympler", "authlib", "hiredis", "jaeger-client", "lxml", "matrix-synapse-ldap3", "opentracing", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pyicu", "pysaml2", "sentry-sdk", "txredisapi"] cache-memory = ["Pympler"] jwt = ["authlib"] matrix-synapse-ldap3 = ["matrix-synapse-ldap3"] oidc = ["authlib"] opentracing = ["jaeger-client", "opentracing"] postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"] -redis = ["txredisapi", "hiredis"] +redis = ["hiredis", "txredisapi"] saml2 = ["pysaml2"] sentry = ["sentry-sdk"] systemd = ["systemd-python"] -test = ["parameterized", "idna"] +test = ["idna", "parameterized"] url-preview = ["lxml"] user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.7.1" -content-hash = "ef3a16dd66177f7141239e1a2d3e07cc14c08f1e4e0c5127184d022bc062da52" +content-hash = "7ad11e62a675e09444cf33ca2de3216fc4efc5874a2575e54d95d577a52439d3" diff --git a/pyproject.toml b/pyproject.toml index 7227bc7523..4476f57ca7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -314,6 +314,7 @@ black = ">=22.3.0" ruff = "0.0.265" # Typechecking +lxml-stubs = ">=0.4.0" mypy = "*" mypy-zope = "*" types-bleach = ">=4.1.0" diff --git a/synapse/media/oembed.py b/synapse/media/oembed.py index c0eaf04be5..5ad9eec80b 100644 --- a/synapse/media/oembed.py +++ b/synapse/media/oembed.py @@ -14,7 +14,7 @@ import html import logging import urllib.parse -from typing import TYPE_CHECKING, List, Optional +from typing import TYPE_CHECKING, List, Optional, cast import attr @@ -98,7 +98,7 @@ class OEmbedProvider: # No match. return None - def autodiscover_from_html(self, tree: "etree.Element") -> Optional[str]: + def autodiscover_from_html(self, tree: "etree._Element") -> Optional[str]: """ Search an HTML document for oEmbed autodiscovery information. @@ -109,18 +109,22 @@ class OEmbedProvider: The URL to use for oEmbed information, or None if no URL was found. """ # Search for link elements with the proper rel and type attributes. - for tag in tree.xpath( - "//link[@rel='alternate'][@type='application/json+oembed']" + # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. + for tag in cast( + List["etree._Element"], + tree.xpath("//link[@rel='alternate'][@type='application/json+oembed']"), ): if "href" in tag.attrib: - return tag.attrib["href"] + return cast(str, tag.attrib["href"]) # Some providers (e.g. Flickr) use alternative instead of alternate. 
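The casts in this patch all follow one pattern, called out by the new comments: lxml-stubs cannot type xpath()'s return value precisely because it depends on the expression, so each call site narrows the result by hand. The idiom in isolation (an illustrative sketch assuming lxml is installed; not patch code):

from typing import List, cast

from lxml import etree


def link_hrefs(tree: etree._Element) -> List[str]:
    # This expression is known to return elements, so narrow the loosely
    # typed xpath() result accordingly, then narrow each attribute to str.
    tags = cast(List[etree._Element], tree.xpath("//link[@href]"))
    return [cast(str, tag.attrib["href"]) for tag in tags]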
- for tag in tree.xpath( - "//link[@rel='alternative'][@type='application/json+oembed']" + # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. + for tag in cast( + List["etree._Element"], + tree.xpath("//link[@rel='alternative'][@type='application/json+oembed']"), ): if "href" in tag.attrib: - return tag.attrib["href"] + return cast(str, tag.attrib["href"]) return None @@ -212,11 +216,12 @@ class OEmbedProvider: return OEmbedResult(open_graph_response, author_name, cache_age) -def _fetch_urls(tree: "etree.Element", tag_name: str) -> List[str]: +def _fetch_urls(tree: "etree._Element", tag_name: str) -> List[str]: results = [] - for tag in tree.xpath("//*/" + tag_name): + # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. + for tag in cast(List["etree._Element"], tree.xpath("//*/" + tag_name)): if "src" in tag.attrib: - results.append(tag.attrib["src"]) + results.append(cast(str, tag.attrib["src"])) return results @@ -244,11 +249,12 @@ def calc_description_and_urls(open_graph_response: JsonDict, html_body: str) -> parser = etree.HTMLParser(recover=True, encoding="utf-8") # Attempt to parse the body. If this fails, log and return no metadata. - tree = etree.fromstring(html_body, parser) + # TODO Develop of lxml-stubs has this correct. + tree = etree.fromstring(html_body, parser) # type: ignore[arg-type] # The data was successfully parsed, but no tree was found. if tree is None: - return + return # type: ignore[unreachable] # Attempt to find interesting URLs (images, videos, embeds). if "og:image" not in open_graph_response: diff --git a/synapse/media/preview_html.py b/synapse/media/preview_html.py index 516d0434f0..1bc7ccb7f3 100644 --- a/synapse/media/preview_html.py +++ b/synapse/media/preview_html.py @@ -24,6 +24,7 @@ from typing import ( Optional, Set, Union, + cast, ) if TYPE_CHECKING: @@ -115,7 +116,7 @@ def _get_html_media_encodings( def decode_body( body: bytes, uri: str, content_type: Optional[str] = None -) -> Optional["etree.Element"]: +) -> Optional["etree._Element"]: """ This uses lxml to parse the HTML document. @@ -152,11 +153,12 @@ def decode_body( # Attempt to parse the body. Returns None if the body was successfully # parsed, but no tree was found. - return etree.fromstring(body, parser) + # TODO Develop of lxml-stubs has this correct. + return etree.fromstring(body, parser) # type: ignore[arg-type] def _get_meta_tags( - tree: "etree.Element", + tree: "etree._Element", property: str, prefix: str, property_mapper: Optional[Callable[[str], Optional[str]]] = None, @@ -175,9 +177,15 @@ def _get_meta_tags( Returns: A map of tag name to value. """ + # This actually returns Dict[str, str], but the caller sets this as a variable + # which is Dict[str, Optional[str]]. results: Dict[str, Optional[str]] = {} - for tag in tree.xpath( - f"//*/meta[starts-with(@{property}, '{prefix}:')][@content][not(@content='')]" + # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. + for tag in cast( + List["etree._Element"], + tree.xpath( + f"//*/meta[starts-with(@{property}, '{prefix}:')][@content][not(@content='')]" + ), ): # if we've got more than 50 tags, someone is taking the piss if len(results) >= 50: @@ -187,14 +195,15 @@ def _get_meta_tags( ) return {} - key = tag.attrib[property] + key = cast(str, tag.attrib[property]) if property_mapper: - key = property_mapper(key) + new_key = property_mapper(key) # None is a special value used to ignore a value. 
- if key is None: + if new_key is None: continue + key = new_key - results[key] = tag.attrib["content"] + results[key] = cast(str, tag.attrib["content"]) return results @@ -219,7 +228,7 @@ def _map_twitter_to_open_graph(key: str) -> Optional[str]: return "og" + key[7:] -def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: +def parse_html_to_open_graph(tree: "etree._Element") -> Dict[str, Optional[str]]: """ Parse the HTML document into an Open Graph response. @@ -276,24 +285,36 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: if "og:title" not in og: # Attempt to find a title from the title tag, or the biggest header on the page. - title = tree.xpath("((//title)[1] | (//h1)[1] | (//h2)[1] | (//h3)[1])/text()") + # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. + title = cast( + List["etree._ElementUnicodeResult"], + tree.xpath("((//title)[1] | (//h1)[1] | (//h2)[1] | (//h3)[1])/text()"), + ) if title: og["og:title"] = title[0].strip() else: og["og:title"] = None if "og:image" not in og: - meta_image = tree.xpath( - "//*/meta[translate(@itemprop, 'IMAGE', 'image')='image'][not(@content='')]/@content[1]" + # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. + meta_image = cast( + List["etree._ElementUnicodeResult"], + tree.xpath( + "//*/meta[translate(@itemprop, 'IMAGE', 'image')='image'][not(@content='')]/@content[1]" + ), ) # If a meta image is found, use it. if meta_image: og["og:image"] = meta_image[0] else: # Try to find images which are larger than 10px by 10px. + # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. # # TODO: consider inlined CSS styles as well as width & height attribs - images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]") + images = cast( + List["etree._Element"], + tree.xpath("//img[@src][number(@width)>10][number(@height)>10]"), + ) images = sorted( images, key=lambda i: ( @@ -302,20 +323,29 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: ) # If no images were found, try to find *any* images. if not images: - images = tree.xpath("//img[@src][1]") + # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. + images = cast(List["etree._Element"], tree.xpath("//img[@src][1]")) if images: - og["og:image"] = images[0].attrib["src"] + og["og:image"] = cast(str, images[0].attrib["src"]) # Finally, fallback to the favicon if nothing else. else: - favicons = tree.xpath("//link[@href][contains(@rel, 'icon')]/@href[1]") + # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. + favicons = cast( + List["etree._ElementUnicodeResult"], + tree.xpath("//link[@href][contains(@rel, 'icon')]/@href[1]"), + ) if favicons: og["og:image"] = favicons[0] if "og:description" not in og: # Check the first meta description tag for content. - meta_description = tree.xpath( - "//*/meta[translate(@name, 'DESCRIPTION', 'description')='description'][not(@content='')]/@content[1]" + # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this. + meta_description = cast( + List["etree._ElementUnicodeResult"], + tree.xpath( + "//*/meta[translate(@name, 'DESCRIPTION', 'description')='description'][not(@content='')]/@content[1]" + ), ) # If a meta description is found with content, use it. 
if meta_description: @@ -332,7 +362,7 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: return og -def parse_html_description(tree: "etree.Element") -> Optional[str]: +def parse_html_description(tree: "etree._Element") -> Optional[str]: """ Calculate a text description based on an HTML document. @@ -368,6 +398,9 @@ def parse_html_description(tree: "etree.Element") -> Optional[str]: "canvas", "img", "picture", + # etree.Comment is a function which creates an etree._Comment element. + # The "tag" attribute of an etree._Comment instance is confusingly the + # etree.Comment function instead of a string. etree.Comment, } @@ -381,8 +414,8 @@ def parse_html_description(tree: "etree.Element") -> Optional[str]: def _iterate_over_text( - tree: Optional["etree.Element"], - tags_to_ignore: Set[Union[str, "etree.Comment"]], + tree: Optional["etree._Element"], + tags_to_ignore: Set[object], stack_limit: int = 1024, ) -> Generator[str, None, None]: """Iterate over the tree returning text nodes in a depth first fashion, @@ -402,7 +435,7 @@ def _iterate_over_text( # This is a stack whose items are elements to iterate over *or* strings # to be returned. - elements: List[Union[str, "etree.Element"]] = [tree] + elements: List[Union[str, "etree._Element"]] = [tree] while elements: el = elements.pop() diff --git a/tests/media/test_html_preview.py b/tests/media/test_html_preview.py index e7da75db3e..ea84bb3d3d 100644 --- a/tests/media/test_html_preview.py +++ b/tests/media/test_html_preview.py @@ -24,7 +24,7 @@ from tests import unittest try: import lxml except ImportError: - lxml = None + lxml = None # type: ignore[assignment] class SummarizeTestCase(unittest.TestCase): @@ -160,6 +160,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -176,6 +177,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -195,6 +197,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual( @@ -217,6 +220,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -231,6 +235,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": None, "og:description": "Some text."}) @@ -246,6 +251,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Title", "og:description": "Title"}) @@ -261,6 +267,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Title", "og:description": "Some text."}) @@ -281,6 +288,7 
@@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Title", "og:description": "Finally!"}) @@ -296,6 +304,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": None, "og:description": "Some text."}) @@ -324,6 +333,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): FooSome text. """.strip() tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -338,6 +348,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html", "invalid-encoding") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -353,6 +364,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "ÿÿ Foo", "og:description": "Some text."}) @@ -367,6 +379,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "ó", "og:description": "Some text."}) @@ -380,6 +393,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual( og, @@ -401,6 +415,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual( og, @@ -419,6 +434,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): with a cheeky SVG and some tail text """ tree = decode_body(html, "http://example.com/test.html") + assert tree is not None og = parse_html_to_open_graph(tree) self.assertEqual( og, diff --git a/tests/media/test_oembed.py b/tests/media/test_oembed.py index c8bf8421da..3bc19cb1cc 100644 --- a/tests/media/test_oembed.py +++ b/tests/media/test_oembed.py @@ -28,7 +28,7 @@ from tests.unittest import HomeserverTestCase try: import lxml except ImportError: - lxml = None + lxml = None # type: ignore[assignment] class OEmbedTests(HomeserverTestCase): diff --git a/tests/media/test_url_previewer.py b/tests/media/test_url_previewer.py index 3c4c7d6765..46ecde5344 100644 --- a/tests/media/test_url_previewer.py +++ b/tests/media/test_url_previewer.py @@ -24,7 +24,7 @@ from tests.unittest import override_config try: import lxml except ImportError: - lxml = None + lxml = None # type: ignore[assignment] class URLPreviewTests(unittest.HomeserverTestCase): diff --git a/tests/rest/media/test_url_preview.py b/tests/rest/media/test_url_preview.py index 170fb0534a..05d5e39cab 100644 --- a/tests/rest/media/test_url_preview.py +++ b/tests/rest/media/test_url_preview.py @@ -40,7 +40,7 @@ from tests.test_utils import SMALL_PNG try: import lxml except ImportError: - lxml = None + lxml = None # type: ignore[assignment] class URLPreviewTests(unittest.HomeserverTestCase): From 0b5f64ff09d44338d2514cbdba80aa4a4f11d1aa 
Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 31 May 2023 14:35:49 -0500 Subject: [PATCH 076/562] Add Synapse version deploy annotations to Grafana dashboard (#15674) Fix https://github.com/matrix-org/synapse/issues/15662 This manifests as purple lines that show up on all time series panels that you can hover and see what version was deployed. Also added a new "Deployed Synapse versions over time" panel where the color block changes with each version. And mixed this color block into the "Up" time series panel. To get the Grafana dashboard JSON to copy here: use the **Share** icon at the top -> **Export** -> check the **Export for sharing externally** option -> **View JSON** or **Save to file** --- changelog.d/15674.feature | 1 + contrib/grafana/synapse.json | 1068 ++++++++++++++++++++++++++++------ 2 files changed, 895 insertions(+), 174 deletions(-) create mode 100644 changelog.d/15674.feature diff --git a/changelog.d/15674.feature b/changelog.d/15674.feature new file mode 100644 index 0000000000..68cf207dc0 --- /dev/null +++ b/changelog.d/15674.feature @@ -0,0 +1 @@ +Add Syanpse version deploy annotations to Grafana dashboard which enables easy correlation between behavior changes witnessed in a graph to a certain Synapse version and nail down regressions. diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json index f09cd6f87c..f3253b32b9 100644 --- a/contrib/grafana/synapse.json +++ b/contrib/grafana/synapse.json @@ -56,6 +56,17 @@ "name": "Annotations & Alerts", "showIn": 0, "type": "dashboard" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "enable": true, + "expr": "changes(process_start_time_seconds{instance=\"matrix.org\",job=~\"synapse\"}[$bucket_size]) * on (instance, job) group_left(version) synapse_build_info{instance=\"matrix.org\",job=\"synapse\"}", + "iconColor": "purple", + "name": "deploys", + "titleFormat": "Deployed {{version}}" } ] }, @@ -670,6 +681,95 @@ "align": false } }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMax": 1, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 10, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 19 + }, + "id": 245, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "synapse_build_info{instance=\"$instance\", job=\"synapse\"} - 1", + "legendFormat": "version {{version}}", + "range": true, + "refId": "deployed_synapse_versions" + } + ], + "title": "Deployed Synapse versions over time", + "type": "timeseries" + }, { "aliasColors": {}, "bars": false, @@ -809,6 
+909,7 @@ "dashLength": 10, "dashes": false, "datasource": { + "type": "prometheus", "uid": "$datasource" }, "editable": true, @@ -874,11 +975,13 @@ "datasource": { "uid": "$datasource" }, + "editorMode": "code", "expr": "rate(process_cpu_system_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{job}}-{{index}} system ", "metric": "", + "range": true, "refId": "B", "step": 20 }, @@ -1328,6 +1431,7 @@ "dashLength": 10, "dashes": false, "datasource": { + "type": "prometheus", "uid": "$datasource" }, "fieldConfig": { @@ -1368,7 +1472,15 @@ "pointradius": 5, "points": false, "renderer": "flot", - "seriesOverrides": [], + "seriesOverrides": [ + { + "$$hashKey": "object:116", + "alias": "/^version .*/", + "lines": true, + "linewidth": 6, + "points": false + } + ], "spaceLength": 10, "stack": false, "steppedLine": false, @@ -1377,11 +1489,25 @@ "datasource": { "uid": "$datasource" }, + "editorMode": "code", "expr": "min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{job}}-{{index}}", + "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$datasource" + }, + "editorMode": "code", + "expr": "synapse_build_info{instance=\"$instance\", job=\"synapse\"} - 1", + "hide": false, + "legendFormat": "version {{version}}", + "range": true, + "refId": "deployed_synapse_versions" } ], "thresholds": [], @@ -1788,7 +1914,7 @@ "h": 9, "w": 12, "x": 0, - "y": 56 + "y": 28 }, "heatmap": {}, "hideZeroBuckets": false, @@ -1890,7 +2016,7 @@ "h": 9, "w": 12, "x": 12, - "y": 56 + "y": 28 }, "hiddenSeries": false, "id": 33, @@ -1982,7 +2108,7 @@ "h": 7, "w": 12, "x": 0, - "y": 65 + "y": 37 }, "hiddenSeries": false, "id": 40, @@ -2070,7 +2196,7 @@ "h": 7, "w": 12, "x": 12, - "y": 65 + "y": 37 }, "hiddenSeries": false, "id": 46, @@ -2161,7 +2287,7 @@ "h": 7, "w": 12, "x": 0, - "y": 72 + "y": 44 }, "hiddenSeries": false, "id": 44, @@ -2253,7 +2379,7 @@ "h": 7, "w": 12, "x": 12, - "y": 72 + "y": 44 }, "hiddenSeries": false, "id": 45, @@ -2354,7 +2480,7 @@ "h": 9, "w": 12, "x": 0, - "y": 79 + "y": 51 }, "hiddenSeries": false, "id": 118, @@ -2547,7 +2673,7 @@ "h": 9, "w": 12, "x": 12, - "y": 79 + "y": 51 }, "id": 222, "options": { @@ -2646,7 +2772,7 @@ "h": 8, "w": 12, "x": 0, - "y": 57 + "y": 29 }, "hiddenSeries": false, "id": 4, @@ -2768,7 +2894,7 @@ "h": 8, "w": 12, "x": 12, - "y": 57 + "y": 29 }, "hiddenSeries": false, "id": 32, @@ -2867,7 +2993,7 @@ "h": 8, "w": 12, "x": 0, - "y": 65 + "y": 37 }, "hiddenSeries": false, "id": 139, @@ -2989,7 +3115,7 @@ "h": 8, "w": 12, "x": 12, - "y": 65 + "y": 37 }, "hiddenSeries": false, "id": 52, @@ -3111,7 +3237,7 @@ "h": 8, "w": 12, "x": 0, - "y": 73 + "y": 45 }, "hiddenSeries": false, "id": 7, @@ -3212,7 +3338,7 @@ "h": 8, "w": 12, "x": 12, - "y": 73 + "y": 45 }, "hiddenSeries": false, "id": 47, @@ -3310,7 +3436,7 @@ "h": 9, "w": 12, "x": 0, - "y": 81 + "y": 53 }, "hiddenSeries": false, "id": 103, @@ -3445,7 +3571,7 @@ "h": 9, "w": 12, "x": 0, - "y": 5 + "y": 30 }, "hiddenSeries": false, "id": 99, @@ -3467,7 +3593,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "8.4.3", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -3538,7 +3664,7 @@ "h": 9, "w": 12, "x": 12, - "y": 5 + "y": 30 }, "hiddenSeries": false, "id": 101, @@ -3560,7 +3686,7 @@ }, "paceLength": 10, "percentage": false, - 
"pluginVersion": "8.4.3", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -3631,7 +3757,7 @@ "h": 8, "w": 12, "x": 0, - "y": 14 + "y": 39 }, "hiddenSeries": false, "id": 138, @@ -3651,7 +3777,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.4.3", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -3746,7 +3872,7 @@ "h": 9, "w": 12, "x": 0, - "y": 59 + "y": 31 }, "hiddenSeries": false, "id": 79, @@ -3846,7 +3972,7 @@ "h": 9, "w": 12, "x": 12, - "y": 59 + "y": 31 }, "hiddenSeries": false, "id": 83, @@ -3934,6 +4060,7 @@ "dashLength": 10, "dashes": false, "datasource": { + "type": "prometheus", "uid": "$datasource" }, "fieldConfig": { @@ -3948,7 +4075,7 @@ "h": 9, "w": 12, "x": 0, - "y": 68 + "y": 40 }, "hiddenSeries": false, "id": 109, @@ -3983,11 +4110,13 @@ "datasource": { "uid": "$datasource" }, - "expr": "sum(rate(synapse_federation_client_sent_pdu_destinations:total_total{instance=\"$instance\"}[$bucket_size]))", + "editorMode": "code", + "expr": "sum(rate(synapse_federation_client_sent_pdu_destinations_count_total{instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "interval": "", "intervalFactor": 1, "legendFormat": "pdus", + "range": true, "refId": "A" }, { @@ -4052,7 +4181,7 @@ "h": 9, "w": 12, "x": 12, - "y": 68 + "y": 40 }, "hiddenSeries": false, "id": 111, @@ -4129,6 +4258,250 @@ "align": false } }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Triangular growth may indicate a problem with federation sending from the remote host --- but it may also be the case that everyone is asleep and no messages are being sent.\n\nSee https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#federation_metrics_domains", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMax": 60, + "axisSoftMin": 0, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "line" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 60 + } + ] + }, + "unit": "m" + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "libera.chat " + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 49 + }, + "id": 243, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "(time() - max without (job, index, host) (avg_over_time(synapse_federation_last_received_pdu_time[10m]))) / 60", + 
"instant": false, + "legendFormat": "{{server_name}} ", + "range": true, + "refId": "A" + } + ], + "title": "Age of last PDU received from nominated hosts", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Triangular growth may indicate a problem with federation senders on the monitored instance---but it may also be the case that everyone is asleep and no messages are being sent.\n\nSee https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#federation_metrics_domains", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMax": 60, + "axisSoftMin": 0, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "line" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 60 + } + ] + }, + "unit": "m" + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "libera.chat" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 49 + }, + "id": 241, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "(time() - max without (job, index, host) (avg_over_time(synapse_federation_last_sent_pdu_time[10m]))) / 60", + "instant": false, + "legendFormat": "{{server_name}}", + "range": true, + "refId": "A" + } + ], + "title": "Age of last PDU sent to nominated hosts", + "type": "timeseries" + }, { "aliasColors": {}, "bars": false, @@ -4148,10 +4521,10 @@ "fill": 1, "fillGradient": 0, "gridPos": { - "h": 8, + "h": 9, "w": 12, "x": 0, - "y": 77 + "y": 57 }, "hiddenSeries": false, "id": 142, @@ -4259,7 +4632,7 @@ "h": 9, "w": 12, "x": 12, - "y": 77 + "y": 57 }, "hiddenSeries": false, "id": 140, @@ -4428,7 +4801,7 @@ "h": 9, "w": 12, "x": 0, - "y": 85 + "y": 66 }, "heatmap": {}, "hideZeroBuckets": false, @@ -4533,7 +4906,7 @@ "h": 9, "w": 12, "x": 12, - "y": 86 + "y": 66 }, "hiddenSeries": false, "id": 162, @@ -4745,11 +5118,26 @@ "datasource": { "uid": "$datasource" }, + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 94 + "y": 75 }, "heatmap": {}, "hideZeroBuckets": false, @@ -4759,6 +5147,48 @@ "show": false }, "links": [], + "options": { + "calculate": false, + "calculation": {}, + "cellGap": -1, + "cellValues": { + "decimals": 2 + }, + "color": { + "exponent": 
0.5, + "fill": "#b4ff00", + "min": 0, + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "Inferno", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": false + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "9.2.2", "reverseYBuckets": false, "targets": [ { @@ -4798,6 +5228,7 @@ "dashLength": 10, "dashes": false, "datasource": { + "type": "prometheus", "uid": "$datasource" }, "editable": true, @@ -4815,7 +5246,7 @@ "h": 9, "w": 12, "x": 12, - "y": 95 + "y": 75 }, "hiddenSeries": false, "id": 203, @@ -4837,7 +5268,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -4850,11 +5281,13 @@ "datasource": { "uid": "$datasource" }, - "expr": "synapse_federation_server_oldest_inbound_pdu_in_staging{job=\"$job\",index=~\"$index\",instance=\"$instance\"}", + "editorMode": "code", + "expr": "synapse_federation_server_oldest_inbound_pdu_in_staging{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "rss {{index}}", + "legendFormat": "{{job}}-{{index}}", + "range": true, "refId": "A", "step": 4 } @@ -4899,6 +5332,7 @@ "dashLength": 10, "dashes": false, "datasource": { + "type": "prometheus", "uid": "$datasource" }, "editable": true, @@ -4916,7 +5350,7 @@ "h": 9, "w": 12, "x": 0, - "y": 103 + "y": 84 }, "hiddenSeries": false, "id": 202, @@ -4938,7 +5372,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -4951,11 +5385,13 @@ "datasource": { "uid": "$datasource" }, - "expr": "synapse_federation_server_number_inbound_pdu_in_staging{job=\"$job\",index=~\"$index\",instance=\"$instance\"}", + "editorMode": "code", + "expr": "synapse_federation_server_number_inbound_pdu_in_staging{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "rss {{index}}", + "legendFormat": "{{job}}-{{index}}", + "range": true, "refId": "A", "step": 4 } @@ -5009,7 +5445,7 @@ "h": 8, "w": 12, "x": 12, - "y": 104 + "y": 84 }, "hiddenSeries": false, "id": 205, @@ -5029,7 +5465,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -5115,6 +5551,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -5162,7 +5600,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1 + "y": 154 }, "id": 239, "options": { @@ -5201,6 +5639,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -5248,7 +5688,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1 + "y": 154 }, "id": 235, "options": { @@ -5288,6 +5728,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -5335,7 +5777,7 @@ "h": 8, "w": 12, "x": 0, - "y": 9 + "y": 162 }, "id": 237, "options": { @@ 
-5376,6 +5818,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -5423,7 +5867,7 @@ "h": 8, "w": 12, "x": 12, - "y": 9 + "y": 162 }, "id": 233, "options": { @@ -5474,7 +5918,7 @@ "h": 8, "w": 12, "x": 0, - "y": 17 + "y": 170 }, "hiddenSeries": false, "id": 229, @@ -5497,7 +5941,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -5709,6 +6153,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -5773,7 +6219,7 @@ "h": 8, "w": 12, "x": 12, - "y": 17 + "y": 170 }, "id": 231, "options": { @@ -5832,65 +6278,96 @@ "id": 60, "panels": [ { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { + "type": "prometheus", "uid": "$datasource" }, "fieldConfig": { "defaults": { - "links": [] + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "hertz" }, "overrides": [] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 0, - "y": 32 + "y": 155 }, - "hiddenSeries": false, "id": 51, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, "links": [], - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } }, - "paceLength": 10, - "percentage": false, - "pluginVersion": "8.4.3", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "9.2.2", "targets": [ { "datasource": { "uid": "$datasource" }, + "editorMode": "code", "expr": "rate(synapse_http_httppusher_http_pushes_processed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + synapse_http_httppusher_http_pushes_processed_total) > 0", "format": "time_series", "interval": "", "intervalFactor": 2, - "legendFormat": "processed {{job}}", + "legendFormat": "processed {{job}}-{{index}}", + "range": true, "refId": "A", "step": 20 }, @@ -5898,43 +6375,18 @@ "datasource": { "uid": "$datasource" }, + "editorMode": "code", "expr": "rate(synapse_http_httppusher_http_pushes_failed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + 
synapse_http_httppusher_http_pushes_processed_total) > 0", "format": "time_series", "intervalFactor": 2, - "legendFormat": "failed {{job}}", + "legendFormat": "failed {{job}}-{{index}}", + "range": true, "refId": "B", "step": 20 } ], - "thresholds": [], - "timeRegions": [], "title": "HTTP Push rate", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { "aliasColors": {}, @@ -5957,7 +6409,7 @@ "h": 8, "w": 12, "x": 12, - "y": 32 + "y": 155 }, "hiddenSeries": false, "id": 134, @@ -5978,7 +6430,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "8.4.3", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -7344,7 +7796,7 @@ "h": 13, "w": 12, "x": 0, - "y": 35 + "y": 158 }, "hiddenSeries": false, "id": 12, @@ -7367,7 +7819,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -7442,7 +7894,7 @@ "h": 13, "w": 12, "x": 12, - "y": 35 + "y": 158 }, "hiddenSeries": false, "id": 26, @@ -7465,7 +7917,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -7541,7 +7993,7 @@ "h": 13, "w": 12, "x": 0, - "y": 48 + "y": 171 }, "hiddenSeries": false, "id": 13, @@ -7564,7 +8016,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -7645,7 +8097,7 @@ "h": 13, "w": 12, "x": 12, - "y": 48 + "y": 171 }, "hiddenSeries": false, "id": 27, @@ -7668,7 +8120,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -7743,7 +8195,7 @@ "h": 13, "w": 12, "x": 0, - "y": 61 + "y": 184 }, "hiddenSeries": false, "id": 28, @@ -7765,7 +8217,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -7840,7 +8292,7 @@ "h": 13, "w": 12, "x": 12, - "y": 61 + "y": 184 }, "hiddenSeries": false, "id": 25, @@ -7862,7 +8314,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -7930,7 +8382,7 @@ "h": 15, "w": 12, "x": 0, - "y": 74 + "y": 197 }, "hiddenSeries": false, "id": 154, @@ -7951,7 +8403,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -9363,7 +9815,7 @@ "h": 7, "w": 12, "x": 0, - "y": 40 + "y": 162 }, "hiddenSeries": false, "id": 43, @@ -9385,7 +9837,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -9449,6 +9901,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -9498,7 +9952,7 @@ "h": 7, "w": 12, "x": 12, - "y": 40 + "y": 162 }, "id": 41, "links": [], @@ -9545,6 +9999,8 @@ "mode": "palette-classic" }, 
"custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -9595,7 +10051,7 @@ "h": 7, "w": 12, "x": 0, - "y": 47 + "y": 169 }, "id": 42, "links": [], @@ -9642,6 +10098,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "axisSoftMin": 1, @@ -9693,7 +10151,7 @@ "h": 7, "w": 12, "x": 12, - "y": 47 + "y": 169 }, "id": 220, "links": [], @@ -9751,7 +10209,7 @@ "h": 7, "w": 12, "x": 0, - "y": 54 + "y": 176 }, "hiddenSeries": false, "id": 144, @@ -9771,7 +10229,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -9844,7 +10302,7 @@ "h": 7, "w": 12, "x": 12, - "y": 54 + "y": 176 }, "hiddenSeries": false, "id": 115, @@ -9866,7 +10324,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -9938,7 +10396,7 @@ "h": 7, "w": 12, "x": 0, - "y": 61 + "y": 183 }, "hiddenSeries": false, "id": 113, @@ -9960,7 +10418,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -10058,7 +10516,6 @@ }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -10069,7 +10526,7 @@ "h": 9, "w": 12, "x": 0, - "y": 41 + "y": 163 }, "hiddenSeries": false, "id": 67, @@ -10091,7 +10548,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -10154,7 +10611,6 @@ }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -10165,7 +10621,7 @@ "h": 9, "w": 12, "x": 12, - "y": 41 + "y": 163 }, "hiddenSeries": false, "id": 71, @@ -10187,7 +10643,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -10250,7 +10706,6 @@ }, "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -10261,7 +10716,7 @@ "h": 9, "w": 12, "x": 0, - "y": 50 + "y": 172 }, "hiddenSeries": false, "id": 121, @@ -10284,7 +10739,7 @@ }, "paceLength": 10, "percentage": false, - "pluginVersion": "7.3.7", + "pluginVersion": "9.2.2", "pointradius": 5, "points": false, "renderer": "flot", @@ -10383,7 +10838,16 @@ "description": "Colour reflects the number of rooms with the given number of forward extremities, or fewer.\n\nThis is only updated once an hour.", "fieldConfig": { "defaults": { - "custom": {} + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } }, "overrides": [] }, @@ -10401,6 +10865,46 @@ "show": true }, "links": [], + "options": { + "calculate": false, + "calculation": {}, + "cellGap": 1, + "cellValues": {}, + "color": { + "exponent": 0.5, + "fill": "#B877D9", + "min": 0, + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + 
"unit": "short" + } + }, + "pluginVersion": "9.2.2", "reverseYBuckets": false, "targets": [ { @@ -10442,7 +10946,6 @@ "description": "Number of rooms with the given number of forward extremities or fewer.\n\nThis is only updated once an hour.", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -10471,8 +10974,11 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -10543,7 +11049,16 @@ "description": "Colour reflects the number of events persisted to rooms with the given number of forward extremities, or fewer.", "fieldConfig": { "defaults": { - "custom": {} + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } }, "overrides": [] }, @@ -10561,6 +11076,46 @@ "show": true }, "links": [], + "options": { + "calculate": false, + "calculation": {}, + "cellGap": 1, + "cellValues": {}, + "color": { + "exponent": 0.5, + "fill": "#5794F2", + "min": 0, + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "short" + } + }, + "pluginVersion": "9.2.2", "reverseYBuckets": false, "targets": [ { @@ -10602,7 +11157,6 @@ "description": "For a given percentage P, the number X where P% of events were persisted to rooms with X forward extremities or fewer.", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -10630,8 +11184,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -10732,7 +11289,16 @@ "description": "Colour reflects the number of events persisted to rooms with the given number of stale forward extremities, or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.", "fieldConfig": { "defaults": { - "custom": {} + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } }, "overrides": [] }, @@ -10750,6 +11316,46 @@ "show": true }, "links": [], + "options": { + "calculate": false, + "calculation": {}, + "cellGap": 1, + "cellValues": {}, + "color": { + "exponent": 0.5, + "fill": "#FF9830", + "min": 0, + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "short" + } + }, + "pluginVersion": "9.2.2", "reverseYBuckets": false, "targets": [ { @@ -10791,7 +11397,6 @@ "description": "For given percentage P, the number X where P% of events were persisted to rooms with X stale forward 
extremities or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -10819,8 +11424,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -10921,7 +11529,16 @@ "description": "Colour reflects the number of state resolution operations performed over the given number of state groups, or fewer.", "fieldConfig": { "defaults": { - "custom": {} + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } }, "overrides": [] }, @@ -10939,6 +11556,46 @@ "show": true }, "links": [], + "options": { + "calculate": false, + "calculation": {}, + "cellGap": 1, + "cellValues": {}, + "color": { + "exponent": 0.5, + "fill": "#73BF69", + "min": 0, + "mode": "opacity", + "reverse": false, + "scale": "exponential", + "scheme": "Oranges", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "short" + } + }, + "pluginVersion": "9.2.2", "reverseYBuckets": false, "targets": [ { @@ -10976,12 +11633,12 @@ "dashLength": 10, "dashes": false, "datasource": { + "type": "prometheus", "uid": "$datasource" }, "description": "For a given percentage P, the number X where P% of state resolution operations took place over X state groups or fewer.", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -11010,8 +11667,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -11024,11 +11684,13 @@ "datasource": { "uid": "$datasource" }, + "editorMode": "code", "expr": "histogram_quantile(0.5, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", "interval": "", "intervalFactor": 1, "legendFormat": "50%", + "range": true, "refId": "A" }, { @@ -11106,12 +11768,6 @@ "uid": "$datasource" }, "description": "When we do a state res while persisting events we try and see if we can prune any stale extremities.", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -11134,8 +11790,11 @@ "lines": true, "linewidth": 1, "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, - "pluginVersion": "7.1.3", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -12218,6 +12877,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -12266,7 +12927,7 @@ "h": 8, "w": 12, "x": 0, - "y": 46 + "y": 47 }, "id": 191, "options": { @@ -12314,7 +12975,7 @@ "h": 8, "w": 12, "x": 12, - "y": 46 + "y": 47 }, "hiddenSeries": false, "id": 193, @@ -12334,7 +12995,7 @@ "alertThreshold": true }, "percentage": false, - 
"pluginVersion": "9.0.4", + "pluginVersion": "9.2.2", "pointradius": 2, "points": false, "renderer": "flot", @@ -12404,11 +13065,26 @@ "type": "prometheus", "uid": "$datasource" }, + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, "gridPos": { "h": 8, "w": 12, "x": 0, - "y": 54 + "y": 55 }, "heatmap": {}, "hideZeroBuckets": false, @@ -12418,6 +13094,48 @@ "show": false }, "links": [], + "options": { + "calculate": false, + "calculation": {}, + "cellGap": -1, + "cellValues": { + "decimals": 2 + }, + "color": { + "exponent": 0.5, + "fill": "#b4ff00", + "min": 0, + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "Inferno", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": false + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "9.2.2", "reverseYBuckets": false, "targets": [ { @@ -12463,6 +13181,8 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -12507,7 +13227,7 @@ "h": 8, "w": 12, "x": 12, - "y": 54 + "y": 55 }, "id": 223, "options": { @@ -12757,6 +13477,6 @@ "timezone": "", "title": "Synapse", "uid": "000000012", - "version": 150, + "version": 160, "weekStart": "" -} \ No newline at end of file +} From 6d9e2fd8782a6610d6daf499d141e67f476b2f8c Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 31 May 2023 15:13:48 -0700 Subject: [PATCH 077/562] Speed up background jobs populate_full_user_id_user_filters and populate_full_user_id_profiles (#15700) --- changelog.d/15700.misc | 1 + synapse/storage/databases/main/filtering.py | 2 +- synapse/storage/databases/main/profile.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15700.misc diff --git a/changelog.d/15700.misc b/changelog.d/15700.misc new file mode 100644 index 0000000000..e96bc681aa --- /dev/null +++ b/changelog.d/15700.misc @@ -0,0 +1 @@ +Speed up background jobs `populate_full_user_id_user_filters` and `populate_full_user_id_profiles`. \ No newline at end of file diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index da31eb44dc..f777777cbf 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -71,7 +71,7 @@ class FilteringWorkerStore(SQLBaseStore): SELECT user_id FROM user_filters WHERE user_id > ? ORDER BY user_id - LIMIT 1 OFFSET 50 + LIMIT 1 OFFSET 1000 """ txn.execute(sql, (lower_bound_id,)) res = txn.fetchone() diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index 65c92bef51..21d54c7a7a 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -65,7 +65,7 @@ class ProfileWorkerStore(SQLBaseStore): SELECT user_id FROM profiles WHERE user_id > ? 
ORDER BY user_id - LIMIT 1 OFFSET 50 + LIMIT 1 OFFSET 1000 """ txn.execute(sql, (lower_bound_id,)) res = txn.fetchone() From a273561c2247ee433f97a31961a30ab00ab19574 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 1 Jun 2023 08:21:37 -0400 Subject: [PATCH 078/562] Add a note about deprecating /register with a user property. (#15703) Application services providing a "user" property (instead of "username") for the /register endpoint was never specified. Deprecate this very old fallback. --- changelog.d/15703.removal | 1 + docs/upgrade.md | 11 +++++++++++ 2 files changed, 12 insertions(+) create mode 100644 changelog.d/15703.removal diff --git a/changelog.d/15703.removal b/changelog.d/15703.removal new file mode 100644 index 0000000000..95a2d8e484 --- /dev/null +++ b/changelog.d/15703.removal @@ -0,0 +1 @@ +Deprecate calling the `/register` endpoint with an unspecced `user` property for application services. diff --git a/docs/upgrade.md b/docs/upgrade.md index af999dd91f..49ab00c057 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -88,6 +88,17 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.85.0 + +## Application service registration with "user" property deprecation + +Application services should ensure they call the `/register` endpoint with a +`username` property. The legacy `user` property is considered deprecated and +should no longer be included. + +A future version of Synapse (v1.88.0 or later) will remove support for legacy +application service login. + # Upgrading to v1.84.0 ## Deprecation of `worker_replication_*` configuration settings From d1693f03626391097b59ea9568cd8a869ed89569 Mon Sep 17 00:00:00 2001 From: Hugh Nimmo-Smith Date: Thu, 1 Jun 2023 13:52:51 +0100 Subject: [PATCH 079/562] Implement stable support for MSC3882 to allow an existing device/session to generate a login token for use on a new device/session (#15388) Implements stable support for MSC3882; this involves updating Synapse's support to match the MSC / the spec says. Continue to support the unstable version to allow clients to transition. --- changelog.d/15388.feature | 1 + .../configuration/config_documentation.md | 65 +++++++++++------ synapse/config/auth.py | 10 +++ synapse/config/experimental.py | 13 +--- synapse/rest/client/capabilities.py | 3 + synapse/rest/client/login.py | 31 +++++--- synapse/rest/client/login_token_request.py | 47 ++++++++---- synapse/rest/client/versions.py | 4 +- tests/config/test_oauth_delegation.py | 4 +- tests/rest/client/test_capabilities.py | 28 ++++++++ tests/rest/client/test_login.py | 23 ++++++ tests/rest/client/test_login_token_request.py | 71 ++++++++++++++----- 12 files changed, 225 insertions(+), 75 deletions(-) create mode 100644 changelog.d/15388.feature diff --git a/changelog.d/15388.feature b/changelog.d/15388.feature new file mode 100644 index 0000000000..6cc55cafa2 --- /dev/null +++ b/changelog.d/15388.feature @@ -0,0 +1 @@ +Stable support for [MSC3882](https://github.com/matrix-org/matrix-spec-proposals/pull/3882) to allow an existing device/session to generate a login token for use on a new device/session. 
\ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 5ede6d0a82..0cf6e075ff 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2570,7 +2570,50 @@ Example configuration: ```yaml nonrefreshable_access_token_lifetime: 24h ``` +--- +### `ui_auth` +The amount of time to allow a user-interactive authentication session to be active. + +This defaults to 0, meaning the user is queried for their credentials +before every action, but this can be overridden to allow a single +validation to be re-used. This weakens the protections afforded by +the user-interactive authentication process, by allowing for multiple +(and potentially different) operations to use the same validation session. + +This is ignored for potentially "dangerous" operations (including +deactivating an account, modifying an account password, adding a 3PID, +and minting additional login tokens). + +Use the `session_timeout` sub-option here to change the time allowed for credential validation. + +Example configuration: +```yaml +ui_auth: + session_timeout: "15s" +``` +--- +### `login_via_existing_session` + +Matrix supports the ability of an existing session to mint a login token for +another client. + +Synapse disables this by default as it has security ramifications -- a malicious +client could use the mechanism to spawn more than one session. + +The duration of time the generated token is valid for can be configured with the +`token_timeout` sub-option. + +User-interactive authentication is required when this is enabled unless the +`require_ui_auth` sub-option is set to `False`. + +Example configuration: +```yaml +login_via_existing_session: + enabled: true + require_ui_auth: false + token_timeout: "5m" +``` --- ## Metrics Config options related to metrics. @@ -3415,28 +3458,6 @@ password_config: require_uppercase: true ``` --- -### `ui_auth` - -The amount of time to allow a user-interactive authentication session to be active. - -This defaults to 0, meaning the user is queried for their credentials -before every action, but this can be overridden to allow a single -validation to be re-used. This weakens the protections afforded by -the user-interactive authentication process, by allowing for multiple -(and potentially different) operations to use the same validation session. - -This is ignored for potentially "dangerous" operations (including -deactivating an account, modifying an account password, and -adding a 3PID). - -Use the `session_timeout` sub-option here to change the time allowed for credential validation. - -Example configuration: -```yaml -ui_auth: - session_timeout: "15s" -``` ---- ## Push Configuration settings related to push notifications diff --git a/synapse/config/auth.py b/synapse/config/auth.py index 12e853980e..c7ab428f28 100644 --- a/synapse/config/auth.py +++ b/synapse/config/auth.py @@ -60,3 +60,13 @@ class AuthConfig(Config): self.ui_auth_session_timeout = self.parse_duration( ui_auth.get("session_timeout", 0) ) + + # Logging in with an existing session. 
+ login_via_existing = config.get("login_via_existing_session", {}) + self.login_via_existing_enabled = login_via_existing.get("enabled", False) + self.login_via_existing_require_ui_auth = login_via_existing.get( + "require_ui_auth", True + ) + self.login_via_existing_token_timeout = self.parse_duration( + login_via_existing.get("token_timeout", "5m") + ) diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 1d189b2e26..a9e002cf08 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -192,10 +192,10 @@ class MSC3861: ("captcha", "enable_registration_captcha"), ) - if root.experimental.msc3882_enabled: + if root.auth.login_via_existing_enabled: raise ConfigError( - "MSC3882 cannot be enabled when OAuth delegation is enabled", - ("experimental_features", "msc3882_enabled"), + "Login via existing session cannot be enabled when OAuth delegation is enabled", + ("login_via_existing_session", "enabled"), ) if root.registration.refresh_token_lifetime: @@ -319,13 +319,6 @@ class ExperimentalConfig(Config): # MSC3881: Remotely toggle push notifications for another client self.msc3881_enabled: bool = experimental.get("msc3881_enabled", False) - # MSC3882: Allow an existing session to sign in a new session - self.msc3882_enabled: bool = experimental.get("msc3882_enabled", False) - self.msc3882_ui_auth: bool = experimental.get("msc3882_ui_auth", True) - self.msc3882_token_timeout = self.parse_duration( - experimental.get("msc3882_token_timeout", "5m") - ) - # MSC3874: Filtering /messages with rel_types / not_rel_types. self.msc3874_enabled: bool = experimental.get("msc3874_enabled", False) diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py index 0dbf8f6818..3154b9f77e 100644 --- a/synapse/rest/client/capabilities.py +++ b/synapse/rest/client/capabilities.py @@ -65,6 +65,9 @@ class CapabilitiesRestServlet(RestServlet): "m.3pid_changes": { "enabled": self.config.registration.enable_3pid_changes }, + "m.get_login_token": { + "enabled": self.config.auth.login_via_existing_enabled, + }, } } diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index d4dc2462b9..6493b00bb8 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -104,6 +104,9 @@ class LoginRestServlet(RestServlet): and hs.config.experimental.msc3866.require_approval_for_new_accounts ) + # Whether get login token is enabled. + self._get_login_token_enabled = hs.config.auth.login_via_existing_enabled + self.auth = hs.get_auth() self.clock = hs.get_clock() @@ -142,6 +145,9 @@ class LoginRestServlet(RestServlet): # to SSO. flows.append({"type": LoginRestServlet.CAS_TYPE}) + # The login token flow requires m.login.token to be advertised. + support_login_token_flow = self._get_login_token_enabled + if self.cas_enabled or self.saml2_enabled or self.oidc_enabled: flows.append( { @@ -153,14 +159,23 @@ class LoginRestServlet(RestServlet): } ) - # While it's valid for us to advertise this login type generally, - # synapse currently only gives out these tokens as part of the - # SSO login flow. - # Generally we don't want to advertise login flows that clients - # don't know how to implement, since they (currently) will always - # fall back to the fallback API if they don't understand one of the - # login flow types returned. 
- flows.append({"type": LoginRestServlet.TOKEN_TYPE}) + # SSO requires a login token to be generated, so we need to advertise that flow + support_login_token_flow = True + + # While it's valid for us to advertise this login type generally, + # synapse currently only gives out these tokens as part of the + # SSO login flow or as part of login via an existing session. + # + # Generally we don't want to advertise login flows that clients + # don't know how to implement, since they (currently) will always + # fall back to the fallback API if they don't understand one of the + # login flow types returned. + if support_login_token_flow: + tokenTypeFlow: Dict[str, Any] = {"type": LoginRestServlet.TOKEN_TYPE} + # If the login token flow is enabled advertise the get_login_token flag. + if self._get_login_token_enabled: + tokenTypeFlow["get_login_token"] = True + flows.append(tokenTypeFlow) flows.extend({"type": t} for t in self.auth_handler.get_supported_login_types()) diff --git a/synapse/rest/client/login_token_request.py b/synapse/rest/client/login_token_request.py index 43ea21d5e6..b1629f94a5 100644 --- a/synapse/rest/client/login_token_request.py +++ b/synapse/rest/client/login_token_request.py @@ -15,6 +15,7 @@ import logging from typing import TYPE_CHECKING, Tuple +from synapse.api.ratelimiting import Ratelimiter from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseRequest @@ -33,7 +34,7 @@ class LoginTokenRequestServlet(RestServlet): Request: - POST /login/token HTTP/1.1 + POST /login/get_token HTTP/1.1 Content-Type: application/json {} @@ -43,30 +44,45 @@ class LoginTokenRequestServlet(RestServlet): HTTP/1.1 200 OK { "login_token": "ABDEFGH", - "expires_in": 3600, + "expires_in_ms": 3600000, } """ - PATTERNS = client_patterns( - "/org.matrix.msc3882/login/token$", releases=[], v1=False, unstable=True - ) + PATTERNS = [ + *client_patterns( + "/login/get_token$", releases=["v1"], v1=False, unstable=False + ), + # TODO: this is no longer needed once unstable MSC3882 does not need to be supported: + *client_patterns( + "/org.matrix.msc3882/login/token$", releases=[], v1=False, unstable=True + ), + ] def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() - self.store = hs.get_datastores().main - self.clock = hs.get_clock() - self.server_name = hs.config.server.server_name + self._main_store = hs.get_datastores().main self.auth_handler = hs.get_auth_handler() - self.token_timeout = hs.config.experimental.msc3882_token_timeout - self.ui_auth = hs.config.experimental.msc3882_ui_auth + self.token_timeout = hs.config.auth.login_via_existing_token_timeout + self._require_ui_auth = hs.config.auth.login_via_existing_require_ui_auth + + # Ratelimit aggressively to a maxmimum of 1 request per minute. + # + # This endpoint can be used to spawn additional sessions and could be + # abused by a malicious client to create many sessions. 
+ self._ratelimiter = Ratelimiter( + store=self._main_store, + clock=hs.get_clock(), + rate_hz=1 / 60, + burst_count=1, + ) @interactive_auth_handler async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) body = parse_json_object_from_request(request) - if self.ui_auth: + if self._require_ui_auth: await self.auth_handler.validate_user_via_ui_auth( requester, request, @@ -75,9 +91,12 @@ class LoginTokenRequestServlet(RestServlet): can_skip_ui_auth=False, # Don't allow skipping of UI auth ) + # Ensure that this endpoint isn't being used too often. (Ensure this is + # done *after* UI auth.) + await self._ratelimiter.ratelimit(None, requester.user.to_string().lower()) + login_token = await self.auth_handler.create_login_token_for_user_id( user_id=requester.user.to_string(), - auth_provider_id="org.matrix.msc3882.login_token_request", duration_ms=self.token_timeout, ) @@ -85,11 +104,13 @@ class LoginTokenRequestServlet(RestServlet): 200, { "login_token": login_token, + # TODO: this is no longer needed once unstable MSC3882 does not need to be supported: "expires_in": self.token_timeout // 1000, + "expires_in_ms": self.token_timeout, }, ) def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - if hs.config.experimental.msc3882_enabled: + if hs.config.auth.login_via_existing_enabled: LoginTokenRequestServlet(hs).register(http_server) diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 32df054f56..547bf34df1 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -113,8 +113,8 @@ class VersionsRestServlet(RestServlet): "fi.mau.msc2815": self.config.experimental.msc2815_enabled, # Adds a ping endpoint for appservices to check HS->AS connection "fi.mau.msc2659.stable": True, # TODO: remove when "v1.7" is added above - # Adds support for login token requests as per MSC3882 - "org.matrix.msc3882": self.config.experimental.msc3882_enabled, + # TODO: this is no longer needed once unstable MSC3882 does not need to be supported: + "org.matrix.msc3882": self.config.auth.login_via_existing_enabled, # Adds support for remotely enabling/disabling pushers, as per MSC3881 "org.matrix.msc3881": self.config.experimental.msc3881_enabled, # Adds support for filtering /messages by event relation. 
diff --git a/tests/config/test_oauth_delegation.py b/tests/config/test_oauth_delegation.py index 2ead721b00..f57c813a58 100644 --- a/tests/config/test_oauth_delegation.py +++ b/tests/config/test_oauth_delegation.py @@ -228,8 +228,8 @@ class MSC3861OAuthDelegation(TestCase): with self.assertRaises(ConfigError): self.parse_config() - def test_msc3882_auth_cannot_be_enabled(self) -> None: - self.config_dict["experimental_features"]["msc3882_enabled"] = True + def test_login_via_existing_session_cannot_be_enabled(self) -> None: + self.config_dict["login_via_existing_session"] = {"enabled": True} with self.assertRaises(ConfigError): self.parse_config() diff --git a/tests/rest/client/test_capabilities.py b/tests/rest/client/test_capabilities.py index c16e8d43f4..cf23430f6a 100644 --- a/tests/rest/client/test_capabilities.py +++ b/tests/rest/client/test_capabilities.py @@ -186,3 +186,31 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase): self.assertGreater(len(details["support"]), 0) for room_version in details["support"]: self.assertTrue(room_version in KNOWN_ROOM_VERSIONS, str(room_version)) + + def test_get_get_token_login_fields_when_disabled(self) -> None: + """By default login via an existing session is disabled.""" + access_token = self.get_success( + self.auth_handler.create_access_token_for_user_id( + self.user, device_id=None, valid_until_ms=None + ) + ) + + channel = self.make_request("GET", self.url, access_token=access_token) + capabilities = channel.json_body["capabilities"] + + self.assertEqual(channel.code, HTTPStatus.OK) + self.assertFalse(capabilities["m.get_login_token"]["enabled"]) + + @override_config({"login_via_existing_session": {"enabled": True}}) + def test_get_get_token_login_fields_when_enabled(self) -> None: + access_token = self.get_success( + self.auth_handler.create_access_token_for_user_id( + self.user, device_id=None, valid_until_ms=None + ) + ) + + channel = self.make_request("GET", self.url, access_token=access_token) + capabilities = channel.json_body["capabilities"] + + self.assertEqual(channel.code, HTTPStatus.OK) + self.assertTrue(capabilities["m.get_login_token"]["enabled"]) diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index dc32982e22..f3c3bc69a9 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -446,6 +446,29 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): ApprovalNoticeMedium.NONE, channel.json_body["approval_notice_medium"] ) + def test_get_login_flows_with_login_via_existing_disabled(self) -> None: + """GET /login should return m.login.token without get_login_token""" + channel = self.make_request("GET", "/_matrix/client/r0/login") + self.assertEqual(channel.code, 200, channel.result) + + flows = {flow["type"]: flow for flow in channel.json_body["flows"]} + self.assertNotIn("m.login.token", flows) + + @override_config({"login_via_existing_session": {"enabled": True}}) + def test_get_login_flows_with_login_via_existing_enabled(self) -> None: + """GET /login should return m.login.token with get_login_token true""" + channel = self.make_request("GET", "/_matrix/client/r0/login") + self.assertEqual(channel.code, 200, channel.result) + + self.assertCountEqual( + channel.json_body["flows"], + [ + {"type": "m.login.token", "get_login_token": True}, + {"type": "m.login.password"}, + {"type": "m.login.application_service"}, + ], + ) + @skip_unless(has_saml2 and HAS_OIDC, "Requires SAML2 and OIDC") class MultiSSOTestCase(unittest.HomeserverTestCase): diff --git 
a/tests/rest/client/test_login_token_request.py b/tests/rest/client/test_login_token_request.py index b8187db982..f05e619aa8 100644 --- a/tests/rest/client/test_login_token_request.py +++ b/tests/rest/client/test_login_token_request.py @@ -15,14 +15,14 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.rest import admin -from synapse.rest.client import login, login_token_request +from synapse.rest.client import login, login_token_request, versions from synapse.server import HomeServer from synapse.util import Clock from tests import unittest from tests.unittest import override_config -endpoint = "/_matrix/client/unstable/org.matrix.msc3882/login/token" +GET_TOKEN_ENDPOINT = "/_matrix/client/v1/login/get_token" class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase): @@ -30,6 +30,7 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase): login.register_servlets, admin.register_servlets, login_token_request.register_servlets, + versions.register_servlets, # TODO: remove once unstable revision 0 support is removed ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: @@ -46,26 +47,26 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase): self.password = "password" def test_disabled(self) -> None: - channel = self.make_request("POST", endpoint, {}, access_token=None) + channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=None) self.assertEqual(channel.code, 404) self.register_user(self.user, self.password) token = self.login(self.user, self.password) - channel = self.make_request("POST", endpoint, {}, access_token=token) + channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=token) self.assertEqual(channel.code, 404) - @override_config({"experimental_features": {"msc3882_enabled": True}}) + @override_config({"login_via_existing_session": {"enabled": True}}) def test_require_auth(self) -> None: - channel = self.make_request("POST", endpoint, {}, access_token=None) + channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=None) self.assertEqual(channel.code, 401) - @override_config({"experimental_features": {"msc3882_enabled": True}}) + @override_config({"login_via_existing_session": {"enabled": True}}) def test_uia_on(self) -> None: user_id = self.register_user(self.user, self.password) token = self.login(self.user, self.password) - channel = self.make_request("POST", endpoint, {}, access_token=token) + channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=token) self.assertEqual(channel.code, 401) self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"]) @@ -80,9 +81,9 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase): }, } - channel = self.make_request("POST", endpoint, uia, access_token=token) + channel = self.make_request("POST", GET_TOKEN_ENDPOINT, uia, access_token=token) self.assertEqual(channel.code, 200) - self.assertEqual(channel.json_body["expires_in"], 300) + self.assertEqual(channel.json_body["expires_in_ms"], 300000) login_token = channel.json_body["login_token"] @@ -95,15 +96,15 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.json_body["user_id"], user_id) @override_config( - {"experimental_features": {"msc3882_enabled": True, "msc3882_ui_auth": False}} + {"login_via_existing_session": {"enabled": True, "require_ui_auth": False}} ) def test_uia_off(self) -> None: user_id = self.register_user(self.user, self.password) token = 
self.login(self.user, self.password) - channel = self.make_request("POST", endpoint, {}, access_token=token) + channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=token) self.assertEqual(channel.code, 200) - self.assertEqual(channel.json_body["expires_in"], 300) + self.assertEqual(channel.json_body["expires_in_ms"], 300000) login_token = channel.json_body["login_token"] @@ -117,10 +118,10 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase): @override_config( { - "experimental_features": { - "msc3882_enabled": True, - "msc3882_ui_auth": False, - "msc3882_token_timeout": "15s", + "login_via_existing_session": { + "enabled": True, + "require_ui_auth": False, + "token_timeout": "15s", } } ) @@ -128,6 +129,40 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase): self.register_user(self.user, self.password) token = self.login(self.user, self.password) - channel = self.make_request("POST", endpoint, {}, access_token=token) + channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=token) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["expires_in_ms"], 15000) + + @override_config( + { + "login_via_existing_session": { + "enabled": True, + "require_ui_auth": False, + "token_timeout": "15s", + } + } + ) + def test_unstable_support(self) -> None: + # TODO: remove support for unstable MSC3882 is no longer needed + + # check feature is advertised in versions response: + channel = self.make_request( + "GET", "/_matrix/client/versions", {}, access_token=None + ) + self.assertEqual(channel.code, 200) + self.assertEqual( + channel.json_body["unstable_features"]["org.matrix.msc3882"], True + ) + + self.register_user(self.user, self.password) + token = self.login(self.user, self.password) + + # check feature is available via the unstable endpoint and returns an expires_in value in seconds + channel = self.make_request( + "POST", + "/_matrix/client/unstable/org.matrix.msc3882/login/token", + {}, + access_token=token, + ) self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body["expires_in"], 15) From 5ed0e8c61f6b46289fdc5609e8e573b67c2c1982 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 1 Jun 2023 14:25:20 +0100 Subject: [PATCH 080/562] Cache requests for user's devices from federation (#15675) This should mitigate the issue where lots of different servers requests the same user's devices all at once. --- changelog.d/15675.misc | 1 + synapse/storage/databases/main/devices.py | 4 ++ .../storage/databases/main/end_to_end_keys.py | 67 ++++++++++++++++++- 3 files changed, 70 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15675.misc diff --git a/changelog.d/15675.misc b/changelog.d/15675.misc new file mode 100644 index 0000000000..05538fdbef --- /dev/null +++ b/changelog.d/15675.misc @@ -0,0 +1 @@ +Cache requests for user's devices over federation. 
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index a67fdb3c22..f677d048aa 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1941,6 +1941,10 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): user_id, stream_ids[-1], ) + txn.call_after( + self._get_e2e_device_keys_for_federation_query_inner.invalidate, + (user_id,), + ) min_stream_id = stream_ids[0] diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 4bc391f213..91ae9c457d 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -16,6 +16,7 @@ import abc from typing import ( TYPE_CHECKING, + Any, Collection, Dict, Iterable, @@ -39,6 +40,7 @@ from synapse.appservice import ( TransactionUnusedFallbackKeys, ) from synapse.logging.opentracing import log_kv, set_tag, trace +from synapse.replication.tcp.streams._base import DeviceListsStream from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import ( DatabasePool, @@ -104,6 +106,23 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker self.hs.config.federation.allow_device_name_lookup_over_federation ) + def process_replication_rows( + self, + stream_name: str, + instance_name: str, + token: int, + rows: Iterable[Any], + ) -> None: + if stream_name == DeviceListsStream.NAME: + for row in rows: + assert isinstance(row, DeviceListsStream.DeviceListsStreamRow) + if row.entity.startswith("@"): + self._get_e2e_device_keys_for_federation_query_inner.invalidate( + (row.entity,) + ) + + super().process_replication_rows(stream_name, instance_name, token, rows) + async def get_e2e_device_keys_for_federation_query( self, user_id: str ) -> Tuple[int, List[JsonDict]]: @@ -114,6 +133,50 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker """ now_stream_id = self.get_device_stream_token() + # We need to be careful with the caching here, as we need to always + # return *all* persisted devices, however there may be a lag between a + # new device being persisted and the cache being invalidated. + cached_results = ( + self._get_e2e_device_keys_for_federation_query_inner.cache.get_immediate( + user_id, None + ) + ) + if cached_results is not None: + # Check that there have been no new devices added by another worker + # after the cache. This should be quick as there should be few rows + # with a higher stream ordering. + # + # Note that we invalidate based on the device stream, so we only + # have to check for potential invalidations after the + # `now_stream_id`. + sql = """ + SELECT user_id FROM device_lists_stream + WHERE stream_id >= ? AND user_id = ? + """ + rows = await self.db_pool.execute( + "get_e2e_device_keys_for_federation_query_check", + None, + sql, + now_stream_id, + user_id, + ) + if not rows: + # No new rows, so cache is still valid. + return now_stream_id, cached_results + + # There has, so let's invalidate the cache and run the query. 
+ self._get_e2e_device_keys_for_federation_query_inner.invalidate((user_id,)) + + results = await self._get_e2e_device_keys_for_federation_query_inner(user_id) + + return now_stream_id, results + + @cached(iterable=True) + async def _get_e2e_device_keys_for_federation_query_inner( + self, user_id: str + ) -> List[JsonDict]: + """Get all devices (with any device keys) for a user""" + devices = await self.get_e2e_device_keys_and_signatures([(user_id, None)]) if devices: @@ -134,9 +197,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker results.append(result) - return now_stream_id, results + return results - return now_stream_id, [] + return [] @trace @cancellable From 4c0bffaca5ded573cc26d99bd5831f136f8acacc Mon Sep 17 00:00:00 2001 From: "H. Shay" Date: Thu, 1 Jun 2023 09:16:35 -0700 Subject: [PATCH 081/562] 1.85.0rc2 --- CHANGES.md | 21 +++++++++++++++++++++ changelog.d/15693.bugfix | 1 - changelog.d/15700.misc | 1 - changelog.d/15703.removal | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 6 files changed, 28 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/15693.bugfix delete mode 100644 changelog.d/15700.misc delete mode 100644 changelog.d/15703.removal diff --git a/CHANGES.md b/CHANGES.md index 14aac9f14e..f0885a2f1e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,24 @@ +Synapse 1.85.0rc2 (2023-06-01) +============================== + +Bugfixes +-------- + +- Fix a performance issue introduced in Synapse v1.83.0 which meant that purging rooms was very slow and database-intensive. ([\#15693](https://github.com/matrix-org/synapse/issues/15693)) + + +Deprecations and Removals +------------------------- + +- Deprecate calling the `/register` endpoint with an unspecced `user` property for application services. ([\#15703](https://github.com/matrix-org/synapse/issues/15703)) + + +Internal Changes +---------------- + +- Speed up background jobs `populate_full_user_id_user_filters` and `populate_full_user_id_profiles`. ([\#15700](https://github.com/matrix-org/synapse/issues/15700)) + + Synapse 1.85.0rc1 (2023-05-30) ============================== diff --git a/changelog.d/15693.bugfix b/changelog.d/15693.bugfix deleted file mode 100644 index d0325de007..0000000000 --- a/changelog.d/15693.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a performance issue introduced in Synapse v1.83.0 which meant that purging rooms was very slow and database-intensive. \ No newline at end of file diff --git a/changelog.d/15700.misc b/changelog.d/15700.misc deleted file mode 100644 index e96bc681aa..0000000000 --- a/changelog.d/15700.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up background jobs `populate_full_user_id_user_filters` and `populate_full_user_id_profiles`. \ No newline at end of file diff --git a/changelog.d/15703.removal b/changelog.d/15703.removal deleted file mode 100644 index 95a2d8e484..0000000000 --- a/changelog.d/15703.removal +++ /dev/null @@ -1 +0,0 @@ -Deprecate calling the `/register` endpoint with an unspecced `user` property for application services. diff --git a/debian/changelog b/debian/changelog index 2d88cd9d29..ae348ce4df 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.85.0~rc2) stable; urgency=medium + + * New Synapse release 1.85.0rc2. + + -- Synapse Packaging team Thu, 01 Jun 2023 09:16:18 -0700 + matrix-synapse-py3 (1.85.0~rc1) stable; urgency=medium * New Synapse release 1.85.0rc1. 
diff --git a/pyproject.toml b/pyproject.toml index 7227bc7523..4ed4214f34 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.85.0rc1" +version = "1.85.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 30a5076da8ad776c150ad2745b5f34b4446012e0 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 1 Jun 2023 21:27:18 -0500 Subject: [PATCH 082/562] Log when events are (unexpectedly) filtered out of responses in tests (#14213) See https://github.com/matrix-org/synapse/pull/14095#discussion_r990335492 This is useful because when see that a relevant event is an `outlier` or `soft-failed`, then that's a good unexpected indicator explaining why it's not showing up. `filter_events_for_client` is used in `/sync`, `/messages`, `/context` which are all common end-to-end assertion touch points (also notifications, relations). --- changelog.d/14213.misc | 1 + docker/README.md | 3 ++- docker/conf/log.config | 30 +++++++++++++++++++++------ docker/configure_workers_and_start.py | 3 +++ scripts-dev/complement.sh | 4 ++++ synapse/visibility.py | 14 ++++++------- tests/test_utils/logging_setup.py | 12 +++++++++++ 7 files changed, 53 insertions(+), 14 deletions(-) create mode 100644 changelog.d/14213.misc diff --git a/changelog.d/14213.misc b/changelog.d/14213.misc new file mode 100644 index 0000000000..b0689f3d15 --- /dev/null +++ b/changelog.d/14213.misc @@ -0,0 +1 @@ +Log when events are (maybe unexpectedly) filtered out of responses in tests. diff --git a/docker/README.md b/docker/README.md index eda3221c23..08372e95c6 100644 --- a/docker/README.md +++ b/docker/README.md @@ -73,7 +73,8 @@ The following environment variables are supported in `generate` mode: will log sensitive information such as access tokens. This should not be needed unless you are a developer attempting to debug something particularly tricky. - +* `SYNAPSE_LOG_TESTING`: if set, Synapse will log additional information useful + for testing. ## Postgres diff --git a/docker/conf/log.config b/docker/conf/log.config index 90b5179838..5772321202 100644 --- a/docker/conf/log.config +++ b/docker/conf/log.config @@ -49,17 +49,35 @@ handlers: class: logging.StreamHandler formatter: precise -{% if not SYNAPSE_LOG_SENSITIVE %} -{# - If SYNAPSE_LOG_SENSITIVE is unset, then override synapse.storage.SQL to INFO - so that DEBUG entries (containing sensitive information) are not emitted. -#} loggers: + # This is just here so we can leave `loggers` in the config regardless of whether + # we configure other loggers below (avoid empty yaml dict error). + _placeholder: + level: "INFO" + + {% if not SYNAPSE_LOG_SENSITIVE %} + {# + If SYNAPSE_LOG_SENSITIVE is unset, then override synapse.storage.SQL to INFO + so that DEBUG entries (containing sensitive information) are not emitted. + #} synapse.storage.SQL: # beware: increasing this to DEBUG will make synapse log sensitive # information such as access tokens. level: INFO -{% endif %} + {% endif %} + + {% if SYNAPSE_LOG_TESTING %} + {# + If Synapse is under test, log a few more useful things for a developer + attempting to debug something particularly tricky. + + With `synapse.visibility.filtered_event_debug`, it logs when events are (maybe + unexpectedly) filtered out of responses in tests. It's just nice to be able to + look at the CI log and figure out why an event isn't being returned. 
+ #} + synapse.visibility.filtered_event_debug: + level: DEBUG + {% endif %} root: level: {{ SYNAPSE_LOG_LEVEL or "INFO" }} diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 79b5b87397..87a740e3d4 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -40,6 +40,8 @@ # log level. INFO is the default. # * SYNAPSE_LOG_SENSITIVE: If unset, SQL and SQL values won't be logged, # regardless of the SYNAPSE_LOG_LEVEL setting. +# * SYNAPSE_LOG_TESTING: if set, Synapse will log additional information useful +# for testing. # # NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined # in the project's README), this script may be run multiple times, and functionality should @@ -947,6 +949,7 @@ def generate_worker_log_config( extra_log_template_args["SYNAPSE_LOG_SENSITIVE"] = environ.get( "SYNAPSE_LOG_SENSITIVE" ) + extra_log_template_args["SYNAPSE_LOG_TESTING"] = environ.get("SYNAPSE_LOG_TESTING") # Render and write the file log_config_filepath = f"/conf/workers/{worker_name}.log.config" diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index cba2799f15..131f26234e 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -269,6 +269,10 @@ if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then export PASS_SYNAPSE_LOG_SENSITIVE=1 fi +# Log a few more useful things for a developer attempting to debug something +# particularly tricky. +export PASS_SYNAPSE_LOG_TESTING=1 + # Run the tests! echo "Images built; running complement" cd "$COMPLEMENT_DIR" diff --git a/synapse/visibility.py b/synapse/visibility.py index 468e22f8f6..fc71dc92a4 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -41,7 +41,7 @@ from synapse.types.state import StateFilter from synapse.util import Clock logger = logging.getLogger(__name__) - +filtered_event_logger = logging.getLogger("synapse.visibility.filtered_event_debug") VISIBILITY_PRIORITY = ( HistoryVisibility.WORLD_READABLE, @@ -97,8 +97,8 @@ async def filter_events_for_client( events_before_filtering = events events = [e for e in events if not e.internal_metadata.is_soft_failed()] if len(events_before_filtering) != len(events): - if logger.isEnabledFor(logging.DEBUG): - logger.debug( + if filtered_event_logger.isEnabledFor(logging.DEBUG): + filtered_event_logger.debug( "filter_events_for_client: Filtered out soft-failed events: Before=%s, After=%s", [event.event_id for event in events_before_filtering], [event.event_id for event in events], @@ -319,7 +319,7 @@ def _check_client_allowed_to_see_event( _check_filter_send_to_client(event, clock, retention_policy, sender_ignored) == _CheckFilter.DENIED ): - logger.debug( + filtered_event_logger.debug( "_check_client_allowed_to_see_event(event=%s): Filtered out event because `_check_filter_send_to_client` returned `_CheckFilter.DENIED`", event.event_id, ) @@ -341,7 +341,7 @@ def _check_client_allowed_to_see_event( ) return event - logger.debug( + filtered_event_logger.debug( "_check_client_allowed_to_see_event(event=%s): Filtered out event because it's an outlier", event.event_id, ) @@ -367,7 +367,7 @@ def _check_client_allowed_to_see_event( membership_result = _check_membership(user_id, event, visibility, state, is_peeking) if not membership_result.allowed: - logger.debug( + filtered_event_logger.debug( "_check_client_allowed_to_see_event(event=%s): Filtered out event because the user can't see the event because of their membership, membership_result.allowed=%s 
membership_result.joined=%s", event.event_id, membership_result.allowed, @@ -378,7 +378,7 @@ def _check_client_allowed_to_see_event( # If the sender has been erased and the user was not joined at the time, we # must only return the redacted form. if sender_erased and not membership_result.joined: - logger.debug( + filtered_event_logger.debug( "_check_client_allowed_to_see_event(event=%s): Returning pruned event because `sender_erased` and the user was not joined at the time", event.event_id, ) diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py index c37f205ed0..199bb06a81 100644 --- a/tests/test_utils/logging_setup.py +++ b/tests/test_utils/logging_setup.py @@ -53,4 +53,16 @@ def setup_logging() -> None: log_level = os.environ.get("SYNAPSE_TEST_LOG_LEVEL", "ERROR") root_logger.setLevel(log_level) + # In order to not add noise by default (since we only log ERROR messages for trial + # tests as configured above), we only enable this for developers for looking for + # more INFO or DEBUG. + if root_logger.isEnabledFor(logging.INFO): + # Log when events are (maybe unexpectedly) filtered out of responses in tests. It's + # just nice to be able to look at the CI log and figure out why an event isn't being + # returned. + logging.getLogger("synapse.visibility.filtered_event_debug").setLevel( + logging.DEBUG + ) + + # Blow away the pyo3-log cache so that it reloads the configuration. reset_logging_config() From e0f2429d137c74059f5b7f151297e28dbfd82d48 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Fri, 2 Jun 2023 15:13:50 +0200 Subject: [PATCH 083/562] Add a catch-all * to the supported relation types when redacting (#15705) This is an update to MSC3912 implementation --- changelog.d/15705.feature | 1 + synapse/handlers/relations.py | 16 ++- synapse/storage/databases/main/relations.py | 30 ++++++ tests/rest/client/test_redactions.py | 104 +++++++++++++++++++- 4 files changed, 143 insertions(+), 8 deletions(-) create mode 100644 changelog.d/15705.feature diff --git a/changelog.d/15705.feature b/changelog.d/15705.feature new file mode 100644 index 0000000000..e3cbb5a12e --- /dev/null +++ b/changelog.d/15705.feature @@ -0,0 +1 @@ +Add a catch-all * to the supported relation types when redacting an event and its related events. This is an update to [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) implementation. diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index 4824635162..db97f7aede 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -205,16 +205,22 @@ class RelationsHandler: event_id: The event IDs to look and redact relations of. initial_redaction_event: The redaction for the event referred to by event_id. - relation_types: The types of relations to look for. + relation_types: The types of relations to look for. If "*" is in the list, + all related events will be redacted regardless of the type. 
Raises: ShadowBanError if the requester is shadow-banned """ - related_event_ids = ( - await self._main_store.get_all_relations_for_event_with_types( - event_id, relation_types + if "*" in relation_types: + related_event_ids = await self._main_store.get_all_relations_for_event( + event_id + ) + else: + related_event_ids = ( + await self._main_store.get_all_relations_for_event_with_types( + event_id, relation_types + ) ) - ) for related_event_id in related_event_ids: try: diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 4a6c6c724d..96908f14ba 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -365,6 +365,36 @@ class RelationsWorkerStore(SQLBaseStore): func=get_all_relation_ids_for_event_with_types_txn, ) + async def get_all_relations_for_event( + self, + event_id: str, + ) -> List[str]: + """Get the event IDs of all events that have a relation to the given event. + + Args: + event_id: The event for which to look for related events. + + Returns: + A list of the IDs of the events that relate to the given event. + """ + + def get_all_relation_ids_for_event_txn( + txn: LoggingTransaction, + ) -> List[str]: + rows = self.db_pool.simple_select_list_txn( + txn=txn, + table="event_relations", + keyvalues={"relates_to_id": event_id}, + retcols=["event_id"], + ) + + return [row["event_id"] for row in rows] + + return await self.db_pool.runInteraction( + desc="get_all_relation_ids_for_event", + func=get_all_relation_ids_for_event_txn, + ) + async def event_includes_relation(self, event_id: str) -> bool: """Check if the given event relates to another event. diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py index 84a60c0b07..b43e95292c 100644 --- a/tests/rest/client/test_redactions.py +++ b/tests/rest/client/test_redactions.py @@ -217,9 +217,9 @@ class RedactionsTestCase(HomeserverTestCase): self._redact_event(self.mod_access_token, self.room_id, msg_id) @override_config({"experimental_features": {"msc3912_enabled": True}}) - def test_redact_relations(self) -> None: - """Tests that we can redact the relations of an event at the same time as the - event itself. + def test_redact_relations_with_types(self) -> None: + """Tests that we can redact the relations of an event of specific types + at the same time as the event itself. """ # Send a root event. res = self.helper.send_event( @@ -317,6 +317,104 @@ class RedactionsTestCase(HomeserverTestCase): ) self.assertNotIn("redacted_because", event_dict, event_dict) + @override_config({"experimental_features": {"msc3912_enabled": True}}) + def test_redact_all_relations(self) -> None: + """Tests that we can redact all the relations of an event at the same time as the + event itself. + """ + # Send a root event. + res = self.helper.send_event( + room_id=self.room_id, + type=EventTypes.Message, + content={"msgtype": "m.text", "body": "hello"}, + tok=self.mod_access_token, + ) + root_event_id = res["event_id"] + + # Send an edit to this root event. + res = self.helper.send_event( + room_id=self.room_id, + type=EventTypes.Message, + content={ + "body": " * hello world", + "m.new_content": { + "body": "hello world", + "msgtype": "m.text", + }, + "m.relates_to": { + "event_id": root_event_id, + "rel_type": RelationTypes.REPLACE, + }, + "msgtype": "m.text", + }, + tok=self.mod_access_token, + ) + edit_event_id = res["event_id"] + + # Also send a threaded message whose root is the same as the edit's. 
+ res = self.helper.send_event( + room_id=self.room_id, + type=EventTypes.Message, + content={ + "msgtype": "m.text", + "body": "message 1", + "m.relates_to": { + "event_id": root_event_id, + "rel_type": RelationTypes.THREAD, + }, + }, + tok=self.mod_access_token, + ) + threaded_event_id = res["event_id"] + + # Also send a reaction, again with the same root. + res = self.helper.send_event( + room_id=self.room_id, + type=EventTypes.Reaction, + content={ + "m.relates_to": { + "rel_type": RelationTypes.ANNOTATION, + "event_id": root_event_id, + "key": "👍", + } + }, + tok=self.mod_access_token, + ) + reaction_event_id = res["event_id"] + + # Redact the root event, specifying that we also want to delete all events that + # relate to it. + self._redact_event( + self.mod_access_token, + self.room_id, + root_event_id, + with_relations=["*"], + ) + + # Check that the root event got redacted. + event_dict = self.helper.get_event( + self.room_id, root_event_id, self.mod_access_token + ) + self.assertIn("redacted_because", event_dict, event_dict) + + # Check that the edit got redacted. + event_dict = self.helper.get_event( + self.room_id, edit_event_id, self.mod_access_token + ) + self.assertIn("redacted_because", event_dict, event_dict) + + # Check that the threaded message got redacted. + event_dict = self.helper.get_event( + self.room_id, threaded_event_id, self.mod_access_token + ) + self.assertIn("redacted_because", event_dict, event_dict) + + # Check that the reaction got redacted. + event_dict = self.helper.get_event( + self.room_id, reaction_event_id, self.mod_access_token + ) + self.assertIn("redacted_because", event_dict, event_dict) + @override_config({"experimental_features": {"msc3912_enabled": True}}) def test_redact_relations_no_perms(self) -> None: """Tests that, when redacting a message along with its relations, if not all From d0c4257f14addbf0c9072c2e34ae1c8294716ed5 Mon Sep 17 00:00:00 2001 From: Shay Date: Fri, 2 Jun 2023 17:24:13 -0700 Subject: [PATCH 084/562] `N + 3`: Read from column `full_user_id` rather than `user_id` of tables `profiles` and `user_filters` (#15649) --- changelog.d/15649.misc | 1 + synapse/api/filtering.py | 4 +- synapse/handlers/account_validity.py | 2 +- synapse/handlers/admin.py | 2 +- synapse/handlers/auth.py | 2 +- synapse/handlers/deactivate_account.py | 2 +- synapse/handlers/profile.py | 26 ++--- synapse/handlers/register.py | 2 +- synapse/module_api/__init__.py | 4 +- synapse/push/mailer.py | 2 +- synapse/rest/client/filter.py | 2 +- synapse/rest/client/sync.py | 2 +- synapse/storage/databases/main/filtering.py | 12 +-- synapse/storage/databases/main/profile.py | 12 +-- synapse/storage/schema/__init__.py | 5 +- .../78/01_validate_and_update_profiles.py | 92 ++++++++++++++++++ .../78/02_validate_and_update_user_filters.py | 95 +++++++++++++++++++ tests/api/test_filtering.py | 25 ++--- tests/handlers/test_profile.py | 28 ++---- tests/module_api/test_api.py | 6 +- tests/rest/client/test_filter.py | 4 +- tests/storage/test_profile.py | 17 +--- 22 files changed, 252 insertions(+), 95 deletions(-) create mode 100644 changelog.d/15649.misc create mode 100644 synapse/storage/schema/main/delta/78/01_validate_and_update_profiles.py create mode 100644 synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py diff --git a/changelog.d/15649.misc b/changelog.d/15649.misc new file mode 100644 index 0000000000..fca38abe0f --- /dev/null +++ b/changelog.d/15649.misc @@ -0,0 +1 @@ +Read from column `full_user_id` rather than `user_id` of tables 
`profiles` and `user_filters`. diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 82aeef8d19..0995ecbe83 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -152,9 +152,9 @@ class Filtering: self.DEFAULT_FILTER_COLLECTION = FilterCollection(hs, {}) async def get_user_filter( - self, user_localpart: str, filter_id: Union[int, str] + self, user_id: UserID, filter_id: Union[int, str] ) -> "FilterCollection": - result = await self.store.get_user_filter(user_localpart, filter_id) + result = await self.store.get_user_filter(user_id, filter_id) return FilterCollection(self._hs, result) def add_user_filter(self, user_id: UserID, user_filter: JsonDict) -> Awaitable[int]: diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 4aa4ebf7e4..f1a7a05df6 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -164,7 +164,7 @@ class AccountValidityHandler: try: user_display_name = await self.store.get_profile_displayname( - UserID.from_string(user_id).localpart + UserID.from_string(user_id) ) if user_display_name is None: user_display_name = user_id diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index b06f25b03c..119c7f8384 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -89,7 +89,7 @@ class AdminHandler: } # Add additional user metadata - profile = await self._store.get_profileinfo(user.localpart) + profile = await self._store.get_profileinfo(user) threepids = await self._store.user_get_threepids(user.to_string()) external_ids = [ ({"auth_provider": auth_provider, "external_id": external_id}) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 4f986d90cb..59ecafa6a0 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -1759,7 +1759,7 @@ class AuthHandler: return user_profile_data = await self.store.get_profileinfo( - UserID.from_string(registered_user_id).localpart + UserID.from_string(registered_user_id) ) # Store any extra attributes which will be passed in the login response. diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index f299b89a1b..67adeae6a7 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -297,5 +297,5 @@ class DeactivateAccountHandler: # Add the user to the directory, if necessary. Note that # this must be done after the user is re-activated, because # deactivated users are excluded from the user directory. 
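        # (`get_profileinfo` is keyed on the full `UserID` and reads the
        # `full_user_id` column; the schema 78 delta scripts in this patch
        # validate that column.)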
- profile = await self.store.get_profileinfo(user.localpart) + profile = await self.store.get_profileinfo(user) await self.user_directory_handler.handle_local_profile_change(user_id, profile) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index a9160c87e3..a7f8c5e636 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -67,7 +67,7 @@ class ProfileHandler: target_user = UserID.from_string(user_id) if self.hs.is_mine(target_user): - profileinfo = await self.store.get_profileinfo(target_user.localpart) + profileinfo = await self.store.get_profileinfo(target_user) if profileinfo.display_name is None: raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) @@ -99,9 +99,7 @@ class ProfileHandler: async def get_displayname(self, target_user: UserID) -> Optional[str]: if self.hs.is_mine(target_user): try: - displayname = await self.store.get_profile_displayname( - target_user.localpart - ) + displayname = await self.store.get_profile_displayname(target_user) except StoreError as e: if e.code == 404: raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) @@ -147,7 +145,7 @@ class ProfileHandler: raise AuthError(400, "Cannot set another user's displayname") if not by_admin and not self.hs.config.registration.enable_set_displayname: - profile = await self.store.get_profileinfo(target_user.localpart) + profile = await self.store.get_profileinfo(target_user) if profile.display_name: raise SynapseError( 400, @@ -180,7 +178,7 @@ class ProfileHandler: await self.store.set_profile_displayname(target_user, displayname_to_set) - profile = await self.store.get_profileinfo(target_user.localpart) + profile = await self.store.get_profileinfo(target_user) await self.user_directory_handler.handle_local_profile_change( target_user.to_string(), profile ) @@ -194,9 +192,7 @@ class ProfileHandler: async def get_avatar_url(self, target_user: UserID) -> Optional[str]: if self.hs.is_mine(target_user): try: - avatar_url = await self.store.get_profile_avatar_url( - target_user.localpart - ) + avatar_url = await self.store.get_profile_avatar_url(target_user) except StoreError as e: if e.code == 404: raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) @@ -241,7 +237,7 @@ class ProfileHandler: raise AuthError(400, "Cannot set another user's avatar_url") if not by_admin and not self.hs.config.registration.enable_set_avatar_url: - profile = await self.store.get_profileinfo(target_user.localpart) + profile = await self.store.get_profileinfo(target_user) if profile.avatar_url: raise SynapseError( 400, "Changing avatar is disabled on this server", Codes.FORBIDDEN @@ -272,7 +268,7 @@ class ProfileHandler: await self.store.set_profile_avatar_url(target_user, avatar_url_to_set) - profile = await self.store.get_profileinfo(target_user.localpart) + profile = await self.store.get_profileinfo(target_user) await self.user_directory_handler.handle_local_profile_change( target_user.to_string(), profile ) @@ -369,14 +365,10 @@ class ProfileHandler: response = {} try: if just_field is None or just_field == "displayname": - response["displayname"] = await self.store.get_profile_displayname( - user.localpart - ) + response["displayname"] = await self.store.get_profile_displayname(user) if just_field is None or just_field == "avatar_url": - response["avatar_url"] = await self.store.get_profile_avatar_url( - user.localpart - ) + response["avatar_url"] = await self.store.get_profile_avatar_url(user) except StoreError as e: if e.code == 404: raise SynapseError(404, 
"Profile was not found", Codes.NOT_FOUND) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index c80946c2e9..a2d3f03061 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -315,7 +315,7 @@ class RegistrationHandler: approved=approved, ) - profile = await self.store.get_profileinfo(localpart) + profile = await self.store.get_profileinfo(user) await self.user_directory_handler.handle_local_profile_change( user_id, profile ) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index a8d6224a45..84b2aef620 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -655,7 +655,9 @@ class ModuleApi: Returns: The profile information (i.e. display name and avatar URL). """ - return await self._store.get_profileinfo(localpart) + server_name = self._hs.hostname + user_id = UserID.from_string(f"@{localpart}:{server_name}") + return await self._store.get_profileinfo(user_id) async def get_threepids_for_user(self, user_id: str) -> List[Dict[str, str]]: """Look up the threepids (email addresses and phone numbers) associated with the diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 491a09b71d..79e0627b6a 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -247,7 +247,7 @@ class Mailer: try: user_display_name = await self.store.get_profile_displayname( - UserID.from_string(user_id).localpart + UserID.from_string(user_id) ) if user_display_name is None: user_display_name = user_id diff --git a/synapse/rest/client/filter.py b/synapse/rest/client/filter.py index 04561f36d7..5da1e511a2 100644 --- a/synapse/rest/client/filter.py +++ b/synapse/rest/client/filter.py @@ -58,7 +58,7 @@ class GetFilterRestServlet(RestServlet): try: filter_collection = await self.filtering.get_user_filter( - user_localpart=target_user.localpart, filter_id=filter_id_int + user_id=target_user, filter_id=filter_id_int ) except StoreError as e: if e.code != 404: diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 03b0578945..d7854ed4fd 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -178,7 +178,7 @@ class SyncRestServlet(RestServlet): else: try: filter_collection = await self.filtering.get_user_filter( - user.localpart, filter_id + user, filter_id ) except StoreError as err: if err.code != 404: diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index f777777cbf..fff417f9e3 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -145,7 +145,7 @@ class FilteringWorkerStore(SQLBaseStore): @cached(num_args=2) async def get_user_filter( - self, user_localpart: str, filter_id: Union[int, str] + self, user_id: UserID, filter_id: Union[int, str] ) -> JsonDict: # filter_id is BIGINT UNSIGNED, so if it isn't a number, fail # with a coherent error message rather than 500 M_UNKNOWN. @@ -156,7 +156,7 @@ class FilteringWorkerStore(SQLBaseStore): def_json = await self.db_pool.simple_select_one_onecol( table="user_filters", - keyvalues={"user_id": user_localpart, "filter_id": filter_id}, + keyvalues={"full_user_id": user_id.to_string(), "filter_id": filter_id}, retcol="filter_json", allow_none=False, desc="get_user_filter", @@ -172,15 +172,15 @@ class FilteringWorkerStore(SQLBaseStore): def _do_txn(txn: LoggingTransaction) -> int: sql = ( "SELECT filter_id FROM user_filters " - "WHERE user_id = ? AND filter_json = ?" + "WHERE full_user_id = ? 
AND filter_json = ?" ) - txn.execute(sql, (user_id.localpart, bytearray(def_json))) + txn.execute(sql, (user_id.to_string(), bytearray(def_json))) filter_id_response = txn.fetchone() if filter_id_response is not None: return filter_id_response[0] - sql = "SELECT MAX(filter_id) FROM user_filters WHERE user_id = ?" - txn.execute(sql, (user_id.localpart,)) + sql = "SELECT MAX(filter_id) FROM user_filters WHERE full_user_id = ?" + txn.execute(sql, (user_id.to_string(),)) max_id = cast(Tuple[Optional[int]], txn.fetchone())[0] if max_id is None: filter_id = 0 diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index 21d54c7a7a..3ba9cc8853 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -137,11 +137,11 @@ class ProfileWorkerStore(SQLBaseStore): return 50 - async def get_profileinfo(self, user_localpart: str) -> ProfileInfo: + async def get_profileinfo(self, user_id: UserID) -> ProfileInfo: try: profile = await self.db_pool.simple_select_one( table="profiles", - keyvalues={"user_id": user_localpart}, + keyvalues={"full_user_id": user_id.to_string()}, retcols=("displayname", "avatar_url"), desc="get_profileinfo", ) @@ -156,18 +156,18 @@ class ProfileWorkerStore(SQLBaseStore): avatar_url=profile["avatar_url"], display_name=profile["displayname"] ) - async def get_profile_displayname(self, user_localpart: str) -> Optional[str]: + async def get_profile_displayname(self, user_id: UserID) -> Optional[str]: return await self.db_pool.simple_select_one_onecol( table="profiles", - keyvalues={"user_id": user_localpart}, + keyvalues={"full_user_id": user_id.to_string()}, retcol="displayname", desc="get_profile_displayname", ) - async def get_profile_avatar_url(self, user_localpart: str) -> Optional[str]: + async def get_profile_avatar_url(self, user_id: UserID) -> Optional[str]: return await self.db_pool.simple_select_one_onecol( table="profiles", - keyvalues={"user_id": user_localpart}, + keyvalues={"full_user_id": user_id.to_string()}, retcol="avatar_url", desc="get_profile_avatar_url", ) diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 5cc786f030..fc190a8b13 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -SCHEMA_VERSION = 77 # remember to update the list below when updating +SCHEMA_VERSION = 78 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -103,6 +103,9 @@ Changes in SCHEMA_VERSION = 76: Changes in SCHEMA_VERSION = 77 - (Postgres) Add NOT VALID CHECK (full_user_id IS NOT NULL) to tables profiles and user_filters + +Changes in SCHEMA_VERSION = 78 + - Validate check (full_user_id IS NOT NULL) on tables profiles and user_filters """ diff --git a/synapse/storage/schema/main/delta/78/01_validate_and_update_profiles.py b/synapse/storage/schema/main/delta/78/01_validate_and_update_profiles.py new file mode 100644 index 0000000000..8398d8f548 --- /dev/null +++ b/synapse/storage/schema/main/delta/78/01_validate_and_update_profiles.py @@ -0,0 +1,92 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.config.homeserver import HomeServerConfig +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine + + +def run_upgrade( + cur: LoggingTransaction, + database_engine: BaseDatabaseEngine, + config: HomeServerConfig, +) -> None: + """ + Part 3 of a multi-step migration to drop the column `user_id` and replace it with + `full_user_id`. See the database schema docs for more information on the full + migration steps. + """ + hostname = config.server.server_name + + if isinstance(database_engine, PostgresEngine): + # check if the constraint can be validated + check_sql = """ + SELECT user_id from profiles WHERE full_user_id IS NULL + """ + cur.execute(check_sql) + res = cur.fetchall() + + if res: + # there are rows the background job missed, finish them here before we validate the constraint + process_rows_sql = """ + UPDATE profiles + SET full_user_id = '@' || user_id || ? + WHERE user_id IN ( + SELECT user_id FROM profiles WHERE full_user_id IS NULL + ) + """ + cur.execute(process_rows_sql, (f":{hostname}",)) + + # Now we can validate + validate_sql = """ + ALTER TABLE profiles VALIDATE CONSTRAINT full_user_id_not_null + """ + cur.execute(validate_sql) + + else: + # in SQLite we need to rewrite the table to add the constraint. + # First drop any temporary table that might be here from a previous failed migration. + cur.execute("DROP TABLE IF EXISTS temp_profiles") + + create_sql = """ + CREATE TABLE temp_profiles ( + full_user_id text NOT NULL, + user_id text, + displayname text, + avatar_url text, + UNIQUE (full_user_id), + UNIQUE (user_id) + ) + """ + cur.execute(create_sql) + + copy_sql = """ + INSERT INTO temp_profiles ( + user_id, + displayname, + avatar_url, + full_user_id) + SELECT user_id, displayname, avatar_url, '@' || user_id || ':' || ? FROM profiles + """ + cur.execute(copy_sql, (f"{hostname}",)) + + drop_sql = """ + DROP TABLE profiles + """ + cur.execute(drop_sql) + + rename_sql = """ + ALTER TABLE temp_profiles RENAME to profiles + """ + cur.execute(rename_sql) diff --git a/synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py b/synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py new file mode 100644 index 0000000000..8ef63335e7 --- /dev/null +++ b/synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py @@ -0,0 +1,95 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
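+# As with 01_validate_and_update_profiles.py: on Postgres the NOT VALID
+# check constraint can be backfilled and then VALIDATEd in place, whereas
+# SQLite cannot attach such a constraint to an existing table, so there the
+# table is rebuilt under a temporary name and renamed back.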
+ +from synapse.config.homeserver import HomeServerConfig +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine + + +def run_upgrade( + cur: LoggingTransaction, + database_engine: BaseDatabaseEngine, + config: HomeServerConfig, +) -> None: + """ + Part 3 of a multi-step migration to drop the column `user_id` and replace it with + `full_user_id`. See the database schema docs for more information on the full + migration steps. + """ + hostname = config.server.server_name + + if isinstance(database_engine, PostgresEngine): + # check if the constraint can be validated + check_sql = """ + SELECT user_id from user_filters WHERE full_user_id IS NULL + """ + cur.execute(check_sql) + res = cur.fetchall() + + if res: + # there are rows the background job missed, finish them here before we validate constraint + process_rows_sql = """ + UPDATE user_filters + SET full_user_id = '@' || user_id || ? + WHERE user_id IN ( + SELECT user_id FROM user_filters WHERE full_user_id IS NULL + ) + """ + cur.execute(process_rows_sql, (f":{hostname}",)) + + # Now we can validate + validate_sql = """ + ALTER TABLE user_filters VALIDATE CONSTRAINT full_user_id_not_null + """ + cur.execute(validate_sql) + + else: + cur.execute("DROP TABLE IF EXISTS temp_user_filters") + create_sql = """ + CREATE TABLE temp_user_filters ( + full_user_id text NOT NULL, + user_id text NOT NULL, + filter_id bigint NOT NULL, + filter_json bytea NOT NULL, + UNIQUE (full_user_id), + UNIQUE (user_id) + ) + """ + cur.execute(create_sql) + + index_sql = """ + CREATE UNIQUE INDEX IF NOT EXISTS user_filters_unique ON + temp_user_filters (user_id, filter_id) + """ + cur.execute(index_sql) + + copy_sql = """ + INSERT INTO temp_user_filters ( + user_id, + filter_id, + filter_json, + full_user_id) + SELECT user_id, filter_id, filter_json, '@' || user_id || ':' || ? 
FROM user_filters + """ + cur.execute(copy_sql, (f"{hostname}",)) + + drop_sql = """ + DROP TABLE user_filters + """ + cur.execute(drop_sql) + + rename_sql = """ + ALTER TABLE temp_user_filters RENAME to user_filters + """ + cur.execute(rename_sql) diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index aa6af5ad7b..868f0c6995 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -35,7 +35,6 @@ from tests.events.test_utils import MockEvent user_id = UserID.from_string("@test_user:test") user2_id = UserID.from_string("@test_user2:test") -user_localpart = "test_user" class FilteringTestCase(unittest.HomeserverTestCase): @@ -449,9 +448,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): ] user_filter = self.get_success( - self.filtering.get_user_filter( - user_localpart=user_localpart, filter_id=filter_id - ) + self.filtering.get_user_filter(user_id=user_id, filter_id=filter_id) ) results = self.get_success(user_filter.filter_presence(presence_states)) @@ -479,9 +476,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): ] user_filter = self.get_success( - self.filtering.get_user_filter( - user_localpart=user_localpart + "2", filter_id=filter_id - ) + self.filtering.get_user_filter(user_id=user2_id, filter_id=filter_id) ) results = self.get_success(user_filter.filter_presence(presence_states)) @@ -498,9 +493,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): events = [event] user_filter = self.get_success( - self.filtering.get_user_filter( - user_localpart=user_localpart, filter_id=filter_id - ) + self.filtering.get_user_filter(user_id=user_id, filter_id=filter_id) ) results = self.get_success(user_filter.filter_room_state(events=events)) @@ -519,9 +512,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): events = [event] user_filter = self.get_success( - self.filtering.get_user_filter( - user_localpart=user_localpart, filter_id=filter_id - ) + self.filtering.get_user_filter(user_id=user_id, filter_id=filter_id) ) results = self.get_success(user_filter.filter_room_state(events)) @@ -603,9 +594,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): user_filter_json, ( self.get_success( - self.datastore.get_user_filter( - user_localpart=user_localpart, filter_id=0 - ) + self.datastore.get_user_filter(user_id=user_id, filter_id=0) ) ), ) @@ -620,9 +609,7 @@ class FilteringTestCase(unittest.HomeserverTestCase): ) filter = self.get_success( - self.filtering.get_user_filter( - user_localpart=user_localpart, filter_id=filter_id - ) + self.filtering.get_user_filter(user_id=user_id, filter_id=filter_id) ) self.assertEqual(filter.get_filter_json(), user_filter_json) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 64a9a22afe..196ceb0b82 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -80,11 +80,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - ( - self.get_success( - self.store.get_profile_displayname(self.frank.localpart) - ) - ), + (self.get_success(self.store.get_profile_displayname(self.frank))), "Frank Jr.", ) @@ -96,11 +92,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - ( - self.get_success( - self.store.get_profile_displayname(self.frank.localpart) - ) - ), + (self.get_success(self.store.get_profile_displayname(self.frank))), "Frank", ) @@ -112,7 +104,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) self.assertIsNone( - 
self.get_success(self.store.get_profile_displayname(self.frank.localpart)) + self.get_success(self.store.get_profile_displayname(self.frank)) ) def test_set_my_name_if_disabled(self) -> None: @@ -122,11 +114,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): self.get_success(self.store.set_profile_displayname(self.frank, "Frank")) self.assertEqual( - ( - self.get_success( - self.store.get_profile_displayname(self.frank.localpart) - ) - ), + (self.get_success(self.store.get_profile_displayname(self.frank))), "Frank", ) @@ -201,7 +189,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))), + (self.get_success(self.store.get_profile_avatar_url(self.frank))), "http://my.server/pic.gif", ) @@ -215,7 +203,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))), + (self.get_success(self.store.get_profile_avatar_url(self.frank))), "http://my.server/me.png", ) @@ -229,7 +217,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) self.assertIsNone( - (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))), + (self.get_success(self.store.get_profile_avatar_url(self.frank))), ) def test_set_my_avatar_if_disabled(self) -> None: @@ -241,7 +229,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) self.assertEqual( - (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))), + (self.get_success(self.store.get_profile_avatar_url(self.frank))), "http://my.server/me.png", ) diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index bff7114cd8..b3310abe1b 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -28,7 +28,7 @@ from synapse.module_api import ModuleApi from synapse.rest import admin from synapse.rest.client import login, notifications, presence, profile, room from synapse.server import HomeServer -from synapse.types import JsonDict, create_requester +from synapse.types import JsonDict, UserID, create_requester from synapse.util import Clock from tests.events.test_presence_router import send_presence_update, sync_presence @@ -103,7 +103,9 @@ class ModuleApiTestCase(BaseModuleApiTestCase): self.assertEqual(email["added_at"], 0) # Check that the displayname was assigned - displayname = self.get_success(self.store.get_profile_displayname("bob")) + displayname = self.get_success( + self.store.get_profile_displayname(UserID.from_string("@bob:test")) + ) self.assertEqual(displayname, "Bobberino") def test_can_register_admin_user(self) -> None: diff --git a/tests/rest/client/test_filter.py b/tests/rest/client/test_filter.py index 9faa9de050..a2d5d340be 100644 --- a/tests/rest/client/test_filter.py +++ b/tests/rest/client/test_filter.py @@ -46,7 +46,9 @@ class FilterTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body, {"filter_id": "0"}) filter = self.get_success( - self.store.get_user_filter(user_localpart="apple", filter_id=0) + self.store.get_user_filter( + user_id=UserID.from_string(FilterTestCase.user_id), filter_id=0 + ) ) self.pump() self.assertEqual(filter, self.EXAMPLE_FILTER) diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index f9cf0fcb82..fe5bb77913 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. + from twisted.test.proto_helpers import MemoryReactor from synapse.server import HomeServer @@ -35,18 +36,14 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase): self.assertEqual( "Frank", - ( - self.get_success( - self.store.get_profile_displayname(self.u_frank.localpart) - ) - ), + (self.get_success(self.store.get_profile_displayname(self.u_frank))), ) # test set to None self.get_success(self.store.set_profile_displayname(self.u_frank, None)) self.assertIsNone( - self.get_success(self.store.get_profile_displayname(self.u_frank.localpart)) + self.get_success(self.store.get_profile_displayname(self.u_frank)) ) def test_avatar_url(self) -> None: @@ -58,18 +55,14 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase): self.assertEqual( "http://my.site/here", - ( - self.get_success( - self.store.get_profile_avatar_url(self.u_frank.localpart) - ) - ), + (self.get_success(self.store.get_profile_avatar_url(self.u_frank))), ) # test set to None self.get_success(self.store.set_profile_avatar_url(self.u_frank, None)) self.assertIsNone( - self.get_success(self.store.get_profile_avatar_url(self.u_frank.localpart)) + self.get_success(self.store.get_profile_avatar_url(self.u_frank)) ) def test_profiles_bg_migration(self) -> None: From 8ba530c0e3b157137031d456225b7ba1e0b1627d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 10:31:41 +0100 Subject: [PATCH 085/562] Bump importlib-metadata from 6.1.0 to 6.6.0 (#15711) Bumps [importlib-metadata](https://github.com/python/importlib_metadata) from 6.1.0 to 6.6.0. - [Release notes](https://github.com/python/importlib_metadata/releases) - [Changelog](https://github.com/python/importlib_metadata/blob/main/CHANGES.rst) - [Commits](https://github.com/python/importlib_metadata/compare/v6.1.0...v6.6.0) --- updated-dependencies: - dependency-name: importlib-metadata dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/poetry.lock b/poetry.lock index d8964f5719..180f274087 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand. +# This file is automatically @generated by Poetry and should not be changed by hand. 
[[package]] name = "alabaster" @@ -867,14 +867,14 @@ files = [ [[package]] name = "importlib-metadata" -version = "6.1.0" +version = "6.6.0" description = "Read metadata from Python packages" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "importlib_metadata-6.1.0-py3-none-any.whl", hash = "sha256:ff80f3b5394912eb1b108fcfd444dc78b7f1f3e16b16188054bd01cb9cb86f09"}, - {file = "importlib_metadata-6.1.0.tar.gz", hash = "sha256:43ce9281e097583d758c2c708c4376371261a02c34682491a8e98352365aad20"}, + {file = "importlib_metadata-6.6.0-py3-none-any.whl", hash = "sha256:43dd286a2cd8995d5eaef7fee2066340423b818ed3fd70adf0bad5f1fac53fed"}, + {file = "importlib_metadata-6.6.0.tar.gz", hash = "sha256:92501cdf9cc66ebd3e612f1b4f0c0765dfa42f0fa38ffb319b6bd84dd675d705"}, ] [package.dependencies] @@ -3424,18 +3424,18 @@ docs = ["Sphinx", "repoze.sphinx.autointerface"] test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"] [extras] -all = ["Pympler", "authlib", "hiredis", "jaeger-client", "lxml", "matrix-synapse-ldap3", "opentracing", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pyicu", "pysaml2", "sentry-sdk", "txredisapi"] +all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "txredisapi", "hiredis", "Pympler", "pyicu"] cache-memory = ["Pympler"] jwt = ["authlib"] matrix-synapse-ldap3 = ["matrix-synapse-ldap3"] oidc = ["authlib"] opentracing = ["jaeger-client", "opentracing"] postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"] -redis = ["hiredis", "txredisapi"] +redis = ["txredisapi", "hiredis"] saml2 = ["pysaml2"] sentry = ["sentry-sdk"] systemd = ["systemd-python"] -test = ["idna", "parameterized"] +test = ["parameterized", "idna"] url-preview = ["lxml"] user-search = ["pyicu"] From 36a5bcae2cf70f5b7dec44e34c10d7e47ee0bcc2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 10:31:54 +0100 Subject: [PATCH 086/562] Bump library/redis from 6-bullseye to 7-bullseye in /docker (#15712) Bumps library/redis from 6-bullseye to 7-bullseye. --- updated-dependencies: - dependency-name: library/redis dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docker/Dockerfile-workers | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers index adb9a725e3..31d6d33407 100644 --- a/docker/Dockerfile-workers +++ b/docker/Dockerfile-workers @@ -21,7 +21,7 @@ FROM docker.io/library/debian:bullseye-slim AS deps_base # which makes it much easier to copy (but we need to make sure we use an image # based on the same debian version as the synapse image, to make sure we get # the expected version of libc. -FROM docker.io/library/redis:6-bullseye AS redis_base +FROM docker.io/library/redis:7-bullseye AS redis_base # now build the final image, based on the the regular Synapse docker image FROM $FROM From 5feabbdf062d16577f697fed41687c7bffc60c49 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 10:32:07 +0100 Subject: [PATCH 087/562] Bump pyasn1 from 0.4.8 to 0.5.0 (#15713) Bumps [pyasn1](https://github.com/pyasn1/pyasn1) from 0.4.8 to 0.5.0. 
- [Release notes](https://github.com/pyasn1/pyasn1/releases) - [Changelog](https://github.com/pyasn1/pyasn1/blob/main/CHANGES.rst) - [Commits](https://github.com/pyasn1/pyasn1/compare/v0.4.8...v0.5.0) --- updated-dependencies: - dependency-name: pyasn1 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index 180f274087..d2fc2c1c9c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1863,14 +1863,14 @@ psycopg2 = "*" [[package]] name = "pyasn1" -version = "0.4.8" -description = "ASN.1 types and codecs" +version = "0.5.0" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" category = "main" optional = false -python-versions = "*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ - {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"}, - {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"}, + {file = "pyasn1-0.5.0-py2.py3-none-any.whl", hash = "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57"}, + {file = "pyasn1-0.5.0.tar.gz", hash = "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde"}, ] [[package]] From 1a7aa81715609555cb4d0a7e3cad262b9c234007 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 10:32:16 +0100 Subject: [PATCH 088/562] Bump sentry-sdk from 1.22.1 to 1.25.0 (#15714) Bumps [sentry-sdk](https://github.com/getsentry/sentry-python) from 1.22.1 to 1.25.0. - [Release notes](https://github.com/getsentry/sentry-python/releases) - [Changelog](https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md) - [Commits](https://github.com/getsentry/sentry-python/compare/1.22.1...1.25.0) --- updated-dependencies: - dependency-name: sentry-sdk dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index d2fc2c1c9c..9f91857475 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2397,19 +2397,19 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "1.22.1" +version = "1.25.0" description = "Python client for Sentry (https://sentry.io)" category = "main" optional = true python-versions = "*" files = [ - {file = "sentry-sdk-1.22.1.tar.gz", hash = "sha256:052dff5069c6f0d836ee014323576824a9b40836fc003fb12489a1f19c60a3c9"}, - {file = "sentry_sdk-1.22.1-py2.py3-none-any.whl", hash = "sha256:c6c6946f8c927adb00af1c5ab6921df38775b2199b9003816d5935a1310352d5"}, + {file = "sentry-sdk-1.25.0.tar.gz", hash = "sha256:5be3296fc574fa8a4d9b213b4dcf8c8d0246c08f8bd78315c6286f386c37555a"}, + {file = "sentry_sdk-1.25.0-py2.py3-none-any.whl", hash = "sha256:fe85cf5d0b3d0aa3480df689f9f6dc487de783defb0a95043368375dc893645e"}, ] [package.dependencies] certifi = "*" -urllib3 = {version = ">=1.26.11,<2.0.0", markers = "python_version >= \"3.6\""} +urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""} [package.extras] aiohttp = ["aiohttp (>=3.5)"] @@ -2421,10 +2421,11 @@ chalice = ["chalice (>=1.16.0)"] django = ["django (>=1.8)"] falcon = ["falcon (>=1.4)"] fastapi = ["fastapi (>=0.79.0)"] -flask = ["blinker (>=1.1)", "flask (>=0.11)"] +flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"] grpcio = ["grpcio (>=1.21.1)"] httpx = ["httpx (>=0.16.0)"] huey = ["huey (>=2)"] +loguru = ["loguru (>=0.5)"] opentelemetry = ["opentelemetry-distro (>=0.35b0)"] pure-eval = ["asttokens", "executing", "pure-eval"] pymongo = ["pymongo (>=3.1)"] From 2d97d5b1c359c2a1783365c0db035f17d512dc4c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 10:32:25 +0100 Subject: [PATCH 089/562] Bump types-jsonschema from 4.17.0.7 to 4.17.0.8 (#15716) Bumps [types-jsonschema](https://github.com/python/typeshed) from 4.17.0.7 to 4.17.0.8. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-jsonschema dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 9f91857475..c94daa6cef 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3038,14 +3038,14 @@ files = [ [[package]] name = "types-jsonschema" -version = "4.17.0.7" +version = "4.17.0.8" description = "Typing stubs for jsonschema" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-jsonschema-4.17.0.7.tar.gz", hash = "sha256:130e57c5f1ca755f95775d0822ad7a3907294e1461306af54baf804f317fd54c"}, - {file = "types_jsonschema-4.17.0.7-py3-none-any.whl", hash = "sha256:e129b52be6df841d97a98f087631dd558f7812eb91ff7b733c3301bd2446271b"}, + {file = "types-jsonschema-4.17.0.8.tar.gz", hash = "sha256:96a56990910f405e62de58862c0bbb3ac29ee6dba6d3d99aa0ba7f874cc547de"}, + {file = "types_jsonschema-4.17.0.8-py3-none-any.whl", hash = "sha256:f5958eb7b53217dfb5125f0412aeaef226a8a9013eac95816c95b5b523f6796b"}, ] [[package]] From ca8906be2cb821a0fb49ad1adf8440e79e64a398 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 10:39:34 +0100 Subject: [PATCH 090/562] Bump types-requests from 2.31.0.0 to 2.31.0.1 (#15715) Bumps [types-requests](https://github.com/python/typeshed) from 2.31.0.0 to 2.31.0.1. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-requests dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index c94daa6cef..1f5cb3a3a8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3125,14 +3125,14 @@ files = [ [[package]] name = "types-requests" -version = "2.31.0.0" +version = "2.31.0.1" description = "Typing stubs for requests" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-requests-2.31.0.0.tar.gz", hash = "sha256:c1c29d20ab8d84dff468d7febfe8e0cb0b4664543221b386605e14672b44ea25"}, - {file = "types_requests-2.31.0.0-py3-none-any.whl", hash = "sha256:7c5cea7940f8e92ec560bbc468f65bf684aa3dcf0554a6f8c4710f5f708dc598"}, + {file = "types-requests-2.31.0.1.tar.gz", hash = "sha256:3de667cffa123ce698591de0ad7db034a5317457a596eb0b4944e5a9d9e8d1ac"}, + {file = "types_requests-2.31.0.1-py3-none-any.whl", hash = "sha256:afb06ef8f25ba83d59a1d424bd7a5a939082f94b94e90ab5e6116bd2559deaa3"}, ] [package.dependencies] From f9561b9e37e4cbd97a71dd10549f1f03d3f01b5e Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 5 Jun 2023 23:38:52 -0500 Subject: [PATCH 091/562] Some house keeping on `maybe_backfill()` functions (#15709) --- changelog.d/15709.misc | 1 + synapse/handlers/federation.py | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) create mode 100644 changelog.d/15709.misc diff --git a/changelog.d/15709.misc b/changelog.d/15709.misc new file mode 100644 index 0000000000..e9ce84a940 --- /dev/null +++ b/changelog.d/15709.misc @@ -0,0 +1 @@ +Update docstring and traces on `maybe_backfill()` functions. 
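For context, the decorators and helpers used in the diff below come from
`synapse.logging.opentracing`. A minimal sketch of the pattern, assuming only
the names the diff itself relies on (`trace`, `tag_args`, `set_tag`,
`SynapseTags`); the function body and the `_pick_backfill_points` helper are
purely illustrative, not the real implementation:

    from synapse.logging.opentracing import SynapseTags, set_tag, tag_args, trace

    @trace      # open a tracing span named after the decorated function
    @tag_args   # record the call's arguments as tags on that span
    async def maybe_backfill(self, room_id: str, current_depth: int, limit: int) -> bool:
        # (hypothetical helper, for illustration only)
        backfill_points = await self._pick_backfill_points(room_id, current_depth)

        # Results can also be attached to the active span, stringified and
        # namespaced under a result prefix:
        set_tag(
            SynapseTags.RESULT_PREFIX + "backfill_points.length",
            str(len(backfill_points)),
        )
        return len(backfill_points) > 0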
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 2eb28d55ac..57d6b70cff 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -200,6 +200,7 @@ class FederationHandler: ) @trace + @tag_args async def maybe_backfill( self, room_id: str, current_depth: int, limit: int ) -> bool: @@ -214,6 +215,9 @@ class FederationHandler: limit: The number of events that the pagination request will return. This is used as part of the heuristic to decide if we should back paginate. + + Returns: + True if we actually tried to backfill something, otherwise False. """ # Starting the processing time here so we can include the room backfill # linearizer lock queue in the timing @@ -227,6 +231,8 @@ class FederationHandler: processing_start_time=processing_start_time, ) + @trace + @tag_args async def _maybe_backfill_inner( self, room_id: str, @@ -247,6 +253,9 @@ class FederationHandler: limit: The max number of events to request from the remote federated server. processing_start_time: The time when `maybe_backfill` started processing. Only used for timing. If `None`, no timing observation will be made. + + Returns: + True if we actually tried to backfill something, otherwise False. """ backwards_extremities = [ _BackfillPoint(event_id, depth, _BackfillPointType.BACKWARDS_EXTREMITY) @@ -302,6 +311,14 @@ class FederationHandler: len(sorted_backfill_points), sorted_backfill_points, ) + set_tag( + SynapseTags.RESULT_PREFIX + "sorted_backfill_points", + str(sorted_backfill_points), + ) + set_tag( + SynapseTags.RESULT_PREFIX + "sorted_backfill_points.length", + str(len(sorted_backfill_points)), + ) # If we have no backfill points lower than the `current_depth` then # either we can a) bail or b) still attempt to backfill. We opt to try From f880e64b11bd03d1ebd710b34b541d5b2e044baa Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 6 Jun 2023 04:11:07 -0400 Subject: [PATCH 092/562] Stabilize support for MSC3952: Intentional mentions. (#15520) --- changelog.d/15520.feature | 1 + rust/benches/evaluator.rs | 3 -- rust/src/push/base_rules.rs | 8 ++--- rust/src/push/evaluator.rs | 10 +++--- rust/src/push/mod.rs | 7 ----- stubs/synapse/synapse_rust/push.pyi | 1 - synapse/api/constants.py | 2 +- synapse/config/experimental.py | 5 --- synapse/events/validator.py | 9 ++---- synapse/push/bulk_push_rule_evaluator.py | 8 +---- synapse/rest/client/versions.py | 2 -- synapse/storage/databases/main/push_rule.py | 1 - tests/push/test_bulk_push_rule_evaluator.py | 34 ++++++++------------- 13 files changed, 27 insertions(+), 64 deletions(-) create mode 100644 changelog.d/15520.feature diff --git a/changelog.d/15520.feature b/changelog.d/15520.feature new file mode 100644 index 0000000000..f4fd40ab94 --- /dev/null +++ b/changelog.d/15520.feature @@ -0,0 +1 @@ +Enable support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 64e13f6486..c2f33258a4 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -13,8 +13,6 @@ // limitations under the License. 
#![feature(test)] -use std::collections::BTreeSet; - use synapse::push::{ evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, JsonValue, PushRules, SimpleJsonValue, @@ -197,7 +195,6 @@ fn bench_eval_message(b: &mut Bencher) { false, false, false, - false, ); b.iter(|| eval.run(&rules, Some("bob"), Some("person"))); diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index 51372e1553..9d6c304d92 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -142,11 +142,11 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ default_enabled: true, }, PushRule { - rule_id: Cow::Borrowed(".org.matrix.msc3952.is_user_mention"), + rule_id: Cow::Borrowed("global/override/.m.is_user_mention"), priority_class: 5, conditions: Cow::Borrowed(&[Condition::Known( KnownCondition::ExactEventPropertyContainsType(EventPropertyIsTypeCondition { - key: Cow::Borrowed("content.org\\.matrix\\.msc3952\\.mentions.user_ids"), + key: Cow::Borrowed("content.m\\.mentions.user_ids"), value_type: Cow::Borrowed(&EventMatchPatternType::UserId), }), )]), @@ -163,11 +163,11 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ default_enabled: true, }, PushRule { - rule_id: Cow::Borrowed(".org.matrix.msc3952.is_room_mention"), + rule_id: Cow::Borrowed("global/override/.m.is_room_mention"), priority_class: 5, conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition { - key: Cow::Borrowed("content.org\\.matrix\\.msc3952\\.mentions.room"), + key: Cow::Borrowed("content.m\\.mentions.room"), value: Cow::Borrowed(&SimpleJsonValue::Bool(true)), })), Condition::Known(KnownCondition::SenderNotificationPermission { diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 2d7c4c06be..59c53b1776 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -70,7 +70,9 @@ pub struct PushRuleEvaluator { /// The "content.body", if any. body: String, - /// True if the event has a mentions property and MSC3952 support is enabled. + /// True if the event has a m.mentions property. (Note that this is a separate + /// flag instead of checking flattened_keys since the m.mentions property + /// might be an empty map and not appear in flattened_keys. has_mentions: bool, /// The number of users in the room. @@ -155,9 +157,7 @@ impl PushRuleEvaluator { let rule_id = &push_rule.rule_id().to_string(); // For backwards-compatibility the legacy mention rules are disabled - // if the event contains the 'm.mentions' property (and if the - // experimental feature is enabled, both of these are represented - // by the has_mentions flag). + // if the event contains the 'm.mentions' property. 
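        // (e.g. the `.m.rule.contains_display_name` / `.m.rule.contains_user_name`
        // matches just below; when `m.mentions` is present, the exact-property
        // mention rules in base_rules.rs apply instead.)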
if self.has_mentions && (rule_id == "global/override/.m.rule.contains_display_name" || rule_id == "global/content/.m.rule.contains_user_name" @@ -562,7 +562,7 @@ fn test_requires_room_version_supports_condition() { }; let rules = PushRules::new(vec![custom_rule]); result = evaluator.run( - &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false, false), + &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false), None, None, ); diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index f19d3c739f..514980579b 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -527,7 +527,6 @@ pub struct FilteredPushRules { msc1767_enabled: bool, msc3381_polls_enabled: bool, msc3664_enabled: bool, - msc3952_intentional_mentions: bool, msc3958_suppress_edits_enabled: bool, } @@ -540,7 +539,6 @@ impl FilteredPushRules { msc1767_enabled: bool, msc3381_polls_enabled: bool, msc3664_enabled: bool, - msc3952_intentional_mentions: bool, msc3958_suppress_edits_enabled: bool, ) -> Self { Self { @@ -549,7 +547,6 @@ impl FilteredPushRules { msc1767_enabled, msc3381_polls_enabled, msc3664_enabled, - msc3952_intentional_mentions, msc3958_suppress_edits_enabled, } } @@ -587,10 +584,6 @@ impl FilteredPushRules { return false; } - if !self.msc3952_intentional_mentions && rule.rule_id.contains("org.matrix.msc3952") - { - return false; - } if !self.msc3958_suppress_edits_enabled && rule.rule_id == "global/override/.com.beeper.suppress_edits" { diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index 5d0ce4b1a4..d573a37b9a 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -46,7 +46,6 @@ class FilteredPushRules: msc1767_enabled: bool, msc3381_polls_enabled: bool, msc3664_enabled: bool, - msc3952_intentional_mentions: bool, msc3958_suppress_edits_enabled: bool, ): ... def rules(self) -> Collection[Tuple[PushRule, bool]]: ... diff --git a/synapse/api/constants.py b/synapse/api/constants.py index cde9a2ecef..faf0770c66 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -236,7 +236,7 @@ class EventContentFields: AUTHORISING_USER: Final = "join_authorised_via_users_server" # Use for mentioning users. - MSC3952_MENTIONS: Final = "org.matrix.msc3952.mentions" + MENTIONS: Final = "m.mentions" # an unspecced field added to to-device messages to identify them uniquely-ish TO_DEVICE_MSGID: Final = "org.matrix.msgid" diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index a9e002cf08..1d5b5ded45 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -358,11 +358,6 @@ class ExperimentalConfig(Config): # MSC3391: Removing account data. self.msc3391_enabled = experimental.get("msc3391_enabled", False) - # MSC3952: Intentional mentions, this depends on MSC3966. - self.msc3952_intentional_mentions = experimental.get( - "msc3952_intentional_mentions", False - ) - # MSC3959: Do not generate notifications for edits. self.msc3958_supress_edit_notifs = experimental.get( "msc3958_supress_edit_notifs", False diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 47203209db..9278f1a1aa 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -134,13 +134,8 @@ class EventValidator: ) # If the event contains a mentions key, validate it. 
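        # A mentions key looks roughly like
        #   "m.mentions": {"user_ids": ["@alice:example.org"], "room": True}
        # i.e. an optional list of mentioned user IDs plus an optional
        # room-mention flag.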
- if ( - EventContentFields.MSC3952_MENTIONS in event.content - and config.experimental.msc3952_intentional_mentions - ): - validate_json_object( - event.content[EventContentFields.MSC3952_MENTIONS], Mentions - ) + if EventContentFields.MENTIONS in event.content: + validate_json_object(event.content[EventContentFields.MENTIONS], Mentions) def _validate_retention(self, event: EventBase) -> None: """Checks that an event that defines the retention policy for a room respects the diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 320084f5f5..33002cc0f2 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -120,9 +120,6 @@ class BulkPushRuleEvaluator: self.should_calculate_push_rules = self.hs.config.push.enable_push self._related_event_match_enabled = self.hs.config.experimental.msc3664_enabled - self._intentional_mentions_enabled = ( - self.hs.config.experimental.msc3952_intentional_mentions - ) self.room_push_rule_cache_metrics = register_cache( "cache", @@ -390,10 +387,7 @@ class BulkPushRuleEvaluator: del notification_levels[key] # Pull out any user and room mentions. - has_mentions = ( - self._intentional_mentions_enabled - and EventContentFields.MSC3952_MENTIONS in event.content - ) + has_mentions = EventContentFields.MENTIONS in event.content evaluator = PushRuleEvaluator( _flatten_dict(event), diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 547bf34df1..1910648755 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -124,8 +124,6 @@ class VersionsRestServlet(RestServlet): is not None, # Adds support for relation-based redactions as per MSC3912. "org.matrix.msc3912": self.config.experimental.msc3912_enabled, - # Adds support for unstable "intentional mentions" behaviour. - "org.matrix.msc3952_intentional_mentions": self.config.experimental.msc3952_intentional_mentions, # Whether recursively provide relations is supported. "org.matrix.msc3981": self.config.experimental.msc3981_recurse_relations, # Adds support for deleting account data. diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 9f862f00c1..e098ceea3c 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -88,7 +88,6 @@ def _load_rules( msc1767_enabled=experimental_config.msc1767_enabled, msc3664_enabled=experimental_config.msc3664_enabled, msc3381_polls_enabled=experimental_config.msc3381_polls_enabled, - msc3952_intentional_mentions=experimental_config.msc3952_intentional_mentions, msc3958_suppress_edits_enabled=experimental_config.msc3958_supress_edit_notifs, ) diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index 9501096a77..1e06f86071 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -228,7 +228,6 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): ) return len(result) > 0 - @override_config({"experimental_features": {"msc3952_intentional_mentions": True}}) def test_user_mentions(self) -> None: """Test the behavior of an event which includes invalid user mentions.""" bulk_evaluator = BulkPushRuleEvaluator(self.hs) @@ -237,9 +236,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): self.assertFalse(self._create_and_process(bulk_evaluator)) # An empty mentions field should not notify. 
self.assertFalse( - self._create_and_process( - bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: {}} - ) + self._create_and_process(bulk_evaluator, {EventContentFields.MENTIONS: {}}) ) # Non-dict mentions should be ignored. @@ -253,7 +250,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): for mentions in (None, True, False, 1, "foo", []): self.assertFalse( self._create_and_process( - bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: mentions} + bulk_evaluator, {EventContentFields.MENTIONS: mentions} ) ) @@ -262,7 +259,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): self.assertFalse( self._create_and_process( bulk_evaluator, - {EventContentFields.MSC3952_MENTIONS: {"user_ids": mentions}}, + {EventContentFields.MENTIONS: {"user_ids": mentions}}, ) ) @@ -270,14 +267,14 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): self.assertTrue( self._create_and_process( bulk_evaluator, - {EventContentFields.MSC3952_MENTIONS: {"user_ids": [self.alice]}}, + {EventContentFields.MENTIONS: {"user_ids": [self.alice]}}, ) ) self.assertTrue( self._create_and_process( bulk_evaluator, { - EventContentFields.MSC3952_MENTIONS: { + EventContentFields.MENTIONS: { "user_ids": ["@another:test", self.alice] } }, @@ -288,11 +285,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): self.assertTrue( self._create_and_process( bulk_evaluator, - { - EventContentFields.MSC3952_MENTIONS: { - "user_ids": [self.alice, self.alice] - } - }, + {EventContentFields.MENTIONS: {"user_ids": [self.alice, self.alice]}}, ) ) @@ -307,7 +300,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): self._create_and_process( bulk_evaluator, { - EventContentFields.MSC3952_MENTIONS: { + EventContentFields.MENTIONS: { "user_ids": [None, True, False, {}, []] } }, @@ -317,7 +310,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): self._create_and_process( bulk_evaluator, { - EventContentFields.MSC3952_MENTIONS: { + EventContentFields.MENTIONS: { "user_ids": [None, True, False, {}, [], self.alice] } }, @@ -331,12 +324,11 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): { "body": self.alice, "msgtype": "m.text", - EventContentFields.MSC3952_MENTIONS: {}, + EventContentFields.MENTIONS: {}, }, ) ) - @override_config({"experimental_features": {"msc3952_intentional_mentions": True}}) def test_room_mentions(self) -> None: """Test the behavior of an event which includes invalid room mentions.""" bulk_evaluator = BulkPushRuleEvaluator(self.hs) @@ -344,7 +336,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): # Room mentions from those without power should not notify. 
self.assertFalse( self._create_and_process( - bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: {"room": True}} + bulk_evaluator, {EventContentFields.MENTIONS: {"room": True}} ) ) @@ -358,7 +350,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): ) self.assertTrue( self._create_and_process( - bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: {"room": True}} + bulk_evaluator, {EventContentFields.MENTIONS: {"room": True}} ) ) @@ -374,7 +366,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): self.assertFalse( self._create_and_process( bulk_evaluator, - {EventContentFields.MSC3952_MENTIONS: {"room": mentions}}, + {EventContentFields.MENTIONS: {"room": mentions}}, ) ) @@ -385,7 +377,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): { "body": "@room", "msgtype": "m.text", - EventContentFields.MSC3952_MENTIONS: {}, + EventContentFields.MENTIONS: {}, }, ) ) From 564f37aca6fdf404edc65031f90bbf9385794ae2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 6 Jun 2023 09:55:42 +0100 Subject: [PATCH 093/562] 1.85.0 --- CHANGES.md | 21 +++++++++++++++++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 28 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index f0885a2f1e..100ce99270 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,24 @@ +Synapse 1.85.0 (2023-06-06) +=========================== + +No significant changes since 1.85.0rc2. + + +## Security advisory + +The following issues are fixed in 1.85.0. + +- [GHSA-26c5-ppr8-f33p](https://github.com/matrix-org/synapse/security/advisories/GHSA-26c5-ppr8-f33p) / [CVE-2023-32682](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-32683) — Low Severity + + It may be possible for a deactivated user to login when using uncommon configurations. + +- [GHSA-98px-6486-j7qc](https://github.com/matrix-org/synapse/security/advisories/GHSA-98px-6486-j7qc) / [CVE-2023-32683](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-32683) — Low Severity + + A discovered oEmbed or image URL can bypass the `url_preview_url_blacklist` setting potentially allowing server side request forgery or bypassing network policies. Impact is limited to IP addresses allowed by the `url_preview_ip_range_blacklist` setting (by default this only allows public IPs). + +See the advisories for more details. If you have any questions, email security@matrix.org. + + Synapse 1.85.0rc2 (2023-06-01) ============================== diff --git a/debian/changelog b/debian/changelog index ae348ce4df..2278a83283 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.85.0) stable; urgency=medium + + * New Synapse release 1.85.0. + + -- Synapse Packaging team Tue, 06 Jun 2023 09:39:29 +0100 + matrix-synapse-py3 (1.85.0~rc2) stable; urgency=medium * New Synapse release 1.85.0rc2. 
diff --git a/pyproject.toml b/pyproject.toml index 4ed4214f34..745b58d7b5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.85.0rc2" +version = "1.85.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From ec71214243eac58a4a6d272c15441a6405f6ae9c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 6 Jun 2023 10:06:21 +0100 Subject: [PATCH 094/562] Fixup changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 100ce99270..ea13b554ba 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,7 +6,7 @@ No significant changes since 1.85.0rc2. ## Security advisory -The following issues are fixed in 1.85.0. +The following issues are fixed in 1.85.0 (and RCs). - [GHSA-26c5-ppr8-f33p](https://github.com/matrix-org/synapse/security/advisories/GHSA-26c5-ppr8-f33p) / [CVE-2023-32682](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-32683) — Low Severity From ad690037de0708d932380e3759d57ef3cc981345 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 6 Jun 2023 10:58:32 +0100 Subject: [PATCH 095/562] Fix link in changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index ea13b554ba..905713b2af 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -8,7 +8,7 @@ No significant changes since 1.85.0rc2. The following issues are fixed in 1.85.0 (and RCs). -- [GHSA-26c5-ppr8-f33p](https://github.com/matrix-org/synapse/security/advisories/GHSA-26c5-ppr8-f33p) / [CVE-2023-32682](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-32683) — Low Severity +- [GHSA-26c5-ppr8-f33p](https://github.com/matrix-org/synapse/security/advisories/GHSA-26c5-ppr8-f33p) / [CVE-2023-32682](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-32682) — Low Severity It may be possible for a deactivated user to login when using uncommon configurations. From dfd77f426e3e4a66dd027db7078ed0345a4c74dd Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 6 Jun 2023 12:32:29 +0100 Subject: [PATCH 096/562] Remove some unused `server_name` fields (#15723) Signed-off-by: Sean Quah --- changelog.d/15723.misc | 1 + synapse/handlers/presence.py | 1 - synapse/handlers/read_marker.py | 1 - synapse/handlers/room.py | 1 - synapse/handlers/stats.py | 1 - synapse/rest/media/upload_resource.py | 1 - 6 files changed, 1 insertion(+), 5 deletions(-) create mode 100644 changelog.d/15723.misc diff --git a/changelog.d/15723.misc b/changelog.d/15723.misc new file mode 100644 index 0000000000..ba331adca7 --- /dev/null +++ b/changelog.d/15723.misc @@ -0,0 +1 @@ +Removed some unused fields. 
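The diffs that follow simply delete `self.server_name = ...` assignments that nothing ever reads. As an aside, if you want to hunt for the same "assigned but never read" pattern yourself, a rough single-file AST sketch is below. This is purely illustrative (the class body is a cut-down imitation of `ReadMarkerHandler`, and dedicated dead-code tools such as vulture handle inheritance, `getattr`, and cross-module use far better); it is not how these particular fields were found.

```python
import ast

SOURCE = '''
class ReadMarkerHandler:
    def __init__(self, hs):
        self.server_name = hs.config.server.server_name  # written, never read
        self.store = hs.get_datastores().main

    def run(self):
        return self.store
'''

tree = ast.parse(SOURCE)
assigned, read = set(), set()
for node in ast.walk(tree):
    # Only look at attribute access on `self` (e.g. self.store, self.server_name).
    if (
        isinstance(node, ast.Attribute)
        and isinstance(node.value, ast.Name)
        and node.value.id == "self"
    ):
        if isinstance(node.ctx, ast.Store):
            assigned.add(node.attr)
        else:
            read.add(node.attr)

print("assigned but never read:", assigned - read)  # {'server_name'}
```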
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 4ad2233573..0a219b7962 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -648,7 +648,6 @@ class PresenceHandler(BasePresenceHandler): def __init__(self, hs: "HomeServer"): super().__init__(hs) self.hs = hs - self.server_name = hs.hostname self.wheel_timer: WheelTimer[str] = WheelTimer() self.notifier = hs.get_notifier() self._presence_enabled = hs.config.server.use_presence diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py index 49a497a860..df5a4f3e22 100644 --- a/synapse/handlers/read_marker.py +++ b/synapse/handlers/read_marker.py @@ -27,7 +27,6 @@ logger = logging.getLogger(__name__) class ReadMarkerHandler: def __init__(self, hs: "HomeServer"): - self.server_name = hs.config.server.server_name self.store = hs.get_datastores().main self.account_data_handler = hs.get_account_data_handler() self.read_marker_linearizer = Linearizer(name="read_marker") diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 5e1702d78a..cb957f2033 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1490,7 +1490,6 @@ class RoomContextHandler: class TimestampLookupHandler: def __init__(self, hs: "HomeServer"): - self.server_name = hs.hostname self.store = hs.get_datastores().main self.state_handler = hs.get_state_handler() self.federation_client = hs.get_federation_client() diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 5c01482acf..7cabf7980a 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -42,7 +42,6 @@ class StatsHandler: self.store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() self.state = hs.get_state_handler() - self.server_name = hs.hostname self.clock = hs.get_clock() self.notifier = hs.get_notifier() self.is_mine_id = hs.is_mine_id diff --git a/synapse/rest/media/upload_resource.py b/synapse/rest/media/upload_resource.py index 697348613b..043e8d6077 100644 --- a/synapse/rest/media/upload_resource.py +++ b/synapse/rest/media/upload_resource.py @@ -39,7 +39,6 @@ class UploadResource(DirectServeJsonResource): self.filepaths = media_repo.filepaths self.store = hs.get_datastores().main self.clock = hs.get_clock() - self.server_name = hs.hostname self.auth = hs.get_auth() self.max_upload_size = hs.config.media.max_upload_size self.clock = hs.get_clock() From d43c72a6c85ab7cf7391f1b716dfd57f8fd0bf3d Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 6 Jun 2023 19:29:54 +0100 Subject: [PATCH 097/562] Prevent "twisted trunk" and "latest deps" workflows from running on forks (#15726) --- .github/workflows/latest_deps.yml | 23 +++++++++++++++++++++-- .github/workflows/twisted_trunk.yml | 24 ++++++++++++++++++++++-- changelog.d/15726.misc | 1 + 3 files changed, 44 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15726.misc diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index 452600ba16..ec6391cf8f 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -22,7 +22,21 @@ concurrency: cancel-in-progress: true jobs: + check_repo: + # Prevent this workflow from running on any fork of Synapse other than matrix-org/synapse, as it is + # only useful to the Synapse core team. + # All other workflow steps depend on this one, thus if 'should_run_workflow' is not 'true', the rest + # of the workflow will be skipped as well. 
+ runs-on: ubuntu-latest + outputs: + should_run_workflow: ${{ steps.check_condition.outputs.should_run_workflow }} + steps: + - id: check_condition + run: echo "should_run_workflow=${{ github.repository == 'matrix-org/synapse' }}" >> "$GITHUB_OUTPUT" + mypy: + needs: check_repo + if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 @@ -47,6 +61,8 @@ jobs: run: sed '/warn_unused_ignores = True/d' -i mypy.ini - run: poetry run mypy trial: + needs: check_repo + if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest strategy: matrix: @@ -105,6 +121,8 @@ jobs: sytest: + needs: check_repo + if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest container: image: matrixdotorg/sytest-synapse:testing @@ -156,7 +174,8 @@ jobs: complement: - if: "${{ !failure() && !cancelled() }}" + needs: check_repo + if: "!failure() && !cancelled() && needs.check_repo.outputs.should_run_workflow == 'true'" runs-on: ubuntu-latest strategy: @@ -192,7 +211,7 @@ jobs: # Open an issue if the build fails, so we know about it. # Only do this if we're not experimenting with this action in a PR. open-issue: - if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request'" + if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request' && needs.check_repo.outputs.should_run_workflow == 'true'" needs: # TODO: should mypy be included here? It feels more brittle than the others. - mypy diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 14fc6a0389..55081f8133 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -18,7 +18,22 @@ concurrency: cancel-in-progress: true jobs: + check_repo: + # Prevent this workflow from running on any fork of Synapse other than matrix-org/synapse, as it is + # only useful to the Synapse core team. + # All other workflow steps depend on this one, thus if 'should_run_workflow' is not 'true', the rest + # of the workflow will be skipped as well. + if: github.repository == 'matrix-org/synapse' + runs-on: ubuntu-latest + outputs: + should_run_workflow: ${{ steps.check_condition.outputs.should_run_workflow }} + steps: + - id: check_condition + run: echo "should_run_workflow=${{ github.repository == 'matrix-org/synapse' }}" >> "$GITHUB_OUTPUT" + mypy: + needs: check_repo + if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest steps: @@ -41,6 +56,8 @@ jobs: - run: poetry run mypy trial: + needs: check_repo + if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest steps: @@ -75,6 +92,8 @@ jobs: || true sytest: + needs: check_repo + if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest container: image: matrixdotorg/sytest-synapse:buster @@ -119,7 +138,8 @@ jobs: /logs/**/*.log* complement: - if: "${{ !failure() && !cancelled() }}" + needs: check_repo + if: "!failure() && !cancelled() && needs.check_repo.outputs.should_run_workflow == 'true'" runs-on: ubuntu-latest strategy: @@ -166,7 +186,7 @@ jobs: # open an issue if the build fails, so we know about it. 
open-issue: - if: failure() + if: failure() && needs.check_repo.outputs.should_run_workflow == 'true' needs: - mypy - trial diff --git a/changelog.d/15726.misc b/changelog.d/15726.misc new file mode 100644 index 0000000000..941e541e77 --- /dev/null +++ b/changelog.d/15726.misc @@ -0,0 +1 @@ +Prevent the `latest_deps` and `twisted_trunk` daily GitHub Actions workflows from running on forks of the codebase. \ No newline at end of file From 6ee96e936646d6ccc55dc076f62f8cf518c90d1e Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 6 Jun 2023 13:16:03 -0700 Subject: [PATCH 098/562] Improve performance of user directory search (#15729) --- changelog.d/15729.misc | 1 + synapse/storage/databases/main/user_directory.py | 12 ++++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15729.misc diff --git a/changelog.d/15729.misc b/changelog.d/15729.misc new file mode 100644 index 0000000000..3940254305 --- /dev/null +++ b/changelog.d/15729.misc @@ -0,0 +1 @@ +Improve performance of user directory search. diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index a0319575f0..b0a06baf4f 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -1061,12 +1061,15 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): # The array of numbers are the weights for the various part of the # search: (domain, _, display name, localpart) sql = """ + WITH matching_users AS ( + SELECT user_id, vector FROM user_directory_search WHERE vector @@ to_tsquery('simple', ?) + LIMIT 10000 + ) SELECT d.user_id AS user_id, display_name, avatar_url - FROM user_directory_search as t + FROM matching_users as t INNER JOIN user_directory AS d USING (user_id) WHERE %(where_clause)s - AND vector @@ to_tsquery('simple', ?) ORDER BY (CASE WHEN d.user_id IS NOT NULL THEN 4.0 ELSE 1.0 END) * (CASE WHEN display_name IS NOT NULL THEN 1.2 ELSE 1.0 END) @@ -1095,8 +1098,9 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): "order_case_statements": " ".join(additional_ordering_statements), } args = ( - join_args - + (full_query, exact_query, prefix_query) + (full_query,) + + join_args + + (exact_query, prefix_query) + ordering_arguments + (limit + 1,) ) From 33c3550887f412f015cf651db82a9082bb12cd9e Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 6 Jun 2023 16:25:03 -0500 Subject: [PATCH 099/562] Add context for when/why to use the `long_retries` option when sending Federation requests (#15721) --- changelog.d/15721.misc | 1 + synapse/http/matrixfederationclient.py | 11 +++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15721.misc diff --git a/changelog.d/15721.misc b/changelog.d/15721.misc new file mode 100644 index 0000000000..f4d892daf9 --- /dev/null +++ b/changelog.d/15721.misc @@ -0,0 +1 @@ +Add context for when/why to use the `long_retries` option when sending Federation requests. diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 9094dab0fe..abb5ae5815 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -499,8 +499,15 @@ class MatrixFederationHttpClient: Note that the above intervals are *in addition* to the time spent waiting for the request to complete (up to `timeout` ms). - NB: the long retry algorithm takes over 20 minutes to complete, with - a default timeout of 60s! 
+        NB: the long retry algorithm takes over 20 minutes to complete, with a
+        default timeout of 60s! It's best not to use the `long_retries` option
+        for anything that blocks a client, so that we don't make them wait for
+        ages; for things like sending transactions (server to server) we can
+        be a lot more lenient, but the distinction is fuzzy / hand-wavy.
+
+        In the future, we could be more intelligent about this sort of thing
+        by looking at it with the bigger picture in mind; see
+        https://github.com/matrix-org/synapse/issues/8917
 
             ignore_backoff: true to ignore the historical backoff data
                 and try the request anyway.

From 4e6390cb10676d3f621319663587f49baa57bedc Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Tue, 6 Jun 2023 16:26:12 -0500
Subject: [PATCH 100/562] Update error to more plainly explain we can only
 authorize our own events (#15725)

---
 changelog.d/15725.misc                  | 1 +
 synapse/federation/federation_server.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/15725.misc

diff --git a/changelog.d/15725.misc b/changelog.d/15725.misc
new file mode 100644
index 0000000000..6c7a8a41d8
--- /dev/null
+++ b/changelog.d/15725.misc
@@ -0,0 +1 @@
+Update federation error to more plainly explain we can only authorise our own membership events.

diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 149351dda0..9425b32507 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -944,7 +944,7 @@ class FederationServer(FederationBase):
         if not self._is_mine_server_name(authorising_server):
             raise SynapseError(
                 400,
-                f"Cannot authorise request from resident server: {authorising_server}",
+                f"Cannot authorise membership event for {authorising_server}. We can only authorise requests from our own homeserver",
             )
 
         event.signatures.update(

From 8bfded81f3378ab6333f174e182f2aae6ef01f49 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Tue, 6 Jun 2023 17:39:22 -0500
Subject: [PATCH 101/562] Trace functions which return `Awaitable` (#15650)

---
 changelog.d/15650.misc            |  1 +
 synapse/logging/opentracing.py    | 37 ++++++++++++++++++--------
 tests/logging/test_opentracing.py | 43 +++++++++++++++++++++++--------
 3 files changed, 59 insertions(+), 22 deletions(-)
 create mode 100644 changelog.d/15650.misc

diff --git a/changelog.d/15650.misc b/changelog.d/15650.misc
new file mode 100644
index 0000000000..9bbad113e1
--- /dev/null
+++ b/changelog.d/15650.misc
@@ -0,0 +1 @@
+Add support for tracing functions which return `Awaitable`s.

diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index c70eee649c..75217e3f45 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -171,6 +171,7 @@ from functools import wraps
 from typing import (
     TYPE_CHECKING,
     Any,
+    Awaitable,
     Callable,
     Collection,
     ContextManager,
@@ -903,6 +904,7 @@ def _custom_sync_async_decorator(
     """
 
     if inspect.iscoroutinefunction(func):
+        # For this branch, we handle async functions like `async def func() -> RInner`.
         # In this branch, R = Awaitable[RInner], for some other type RInner
         @wraps(func)
         async def _wrapper(
@@ -914,15 +916,16 @@ def _custom_sync_async_decorator(
             return await func(*args, **kwargs)  # type: ignore[misc]
 
     else:
-        # The other case here handles both sync functions and those
-        # decorated with inlineDeferred.
+ # The other case here handles sync functions including those decorated with + # `@defer.inlineCallbacks` or that return a `Deferred` or other `Awaitable`. @wraps(func) - def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + def _wrapper(*args: P.args, **kwargs: P.kwargs) -> Any: scope = wrapping_logic(func, *args, **kwargs) scope.__enter__() try: result = func(*args, **kwargs) + if isinstance(result, defer.Deferred): def call_back(result: R) -> R: @@ -930,20 +933,32 @@ def _custom_sync_async_decorator( return result def err_back(result: R) -> R: + # TODO: Pass the error details into `scope.__exit__(...)` for + # consistency with the other paths. scope.__exit__(None, None, None) return result result.addCallbacks(call_back, err_back) - else: - if inspect.isawaitable(result): - logger.error( - "@trace may not have wrapped %s correctly! " - "The function is not async but returned a %s.", - func.__qualname__, - type(result).__name__, - ) + elif inspect.isawaitable(result): + async def wrap_awaitable() -> Any: + try: + assert isinstance(result, Awaitable) + awaited_result = await result + scope.__exit__(None, None, None) + return awaited_result + except Exception as e: + scope.__exit__(type(e), None, e.__traceback__) + raise + + # The original method returned an awaitable, eg. a coroutine, so we + # create another awaitable wrapping it that calls + # `scope.__exit__(...)`. + return wrap_awaitable() + else: + # Just a simple sync function so we can just exit the scope and + # return the result without any fuss. scope.__exit__(None, None, None) return result diff --git a/tests/logging/test_opentracing.py b/tests/logging/test_opentracing.py index e28ba84cc2..1bc7d64ad9 100644 --- a/tests/logging/test_opentracing.py +++ b/tests/logging/test_opentracing.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import cast +from typing import Awaitable, cast from twisted.internet import defer from twisted.test.proto_helpers import MemoryReactorClock @@ -227,8 +227,6 @@ class LogContextScopeManagerTestCase(TestCase): Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args` with functions that return deferreds """ - reactor = MemoryReactorClock() - with LoggingContext("root context"): @trace_with_opname("fixture_deferred_func", tracer=self._tracer) @@ -240,9 +238,6 @@ class LogContextScopeManagerTestCase(TestCase): result_d1 = fixture_deferred_func() - # let the tasks complete - reactor.pump((2,) * 8) - self.assertEqual(self.successResultOf(result_d1), "foo") # the span should have been reported @@ -256,8 +251,6 @@ class LogContextScopeManagerTestCase(TestCase): Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args` with async functions """ - reactor = MemoryReactorClock() - with LoggingContext("root context"): @trace_with_opname("fixture_async_func", tracer=self._tracer) @@ -267,9 +260,6 @@ class LogContextScopeManagerTestCase(TestCase): d1 = defer.ensureDeferred(fixture_async_func()) - # let the tasks complete - reactor.pump((2,) * 8) - self.assertEqual(self.successResultOf(d1), "foo") # the span should have been reported @@ -277,3 +267,34 @@ class LogContextScopeManagerTestCase(TestCase): [span.operation_name for span in self._reporter.get_spans()], ["fixture_async_func"], ) + + def test_trace_decorator_awaitable_return(self) -> None: + """ + Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args` + with functions that return an awaitable (e.g. 
a coroutine) + """ + with LoggingContext("root context"): + # Something we can return without `await` to get a coroutine + async def fixture_async_func() -> str: + return "foo" + + # The actual kind of function we want to test that returns an awaitable + @trace_with_opname("fixture_awaitable_return_func", tracer=self._tracer) + @tag_args + def fixture_awaitable_return_func() -> Awaitable[str]: + return fixture_async_func() + + # Something we can run with `defer.ensureDeferred(runner())` and pump the + # whole async tasks through to completion. + async def runner() -> str: + return await fixture_awaitable_return_func() + + d1 = defer.ensureDeferred(runner()) + + self.assertEqual(self.successResultOf(d1), "foo") + + # the span should have been reported + self.assertEqual( + [span.operation_name for span in self._reporter.get_spans()], + ["fixture_awaitable_return_func"], + ) From 9d911b0da651893e0b67cb3506e18582cb0d95b5 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 6 Jun 2023 22:19:57 -0500 Subject: [PATCH 102/562] No need for the extra join since `membership` is built-in to `current_state_events` (#15731) This helps with the upstream `is_host_joined()` and `is_host_invited()` functions. `membership` was added to `current_state_events` in https://github.com/matrix-org/synapse/pull/5706 and forced in https://github.com/matrix-org/synapse/pull/13745 --- changelog.d/15731.misc | 1 + synapse/storage/databases/main/roommember.py | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15731.misc diff --git a/changelog.d/15731.misc b/changelog.d/15731.misc new file mode 100644 index 0000000000..906bc26962 --- /dev/null +++ b/changelog.d/15731.misc @@ -0,0 +1 @@ +Remove redundant table join with `room_memberships` when doing a `is_host_joined()`/`is_host_invited()` call (`membership` is already part of the `current_state_events`). diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index ae9c201b87..1b8ec67f54 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -927,11 +927,10 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): raise Exception("Invalid host name") sql = """ - SELECT state_key FROM current_state_events AS c - INNER JOIN room_memberships AS m USING (event_id) - WHERE m.membership = ? + SELECT state_key FROM current_state_events + WHERE membership = ? AND type = 'm.room.member' - AND c.room_id = ? + AND room_id = ? AND state_key LIKE ? LIMIT 1 """ From a701c089fa2a345243985a765506a52b50e50963 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Jun 2023 10:50:32 +0100 Subject: [PATCH 103/562] Fix schema delta error in 1.85 (#15738) There appears to be a race where you can end up with entries in `event_push_summary` with both a `NULL` and `main` thread ID. Fixes #15736 Introduced in #15597 --- changelog.d/15738.bugfix | 1 + .../main/delta/77/05thread_notifications_backfill.sql | 8 ++++++++ 2 files changed, 9 insertions(+) create mode 100644 changelog.d/15738.bugfix diff --git a/changelog.d/15738.bugfix b/changelog.d/15738.bugfix new file mode 100644 index 0000000000..7129ab0782 --- /dev/null +++ b/changelog.d/15738.bugfix @@ -0,0 +1 @@ +Fix bug in schema delta that broke upgrades for some deployments. Introduced in v1.85.0. 
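The clean-up step that the delta below adds is easy to reproduce in miniature. Here is a hypothetical sketch using an in-memory SQLite table in place of the real database, with only the columns that matter; note the real delta runs on Synapse's actual schema and (on PostgreSQL) can alias the table being deleted from, whereas this sketch correlates on the table name instead:

```python
import sqlite3

# A cut-down stand-in for event_push_summary, not Synapse's real schema.
conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE event_push_summary (user_id TEXT, room_id TEXT, thread_id TEXT)"
)
conn.executemany(
    "INSERT INTO event_push_summary VALUES (?, ?, ?)",
    [
        ("@a:test", "!r:test", None),    # NULL duplicate of a 'main' row: deleted
        ("@a:test", "!r:test", "main"),
        ("@b:test", "!r:test", None),    # NULL with no 'main' counterpart: kept
    ],
)
# Same shape as the delta's EXISTS-based DELETE below.
conn.execute(
    """
    DELETE FROM event_push_summary WHERE thread_id IS NULL AND EXISTS (
        SELECT 1 FROM event_push_summary AS b
        WHERE b.thread_id = 'main'
          AND b.user_id = event_push_summary.user_id
          AND b.room_id = event_push_summary.room_id
    )
    """
)
print(conn.execute("SELECT user_id, thread_id FROM event_push_summary").fetchall())
# [('@a:test', 'main'), ('@b:test', None)]
```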
diff --git a/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql b/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql index ce6f9ff937..b09aa817ae 100644 --- a/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql +++ b/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql @@ -21,6 +21,14 @@ DELETE FROM background_updates WHERE update_name = 'event_push_backfill_thread_i -- Overwrite any null thread_id values. UPDATE event_push_actions_staging SET thread_id = 'main' WHERE thread_id IS NULL; UPDATE event_push_actions SET thread_id = 'main' WHERE thread_id IS NULL; + +-- Empirically we can end up with entries in the push summary table with both a +-- `NULL` and `main` thread ID, which causes the update below to fail. We fudge +-- this by deleting any `NULL` rows that have a corresponding `main`. +DELETE FROM event_push_summary AS a WHERE thread_id IS NULL AND EXISTS ( + SELECT 1 FROM event_push_summary AS b + WHERE b.thread_id = 'main' AND a.user_id = b.user_id AND a.room_id = b.room_id +); UPDATE event_push_summary SET thread_id = 'main' WHERE thread_id IS NULL; -- Drop the background updates to calculate the indexes used to find null thread_ids. From 7acf7f2f8df9726c961b392f21ee7a92d062fb39 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Jun 2023 10:51:17 +0100 Subject: [PATCH 104/562] 1.85.1 --- CHANGES.md | 9 +++++++++ changelog.d/15738.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/15738.bugfix diff --git a/CHANGES.md b/CHANGES.md index ea13b554ba..81bf3cc110 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.85.1 (2023-06-07) +=========================== + +Bugfixes +-------- + +- Fix bug in schema delta that broke upgrades for some deployments. Introduced in v1.85.0. ([\#15738](https://github.com/matrix-org/synapse/issues/15738)) + + Synapse 1.85.0 (2023-06-06) =========================== diff --git a/changelog.d/15738.bugfix b/changelog.d/15738.bugfix deleted file mode 100644 index 7129ab0782..0000000000 --- a/changelog.d/15738.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug in schema delta that broke upgrades for some deployments. Introduced in v1.85.0. diff --git a/debian/changelog b/debian/changelog index 2278a83283..6d6f10ddf1 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.85.1) stable; urgency=medium + + * New Synapse release 1.85.1. + + -- Synapse Packaging team Wed, 07 Jun 2023 10:51:12 +0100 + matrix-synapse-py3 (1.85.0) stable; urgency=medium * New Synapse release 1.85.0. diff --git a/pyproject.toml b/pyproject.toml index 745b58d7b5..5b6123dff6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.85.0" +version = "1.85.1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From f7c6553ebce51a46f1c78aa0a3fc6cc1effb346d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Jun 2023 13:02:42 +0100 Subject: [PATCH 105/562] Fix schema delta error in 1.85 (#15739) Some users seem to have multiple rows per user / room with a null thread ID, which we need to handle. 
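Extending the sketch above: when there are *multiple* `NULL`-thread rows for one user/room and no `main` row, the previous delta's blind `UPDATE` would make them collide, which is the failure this patch handles. A minimal, hypothetical illustration of the `INSERT ... SELECT MAX(...) GROUP BY` merge plus `DELETE` that the diff below adopts (again using SQLite and a pared-down table, not the real schema):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE event_push_summary "
    "(user_id TEXT, room_id TEXT, notif_count INT, stream_ordering INT, thread_id TEXT)"
)
# The problem case: duplicate NULL-thread rows for the same user/room.
conn.executemany(
    "INSERT INTO event_push_summary VALUES (?, ?, ?, ?, ?)",
    [
        ("@alice:test", "!room:test", 2, 10, None),
        ("@alice:test", "!room:test", 5, 12, None),
    ],
)
# Collapse the duplicates into a single 'main' row, taking MAX of each value...
conn.execute(
    """
    INSERT INTO event_push_summary (user_id, room_id, notif_count, stream_ordering, thread_id)
    SELECT user_id, room_id, MAX(notif_count), MAX(stream_ordering), 'main'
    FROM event_push_summary
    WHERE thread_id IS NULL
    GROUP BY user_id, room_id
    """
)
# ...then drop the now-merged NULL rows.
conn.execute("DELETE FROM event_push_summary WHERE thread_id IS NULL")
print(conn.execute("SELECT * FROM event_push_summary").fetchall())
# [('@alice:test', '!room:test', 5, 12, 'main')]
```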
---
 changelog.d/15739.bugfix                          |  1 +
 .../delta/77/05thread_notifications_backfill.sql  | 16 ++++++++++++++--
 2 files changed, 15 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/15739.bugfix

diff --git a/changelog.d/15739.bugfix b/changelog.d/15739.bugfix
new file mode 100644
index 0000000000..7129ab0782
--- /dev/null
+++ b/changelog.d/15739.bugfix
@@ -0,0 +1 @@
+Fix bug in schema delta that broke upgrades for some deployments. Introduced in v1.85.0.

diff --git a/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql b/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql
index b09aa817ae..a5da7a17a0 100644
--- a/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql
+++ b/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql
@@ -23,13 +23,25 @@ UPDATE event_push_actions_staging SET thread_id = 'main' WHERE thread_id IS NULL
 UPDATE event_push_actions SET thread_id = 'main' WHERE thread_id IS NULL;
 
 -- Empirically we can end up with entries in the push summary table with both a
--- `NULL` and `main` thread ID, which causes the update below to fail. We fudge
+-- `NULL` and `main` thread ID, which causes the insert below to fail. We fudge
 -- this by deleting any `NULL` rows that have a corresponding `main`.
 DELETE FROM event_push_summary AS a WHERE thread_id IS NULL AND EXISTS (
     SELECT 1 FROM event_push_summary AS b
     WHERE b.thread_id = 'main' AND a.user_id = b.user_id AND a.room_id = b.room_id
 );
-UPDATE event_push_summary SET thread_id = 'main' WHERE thread_id IS NULL;
+-- Copy the NULL threads to have a 'main' thread ID.
+--
+-- Note: Some people seem to have duplicate rows with a `NULL` thread ID, in
+-- which case we just fudge it by using MAX of the values. The counts *may* be
+-- wrong for such rooms, but a) it's an edge case, and b) they'll be fixed when
+-- the user reads the room.
+INSERT INTO event_push_summary (user_id, room_id, notif_count, stream_ordering, unread_count, last_receipt_stream_ordering, thread_id)
+    SELECT user_id, room_id, MAX(notif_count), MAX(stream_ordering), MAX(unread_count), MAX(last_receipt_stream_ordering), 'main'
+    FROM event_push_summary
+    WHERE thread_id IS NULL
+    GROUP BY user_id, room_id, thread_id;
+
+DELETE FROM event_push_summary AS a WHERE thread_id IS NULL;
 
 -- Drop the background updates to calculate the indexes used to find null thread_ids.
 DELETE FROM background_updates WHERE update_name = 'event_push_actions_thread_id_null';

From 28423977be8637bab096ed32085f06e715abe51b Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 7 Jun 2023 13:04:20 +0100
Subject: [PATCH 106/562] Update changelog

---
 CHANGES.md               | 2 +-
 changelog.d/15739.bugfix | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)
 delete mode 100644 changelog.d/15739.bugfix

diff --git a/CHANGES.md b/CHANGES.md
index 81bf3cc110..a0f9235cac 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -4,7 +4,7 @@ Synapse 1.85.1 (2023-06-07)
 Bugfixes
 --------
 
-- Fix bug in schema delta that broke upgrades for some deployments. Introduced in v1.85.0. ([\#15738](https://github.com/matrix-org/synapse/issues/15738))
+- Fix bug in schema delta that broke upgrades for some deployments. Introduced in v1.85.0. ([\#15738](https://github.com/matrix-org/synapse/issues/15738), [\#15739](https://github.com/matrix-org/synapse/issues/15739))
 
 
 Synapse 1.85.0 (2023-06-06)
diff --git a/changelog.d/15739.bugfix b/changelog.d/15739.bugfix
deleted file mode 100644
index 7129ab0782..0000000000
--- a/changelog.d/15739.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug in schema delta that broke upgrades for some deployments. Introduced in v1.85.0.

From 6cd6a2ae59e718b0695774e7348097af2c27d973 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 7 Jun 2023 13:07:40 +0100
Subject: [PATCH 107/562] Update changelog

---
 CHANGES.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/CHANGES.md b/CHANGES.md
index a0f9235cac..5babc22f2a 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,6 +1,8 @@
 Synapse 1.85.1 (2023-06-07)
 ===========================
 
+Note: this release only fixes a bug that stopped some deployments from upgrading to v1.85.0. There is no need to upgrade to v1.85.1 if you are already running v1.85.0 successfully.
+
 Bugfixes
 --------

From 5c24d7b9ebd8dec2c76dac5118cee22a1bb1032a Mon Sep 17 00:00:00 2001
From: Grant McLean
Date: Thu, 8 Jun 2023 03:21:25 +1200
Subject: [PATCH 108/562] Check required power levels earlier in createRoom
 handler. (#15695)

* Check required power levels earlier in createRoom handler.

  - If a server was configured to reject the creation of rooms with E2EE
    enabled (by specifying an unattainably high power level for
    "m.room.encryption" in default_power_level_content_override), the 403
    error was not being triggered until after the room was created and
    before the "m.room.power_levels" was sent.  This allowed a user to
    access the partially-configured room and complete the setup of E2EE
    and power levels manually.

  - This change causes the power level overrides to be checked earlier
    and the request to be rejected before the user gains access to the
    room.

  - A new `_validate_room_config` method is added to contain checks that
    should be run before a room is created.

  - The new test case confirms that a user request is rejected by the new
    validation method.

Signed-off-by: Grant McLean

* Add a changelog file.

* Formatting fix for black.

* Remove unneeded line from test.

---------

Signed-off-by: Grant McLean
---
 changelog.d/15695.bugfix        |  1 +
 synapse/handlers/room.py        | 76 +++++++++++++++++++++++++++------
 tests/rest/client/test_rooms.py | 37 ++++++++++++++++
 3 files changed, 100 insertions(+), 14 deletions(-)
 create mode 100644 changelog.d/15695.bugfix

diff --git a/changelog.d/15695.bugfix b/changelog.d/15695.bugfix
new file mode 100644
index 0000000000..99bf1fe05e
--- /dev/null
+++ b/changelog.d/15695.bugfix
@@ -0,0 +1 @@
+Check permissions for enabling encryption earlier during room creation to avoid creating broken rooms.
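Before the handler diff itself, here is a standalone sketch of the core comparison it introduces: if the configured override gives `m.room.encryption` a higher required power level than the creator's `m.room.power_levels` level, refuse up front. The function name is invented and `PermissionError` stands in for Synapse's `SynapseError(403, ...)`; the actual implementation follows in the diff below.

```python
from typing import Optional

def check_encryption_creatable(overrides: Optional[dict], preset_name: str) -> None:
    override = (overrides or {}).get(preset_name)
    if override is None:
        return
    event_levels = override.get("events", {})
    # Both levels default to 100, matching the real check.
    room_admin_level = event_levels.get("m.room.power_levels", 100)
    encryption_level = event_levels.get("m.room.encryption", 100)
    if encryption_level > room_admin_level:
        raise PermissionError(
            f"You cannot create an encrypted room. "
            f"user_level ({room_admin_level}) < send_level ({encryption_level})"
        )

# The override from the new test case trips the check before any room exists:
try:
    check_encryption_creatable(
        {"private_chat": {"events": {"m.room.encryption": 999}}}, "private_chat"
    )
except PermissionError as e:
    print(e)  # You cannot create an encrypted room. user_level (100) < send_level (999)
```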
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index cb957f2033..bf907b7881 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -872,6 +872,8 @@ class RoomCreationHandler: visibility = config.get("visibility", "private") is_public = visibility == "public" + self._validate_room_config(config, visibility) + room_id = await self._generate_and_create_room_id( creator_id=user_id, is_public=is_public, @@ -1111,20 +1113,7 @@ class RoomCreationHandler: return new_event, new_unpersisted_context - visibility = room_config.get("visibility", "private") - preset_config = room_config.get( - "preset", - RoomCreationPreset.PRIVATE_CHAT - if visibility == "private" - else RoomCreationPreset.PUBLIC_CHAT, - ) - - try: - config = self._presets_dict[preset_config] - except KeyError: - raise SynapseError( - 400, f"'{preset_config}' is not a valid preset", errcode=Codes.BAD_JSON - ) + preset_config, config = self._room_preset_config(room_config) # MSC2175 removes the creator field from the create event. if not room_version.msc2175_implicit_room_creator: @@ -1306,6 +1295,65 @@ class RoomCreationHandler: assert last_event.internal_metadata.stream_ordering is not None return last_event.internal_metadata.stream_ordering, last_event.event_id, depth + def _validate_room_config( + self, + config: JsonDict, + visibility: str, + ) -> None: + """Checks configuration parameters for a /createRoom request. + + If validation detects invalid parameters an exception may be raised to + cause room creation to be aborted and an error response to be returned + to the client. + + Args: + config: A dict of configuration options. Originally from the body of + the /createRoom request + visibility: One of "public" or "private" + """ + + # Validate the requested preset, raise a 400 error if not valid + preset_name, preset_config = self._room_preset_config(config) + + # If the user is trying to create an encrypted room and this is forbidden + # by the configured default_power_level_content_override, then reject the + # request before the room is created. + raw_initial_state = config.get("initial_state", []) + room_encryption_event = any( + s.get("type", "") == EventTypes.RoomEncryption for s in raw_initial_state + ) + + if preset_config["encrypted"] or room_encryption_event: + if self._default_power_level_content_override: + override = self._default_power_level_content_override.get(preset_name) + if override is not None: + event_levels = override.get("events", {}) + room_admin_level = event_levels.get(EventTypes.PowerLevels, 100) + encryption_level = event_levels.get(EventTypes.RoomEncryption, 100) + if encryption_level > room_admin_level: + raise SynapseError( + 403, + f"You cannot create an encrypted room. user_level ({room_admin_level}) < send_level ({encryption_level})", + ) + + def _room_preset_config(self, room_config: JsonDict) -> Tuple[str, dict]: + # The spec says rooms should default to private visibility if + # `visibility` is not specified. + visibility = room_config.get("visibility", "private") + preset_name = room_config.get( + "preset", + RoomCreationPreset.PRIVATE_CHAT + if visibility == "private" + else RoomCreationPreset.PUBLIC_CHAT, + ) + try: + preset_config = self._presets_dict[preset_name] + except KeyError: + raise SynapseError( + 400, f"'{preset_name}' is not a valid preset", errcode=Codes.BAD_JSON + ) + return preset_name, preset_config + def _generate_room_id(self) -> str: """Generates a random room ID. 
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 4d39c89f6f..f1b4e1ad2f 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -1941,6 +1941,43 @@ class RoomPowerLevelOverridesInPracticeTestCase(RoomBase): channel.json_body["error"], ) + @unittest.override_config( + { + "default_power_level_content_override": { + "private_chat": { + "events": { + "m.room.avatar": 50, + "m.room.canonical_alias": 50, + "m.room.encryption": 999, + "m.room.history_visibility": 100, + "m.room.name": 50, + "m.room.power_levels": 100, + "m.room.server_acl": 100, + "m.room.tombstone": 100, + }, + "events_default": 0, + }, + } + }, + ) + def test_config_override_blocks_encrypted_room(self) -> None: + # Given the server has config for private_chats, + + # When I attempt to create an encrypted private_chat room + channel = self.make_request( + "POST", + "/createRoom", + '{"creation_content": {"m.federate": false},"name": "Secret Private Room","preset": "private_chat","initial_state": [{"type": "m.room.encryption","state_key": "","content": {"algorithm": "m.megolm.v1.aes-sha2"}}]}', + ) + + # Then I am not allowed because the required power level is unattainable + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) + self.assertEqual( + "You cannot create an encrypted room. " + + "user_level (100) < send_level (999)", + channel.json_body["error"], + ) + class RoomInitialSyncTestCase(RoomBase): """Tests /rooms/$room_id/initialSync.""" From 195b6a298d509518bf16d5a421d706ecb2ccdce6 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 7 Jun 2023 11:45:16 -0500 Subject: [PATCH 109/562] Remove redundant `room_memberships` join to find participating servers in a room (#15732) Spawning from https://github.com/matrix-org/synapse/pull/15731 --- changelog.d/15732.doc | 1 + docs/usage/administration/admin_faq.md | 5 ++--- 2 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15732.doc diff --git a/changelog.d/15732.doc b/changelog.d/15732.doc new file mode 100644 index 0000000000..b0e8639df7 --- /dev/null +++ b/changelog.d/15732.doc @@ -0,0 +1 @@ +Simplify query to find participating servers in a room. diff --git a/docs/usage/administration/admin_faq.md b/docs/usage/administration/admin_faq.md index 28c3dd53a5..5c9ee7d0aa 100644 --- a/docs/usage/administration/admin_faq.md +++ b/docs/usage/administration/admin_faq.md @@ -27,9 +27,8 @@ What servers are currently participating in this room? Run this sql query on your db: ```sql SELECT DISTINCT split_part(state_key, ':', 2) - FROM current_state_events AS c - INNER JOIN room_memberships AS m USING (room_id, event_id) - WHERE room_id = '!cURbafjkfsMDVwdRDQ:matrix.org' AND membership = 'join'; +FROM current_state_events +WHERE room_id = '!cURbafjkfsMDVwdRDQ:matrix.org' AND membership = 'join'; ``` What users are registered on my server? 
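One caveat on the FAQ query above: `split_part` is PostgreSQL-specific, and `split_part(state_key, ':', 2)` takes only the second colon-delimited field, so a server name that legitimately contains a port (e.g. `matrix.org:8448`) would be reported without it. A rough Python equivalent, assuming you have already fetched the joined members' state keys by some other means, keeps the port by splitting on the first colon only:

```python
def server_of(state_key: str) -> str:
    # An m.room.member state_key is a user ID, "@localpart:servername"; the
    # server name may itself contain a colon (an explicit port), so only
    # split on the first one.
    return state_key.split(":", 1)[1]

members = ["@alice:example.com", "@bob:matrix.org:8448", "@carol:example.com"]
print(sorted({server_of(m) for m in members}))
# ['example.com', 'matrix.org:8448']
```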
From e536f02f68135a8494f80ded75d1a53b98cbcb8d Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 7 Jun 2023 11:47:01 -0500 Subject: [PATCH 110/562] Remove superfluous `room_memberships` join from background update (#15733) Spawning from https://github.com/matrix-org/synapse/pull/15731 --- changelog.d/15733.misc | 1 + synapse/storage/databases/main/roommember.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/15733.misc diff --git a/changelog.d/15733.misc b/changelog.d/15733.misc new file mode 100644 index 0000000000..3ae7be3c27 --- /dev/null +++ b/changelog.d/15733.misc @@ -0,0 +1 @@ +Remove superfluous `room_memberships` join from background update. diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 1b8ec67f54..582875c91a 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1460,7 +1460,6 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore): SELECT stream_ordering, event_id, events.room_id, event_json.json FROM events INNER JOIN event_json USING (event_id) - INNER JOIN room_memberships USING (event_id) WHERE ? <= stream_ordering AND stream_ordering < ? AND type = 'm.room.member' ORDER BY stream_ordering DESC From d162aecaac52fb467822e319e4c3c5b216c33ca9 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 7 Jun 2023 18:12:23 +0100 Subject: [PATCH 111/562] Quick & dirty metric for background update status (#15740) * Quick & dirty metric for background update status * Changelog * Remove debug Co-authored-by: Mathieu Velten * Actually write to _aborted --------- Co-authored-by: Mathieu Velten --- changelog.d/15740.feature | 1 + synapse/metrics/__init__.py | 2 ++ synapse/storage/background_updates.py | 30 +++++++++++++++++++++++++++ synapse/storage/database.py | 8 ++++++- 4 files changed, 40 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15740.feature diff --git a/changelog.d/15740.feature b/changelog.d/15740.feature new file mode 100644 index 0000000000..fed342ea55 --- /dev/null +++ b/changelog.d/15740.feature @@ -0,0 +1 @@ +Expose a metric reporting the database background update status. diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 8ce5887229..39fc629937 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -77,6 +77,8 @@ RegistryProxy = cast(CollectorRegistry, _RegistryProxy) @attr.s(slots=True, hash=True, auto_attribs=True) class LaterGauge(Collector): + """A Gauge which periodically calls a user-provided callback to produce metrics.""" + name: str desc: str labels: Optional[Sequence[str]] = attr.ib(hash=False) diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index ca085ef800..edc97a9d61 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from enum import IntEnum from types import TracebackType from typing import ( TYPE_CHECKING, @@ -136,6 +137,15 @@ class BackgroundUpdatePerformance: return float(self.total_item_count) / float(self.total_duration_ms) +class UpdaterStatus(IntEnum): + # Use negative values for error conditions. + ABORTED = -1 + DISABLED = 0 + NOT_STARTED = 1 + RUNNING_UPDATE = 2 + COMPLETE = 3 + + class BackgroundUpdater: """Background updates are updates to the database that run in the background. 
Each update processes a batch of data at once. We attempt to @@ -158,11 +168,16 @@ class BackgroundUpdater: self._background_update_performance: Dict[str, BackgroundUpdatePerformance] = {} self._background_update_handlers: Dict[str, _BackgroundUpdateHandler] = {} + # TODO: all these bool flags make me feel icky---can we combine into a status + # enum? self._all_done = False # Whether we're currently running updates self._running = False + # Marker to be set if we abort and halt all background updates. + self._aborted = False + # Whether background updates are enabled. This allows us to # enable/disable background updates via the admin API. self.enabled = True @@ -175,6 +190,20 @@ class BackgroundUpdater: self.sleep_duration_ms = hs.config.background_updates.sleep_duration_ms self.sleep_enabled = hs.config.background_updates.sleep_enabled + def get_status(self) -> UpdaterStatus: + """An integer summarising the updater status. Used as a metric.""" + if self._aborted: + return UpdaterStatus.ABORTED + # TODO: a status for "have seen at least one failure, but haven't aborted yet". + if not self.enabled: + return UpdaterStatus.DISABLED + + if self._all_done: + return UpdaterStatus.COMPLETE + if self._running: + return UpdaterStatus.RUNNING_UPDATE + return UpdaterStatus.NOT_STARTED + def register_update_controller_callbacks( self, on_update: ON_UPDATE_CALLBACK, @@ -296,6 +325,7 @@ class BackgroundUpdater: except Exception: back_to_back_failures += 1 if back_to_back_failures >= 5: + self._aborted = True raise RuntimeError( "5 back-to-back background update failures; aborting." ) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index bdaa508dbe..10fa6c4802 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -54,7 +54,7 @@ from synapse.logging.context import ( current_context, make_deferred_yieldable, ) -from synapse.metrics import register_threadpool +from synapse.metrics import LaterGauge, register_threadpool from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.background_updates import BackgroundUpdater from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine @@ -547,6 +547,12 @@ class DatabasePool: self._db_pool = make_pool(hs.get_reactor(), database_config, engine) self.updates = BackgroundUpdater(hs, self) + LaterGauge( + "synapse_background_update_status", + "Background update status", + [], + self.updates.get_status, + ) self._previous_txn_total_time = 0.0 self._current_txn_total_time = 0.0 From 733342ad3ef271a2c5bd4ba442a15fa3be3dab30 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 8 Jun 2023 13:03:48 +0100 Subject: [PATCH 112/562] Fix using TLS for replication (#15746) Fixes #15744. --- changelog.d/15746.bugfix | 1 + synapse/http/replicationagent.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15746.bugfix diff --git a/changelog.d/15746.bugfix b/changelog.d/15746.bugfix new file mode 100644 index 0000000000..8d3e22f2e5 --- /dev/null +++ b/changelog.d/15746.bugfix @@ -0,0 +1 @@ +Fix regression where using TLS for replication did not work. Introduced in v1.85.0. 
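To see why the one-line change below fixes the regression: policies following Twisted's `IPolicyForHTTPS` contract are normally handed the hostname as *bytes* (Twisted's `Agent` passes the raw host from a parsed URI), so a `str` host taken from the worker instance map has to be encoded before calling `creatorForNetloc()` directly. A toy illustration, where `FakePolicy` is invented and only the bytes-vs-str contract is the point:

```python
class FakePolicy:
    """Stands in for Synapse's TLS context factory; only the type check is real."""

    def creatorForNetloc(self, hostname: bytes, port: int) -> str:
        if not isinstance(hostname, bytes):
            raise TypeError(f"hostname must be bytes, got {type(hostname).__name__}")
        return f"TLS options for {hostname.decode('ascii')}:{port}"

policy = FakePolicy()
host, port = "worker1.example.com", 8034  # hypothetical instance_map entry

# Passing `host` directly (a str) would raise; encoding restores the contract:
print(policy.creatorForNetloc(host.encode("utf-8"), port))
# TLS options for worker1.example.com:8034
```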
diff --git a/synapse/http/replicationagent.py b/synapse/http/replicationagent.py index 800f21873d..d6ba6f0e57 100644 --- a/synapse/http/replicationagent.py +++ b/synapse/http/replicationagent.py @@ -76,7 +76,7 @@ class ReplicationEndpointFactory: endpoint = wrapClientTLS( # The 'port' argument below isn't actually used by the function self.context_factory.creatorForNetloc( - self.instance_map[worker_name].host, + self.instance_map[worker_name].host.encode("utf-8"), self.instance_map[worker_name].port, ), endpoint, From a4921b23703776c9399433906b57c90fadb55bb6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 8 Jun 2023 13:04:26 +0100 Subject: [PATCH 113/562] 1.85.2 --- CHANGES.md | 9 +++++++++ changelog.d/15746.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/15746.bugfix diff --git a/CHANGES.md b/CHANGES.md index 5babc22f2a..f3eb0182f6 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.85.2 (2023-06-08) +=========================== + +Bugfixes +-------- + +- Fix regression where using TLS for replication did not work. Introduced in v1.85.0. ([\#15746](https://github.com/matrix-org/synapse/issues/15746)) + + Synapse 1.85.1 (2023-06-07) =========================== diff --git a/changelog.d/15746.bugfix b/changelog.d/15746.bugfix deleted file mode 100644 index 8d3e22f2e5..0000000000 --- a/changelog.d/15746.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix regression where using TLS for replication did not work. Introduced in v1.85.0. diff --git a/debian/changelog b/debian/changelog index 6d6f10ddf1..a7503ea60a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.85.2) stable; urgency=medium + + * New Synapse release 1.85.2. + + -- Synapse Packaging team Thu, 08 Jun 2023 13:04:18 +0100 + matrix-synapse-py3 (1.85.1) stable; urgency=medium * New Synapse release 1.85.1. diff --git a/pyproject.toml b/pyproject.toml index 5b6123dff6..02c9255f6e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.85.1" +version = "1.85.2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From c485ed1c5a4c62ae555531cfd001a5e5f8bc2e44 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 8 Jun 2023 13:14:40 +0100 Subject: [PATCH 114/562] Clear event caches when we purge history (#15609) This should help a little with #13476 --------- Co-authored-by: Patrick Cloke --- changelog.d/15609.bugfix | 1 + synapse/storage/_base.py | 31 ++++ synapse/storage/databases/main/cache.py | 134 ++++++++++++++++++ .../storage/databases/main/events_worker.py | 9 ++ .../storage/databases/main/purge_events.py | 8 +- synapse/util/caches/lrucache.py | 2 +- tests/handlers/test_sync.py | 2 +- tests/rest/client/test_read_marker.py | 3 - .../databases/main/test_events_worker.py | 8 +- 9 files changed, 184 insertions(+), 14 deletions(-) create mode 100644 changelog.d/15609.bugfix diff --git a/changelog.d/15609.bugfix b/changelog.d/15609.bugfix new file mode 100644 index 0000000000..b5a990cfec --- /dev/null +++ b/changelog.d/15609.bugfix @@ -0,0 +1 @@ +Correctly clear caches when we delete a room. 
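The diffs below all follow the same two-step shape: record the invalidation in the replication stream *inside* the database transaction, and only drop the local caches *after* it commits, via a `call_after()` hook. A schematic of that pattern with invented class and function names (Synapse's real `LoggingTransaction` and replication stream are far richer):

```python
from typing import Callable, List

class FakeTxn:
    """Mimics just the call_after() hook of a transaction wrapper."""

    def __init__(self) -> None:
        self._after: List[Callable[[], None]] = []

    def call_after(self, fn: Callable[..., None], *args: object) -> None:
        # Defer the callback until the surrounding transaction commits.
        self._after.append(lambda: fn(*args))

    def commit(self) -> None:
        for cb in self._after:
            cb()

def send_invalidation_to_replication(txn: FakeTxn, cache_name: str, keys: List[str]) -> None:
    print(f"(in txn) queue replication invalidation: {cache_name} {keys}")

def invalidate_caches_for_room(room_id: str) -> None:
    print(f"(after commit) drop local caches for {room_id}")

def invalidate_caches_for_room_and_stream(txn: FakeTxn, room_id: str) -> None:
    send_invalidation_to_replication(txn, "dr_cache_fake", [room_id])
    txn.call_after(invalidate_caches_for_room, room_id)

txn = FakeTxn()
invalidate_caches_for_room_and_stream(txn, "!room:test")
txn.commit()
```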
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 481fec72fe..fe4a763411 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -86,9 +86,14 @@ class SQLBaseStore(metaclass=ABCMeta):
             room_id: Room where state changed
             members_changed: The user_ids of members that have changed
         """
+
+        # XXX: If you add something to this function make sure you add it to
+        # `_invalidate_state_caches_all` as well.
+
         # If there were any membership changes, purge the appropriate caches.
         for host in {get_domain_from_id(u) for u in members_changed}:
             self._attempt_to_invalidate_cache("is_host_joined", (room_id, host))
+            self._attempt_to_invalidate_cache("is_host_invited", (room_id, host))
         if members_changed:
             self._attempt_to_invalidate_cache("get_users_in_room", (room_id,))
             self._attempt_to_invalidate_cache("get_current_hosts_in_room", (room_id,))
@@ -117,6 +122,32 @@ class SQLBaseStore(metaclass=ABCMeta):
         self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
         self._attempt_to_invalidate_cache("get_partial_current_state_ids", (room_id,))
 
+    def _invalidate_state_caches_all(self, room_id: str) -> None:
+        """Invalidates caches that are based on the current state, but does
+        not stream invalidations down replication.
+
+        Same as `_invalidate_state_caches`, except that it works when we don't
+        know which memberships have changed.
+
+        Args:
+            room_id: Room where state changed
+        """
+        self._attempt_to_invalidate_cache("get_partial_current_state_ids", (room_id,))
+        self._attempt_to_invalidate_cache("get_users_in_room", (room_id,))
+        self._attempt_to_invalidate_cache("is_host_invited", None)
+        self._attempt_to_invalidate_cache("is_host_joined", None)
+        self._attempt_to_invalidate_cache("get_current_hosts_in_room", (room_id,))
+        self._attempt_to_invalidate_cache("get_users_in_room_with_profiles", (room_id,))
+        self._attempt_to_invalidate_cache("get_number_joined_users_in_room", (room_id,))
+        self._attempt_to_invalidate_cache("get_local_users_in_room", (room_id,))
+        self._attempt_to_invalidate_cache("does_pair_of_users_share_a_room", None)
+        self._attempt_to_invalidate_cache("get_user_in_room_with_profile", None)
+        self._attempt_to_invalidate_cache(
+            "get_rooms_for_user_with_stream_ordering", None
+        )
+        self._attempt_to_invalidate_cache("get_rooms_for_user", None)
+        self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
+
     def _attempt_to_invalidate_cache(
         self, cache_name: str, key: Optional[Collection[Any]]
     ) -> bool:
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py
index 46fa0a73f9..6e1c7d681f 100644
--- a/synapse/storage/databases/main/cache.py
+++ b/synapse/storage/databases/main/cache.py
@@ -46,6 +46,12 @@ logger = logging.getLogger(__name__)
 # based on the current state when notifying workers over replication.
 CURRENT_STATE_CACHE_NAME = "cs_cache_fake"
 
+# As above, but for invalidating event caches on history deletion
+PURGE_HISTORY_CACHE_NAME = "ph_cache_fake"
+
+# As above, but for invalidating room caches on room deletion
+DELETE_ROOM_CACHE_NAME = "dr_cache_fake"
+
 
 class CacheInvalidationWorkerStore(SQLBaseStore):
     def __init__(
@@ -175,6 +181,23 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
                 room_id = row.keys[0]
                 members_changed = set(row.keys[1:])
                 self._invalidate_state_caches(room_id, members_changed)
+            elif row.cache_func == PURGE_HISTORY_CACHE_NAME:
+                if row.keys is None:
+                    raise Exception(
+                        "Can't send an 'invalidate all' for 'purge history' cache"
+                    )
+
+                room_id = row.keys[0]
+                self._invalidate_caches_for_room_events(room_id)
+            elif row.cache_func == DELETE_ROOM_CACHE_NAME:
+                if row.keys is None:
+                    raise Exception(
+                        "Can't send an 'invalidate all' for 'delete room' cache"
+                    )
+
+                room_id = row.keys[0]
+                self._invalidate_caches_for_room_events(room_id)
+                self._invalidate_caches_for_room(room_id)
             else:
                 self._attempt_to_invalidate_cache(row.cache_func, row.keys)
 
@@ -226,6 +249,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
         relates_to: Optional[str],
         backfilled: bool,
     ) -> None:
+        # XXX: If you add something to this function make sure you add it to
+        # `_invalidate_caches_for_room_events` as well.
+
         # This invalidates any local in-memory cached event objects, the original
         # process triggering the invalidation is responsible for clearing any external
         # cached objects.
@@ -271,6 +297,106 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
         self._attempt_to_invalidate_cache("get_thread_participated", (relates_to,))
         self._attempt_to_invalidate_cache("get_threads", (room_id,))
 
+    def _invalidate_caches_for_room_events_and_stream(
+        self, txn: LoggingTransaction, room_id: str
+    ) -> None:
+        """Invalidate caches associated with events in a room, and stream to
+        replication.
+
+        Used when we delete events in a room, but don't know which events we've
+        deleted.
+        """
+
+        self._send_invalidation_to_replication(txn, PURGE_HISTORY_CACHE_NAME, [room_id])
+        txn.call_after(self._invalidate_caches_for_room_events, room_id)
+
+    def _invalidate_caches_for_room_events(self, room_id: str) -> None:
+        """Invalidate caches associated with events in a room (locally only;
+        this does not stream the invalidation to replication).
+
+        Used when we delete events in a room, but don't know which events we've
+        deleted.
+ """ + + self._invalidate_local_get_event_cache_all() # type: ignore[attr-defined] + + self._attempt_to_invalidate_cache("have_seen_event", (room_id,)) + self._attempt_to_invalidate_cache("get_latest_event_ids_in_room", (room_id,)) + self._attempt_to_invalidate_cache( + "get_unread_event_push_actions_by_room_for_user", (room_id,) + ) + + self._attempt_to_invalidate_cache("_get_membership_from_event_id", None) + self._attempt_to_invalidate_cache("get_relations_for_event", None) + self._attempt_to_invalidate_cache("get_applicable_edit", None) + self._attempt_to_invalidate_cache("get_thread_id", None) + self._attempt_to_invalidate_cache("get_thread_id_for_receipts", None) + self._attempt_to_invalidate_cache("get_invited_rooms_for_local_user", None) + self._attempt_to_invalidate_cache( + "get_rooms_for_user_with_stream_ordering", None + ) + self._attempt_to_invalidate_cache("get_rooms_for_user", None) + self._attempt_to_invalidate_cache("get_references_for_event", None) + self._attempt_to_invalidate_cache("get_thread_summary", None) + self._attempt_to_invalidate_cache("get_thread_participated", None) + self._attempt_to_invalidate_cache("get_threads", (room_id,)) + + self._attempt_to_invalidate_cache("_get_state_group_for_event", None) + + self._attempt_to_invalidate_cache("get_event_ordering", None) + self._attempt_to_invalidate_cache("is_partial_state_event", None) + self._attempt_to_invalidate_cache("_get_joined_profile_from_event_id", None) + + def _invalidate_caches_for_room_and_stream( + self, txn: LoggingTransaction, room_id: str + ) -> None: + """Invalidate caches associated with rooms, and stream to replication. + + Used when we delete rooms. + """ + + self._send_invalidation_to_replication(txn, DELETE_ROOM_CACHE_NAME, [room_id]) + txn.call_after(self._invalidate_caches_for_room, room_id) + + def _invalidate_caches_for_room(self, room_id: str) -> None: + """Invalidate caches associated with rooms. + + Used when we delete rooms. + """ + + # If we've deleted the room then we also need to purge all event caches. + self._invalidate_caches_for_room_events(room_id) + + self._attempt_to_invalidate_cache("get_account_data_for_room", None) + self._attempt_to_invalidate_cache("get_account_data_for_room_and_type", None) + self._attempt_to_invalidate_cache("get_aliases_for_room", (room_id,)) + self._attempt_to_invalidate_cache("get_latest_event_ids_in_room", (room_id,)) + self._attempt_to_invalidate_cache("_get_forward_extremeties_for_room", None) + self._attempt_to_invalidate_cache( + "get_unread_event_push_actions_by_room_for_user", (room_id,) + ) + self._attempt_to_invalidate_cache( + "_get_linearized_receipts_for_room", (room_id,) + ) + self._attempt_to_invalidate_cache("is_room_blocked", (room_id,)) + self._attempt_to_invalidate_cache("get_retention_policy_for_room", (room_id,)) + self._attempt_to_invalidate_cache( + "_get_partial_state_servers_at_join", (room_id,) + ) + self._attempt_to_invalidate_cache("is_partial_state_room", (room_id,)) + self._attempt_to_invalidate_cache("get_invited_rooms_for_local_user", None) + self._attempt_to_invalidate_cache( + "get_current_hosts_in_room_ordered", (room_id,) + ) + self._attempt_to_invalidate_cache("did_forget", None) + self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None) + self._attempt_to_invalidate_cache("_get_membership_from_event_id", None) + self._attempt_to_invalidate_cache("get_room_version_id", (room_id,)) + + # And delete state caches. 
+ + self._invalidate_state_caches_all(room_id) + async def invalidate_cache_and_stream( self, cache_name: str, keys: Tuple[Any, ...] ) -> None: @@ -377,6 +503,14 @@ class CacheInvalidationWorkerStore(SQLBaseStore): "Can't stream invalidate all with magic current state cache" ) + if cache_name == PURGE_HISTORY_CACHE_NAME and keys is None: + raise Exception( + "Can't stream invalidate all with magic purge history cache" + ) + + if cache_name == DELETE_ROOM_CACHE_NAME and keys is None: + raise Exception("Can't stream invalidate all with magic delete room cache") + if isinstance(self.database_engine, PostgresEngine): assert self._cache_id_gen is not None diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index a39bc90974..d93ffc4efa 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -903,6 +903,15 @@ class EventsWorkerStore(SQLBaseStore): self._event_ref.pop(event_id, None) self._current_event_fetches.pop(event_id, None) + def _invalidate_local_get_event_cache_all(self) -> None: + """Clears the in-memory get event caches. + + Used when we purge room history. + """ + self._get_event_cache.clear() + self._event_ref.clear() + self._current_event_fetches.clear() + async def _get_events_from_cache( self, events: Iterable[str], update_metrics: bool = True ) -> Dict[str, EventCacheEntry]: diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index efbd3e75d9..9773c1fcd2 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -308,6 +308,8 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): logger.info("[purge] done") + self._invalidate_caches_for_room_events_and_stream(txn, room_id) + return referenced_state_groups async def purge_room(self, room_id: str) -> List[int]: @@ -485,10 +487,6 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): # index on them. In any case we should be clearing out 'stream' tables # periodically anyway (#5888) - # TODO: we could probably usefully do a bunch more cache invalidation here - - # XXX: as with purge_history, this is racy, but no worse than other races - # that already exist. - self._invalidate_cache_and_stream(txn, self.have_seen_event, (room_id,)) + self._invalidate_caches_for_room_and_stream(txn, room_id) return state_groups diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index ed0da17227..6137c85e10 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -862,5 +862,5 @@ class AsyncLruCache(Generic[KT, VT]): async def contains(self, key: KT) -> bool: return self._lru_cache.contains(key) - async def clear(self) -> None: + def clear(self) -> None: self._lru_cache.clear() diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index 0d9a3de92a..9f035a02dc 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -163,7 +163,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): # Blow away caches (supported room versions can only change due to a restart). self.store.get_rooms_for_user_with_stream_ordering.invalidate_all() self.store.get_rooms_for_user.invalidate_all() - self.get_success(self.store._get_event_cache.clear()) + self.store._get_event_cache.clear() self.store._event_ref.clear() # The rooms should be excluded from the sync response. 
diff --git a/tests/rest/client/test_read_marker.py b/tests/rest/client/test_read_marker.py index 0eedcdb476..5cdd5694a0 100644 --- a/tests/rest/client/test_read_marker.py +++ b/tests/rest/client/test_read_marker.py @@ -131,9 +131,6 @@ class ReadMarkerTestCase(unittest.HomeserverTestCase): event = self.get_success(self.store.get_event(event_id_1, allow_none=True)) assert event is None - # TODO See https://github.com/matrix-org/synapse/issues/13476 - self.store.get_event_ordering.invalidate_all() - # Test moving the read marker to a newer event event_id_2 = send_message() channel = self.make_request( diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py index 9606ecc43b..788500e38f 100644 --- a/tests/storage/databases/main/test_events_worker.py +++ b/tests/storage/databases/main/test_events_worker.py @@ -188,7 +188,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase): self.event_id = res["event_id"] # Reset the event cache so the tests start with it empty - self.get_success(self.store._get_event_cache.clear()) + self.store._get_event_cache.clear() def test_simple(self) -> None: """Test that we cache events that we pull from the DB.""" @@ -205,7 +205,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase): """ # Reset the event cache - self.get_success(self.store._get_event_cache.clear()) + self.store._get_event_cache.clear() with LoggingContext("test") as ctx: # We keep hold of the event event though we never use it. @@ -215,7 +215,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase): self.assertEqual(ctx.get_resource_usage().evt_db_fetch_count, 1) # Reset the event cache - self.get_success(self.store._get_event_cache.clear()) + self.store._get_event_cache.clear() with LoggingContext("test") as ctx: self.get_success(self.store.get_event(self.event_id)) @@ -390,7 +390,7 @@ class GetEventCancellationTestCase(unittest.HomeserverTestCase): self.event_id = res["event_id"] # Reset the event cache so the tests start with it empty - self.get_success(self.store._get_event_cache.clear()) + self.store._get_event_cache.clear() @contextmanager def blocking_get_event_calls( From ac3a70a7dd4070bf3953b8913f7c316d701db588 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 8 Jun 2023 13:15:56 +0100 Subject: [PATCH 115/562] Fix up changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index f3eb0182f6..893ceccaea 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ Synapse 1.85.2 (2023-06-08) Bugfixes -------- -- Fix regression where using TLS for replication did not work. Introduced in v1.85.0. ([\#15746](https://github.com/matrix-org/synapse/issues/15746)) +- Fix regression where using TLS for HTTP replication between workers did not work. Introduced in v1.85.0. 
([\#15746](https://github.com/matrix-org/synapse/issues/15746)) Synapse 1.85.1 (2023-06-07) From d84e66144dc12dacf71c987a2ba802dd59c0b68e Mon Sep 17 00:00:00 2001 From: Shay Date: Fri, 9 Jun 2023 00:00:46 -0700 Subject: [PATCH 116/562] Allow for the configuration of max request retries and min/max retry delays in the matrix federation client (#12504) Co-authored-by: Mathieu Velten Co-authored-by: Erik Johnston --- changelog.d/12504.misc | 1 + .../configuration/config_documentation.md | 26 +++++++++++++++++++ synapse/config/federation.py | 10 +++++++ synapse/http/matrixfederationclient.py | 21 ++++++++------- tests/http/test_matrixfederationclient.py | 20 +++++++++++++- 5 files changed, 68 insertions(+), 10 deletions(-) create mode 100644 changelog.d/12504.misc diff --git a/changelog.d/12504.misc b/changelog.d/12504.misc new file mode 100644 index 0000000000..0bebaa213d --- /dev/null +++ b/changelog.d/12504.misc @@ -0,0 +1 @@ +Allow for the configuration of max request retries and min/max retry delays in the matrix federation client. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 0cf6e075ff..8426de0417 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1196,6 +1196,32 @@ Example configuration: allow_device_name_lookup_over_federation: true ``` --- +### `federation` + +The federation section defines some sub-options related to federation. + +The following options control the timeout and retry logic of a single request, +independently of the others. +The short retry algorithm is used when something or someone is waiting for the request +to be answered, while the long retry algorithm is used for requests that happen in the background, +like sending a federation transaction. + +* `client_timeout`: timeout for federation requests, in seconds. Defaults to 60s. +* `max_short_retry_delay`: maximum delay used by the short retry algorithm, in seconds. Defaults to 2s. +* `max_long_retry_delay`: maximum delay used by the long retry algorithm, in seconds. Defaults to 60s. +* `max_short_retries`: maximum number of retries for the short retry algorithm. Defaults to 3 attempts. +* `max_long_retries`: maximum number of retries for the long retry algorithm. Defaults to 10 attempts. + +Example configuration: ```yaml federation: client_timeout: 180 max_short_retry_delay: 7 max_long_retry_delay: 100 max_short_retries: 5 max_long_retries: 20 ``` --- ## Caching Options related to caching. diff --git a/synapse/config/federation.py b/synapse/config/federation.py index 336fca578a..d21f7fd02a 100644 --- a/synapse/config/federation.py +++ b/synapse/config/federation.py @@ -22,6 +22,8 @@ class FederationConfig(Config): section = "federation" def read_config(self, config: JsonDict, **kwargs: Any) -> None: + federation_config = config.setdefault("federation", {}) + # FIXME: federation_domain_whitelist needs sytests self.federation_domain_whitelist: Optional[dict] = None federation_domain_whitelist = config.get("federation_domain_whitelist", None) @@ -49,5 +51,13 @@ class FederationConfig(Config): "allow_device_name_lookup_over_federation", False ) + # Allow for the configuration of timeout, max request retries + # and min/max retry delays in the matrix federation client.
+ self.client_timeout = federation_config.get("client_timeout", 60) + self.max_long_retry_delay = federation_config.get("max_long_retry_delay", 60) + self.max_short_retry_delay = federation_config.get("max_short_retry_delay", 2) + self.max_long_retries = federation_config.get("max_long_retries", 10) + self.max_short_retries = federation_config.get("max_short_retries", 3) + _METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}} diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index abb5ae5815..ed36825b67 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -95,8 +95,6 @@ incoming_responses_counter = Counter( ) -MAX_LONG_RETRIES = 10 -MAX_SHORT_RETRIES = 3 MAXINT = sys.maxsize @@ -406,7 +404,12 @@ class MatrixFederationHttpClient: self.clock = hs.get_clock() self._store = hs.get_datastores().main self.version_string_bytes = hs.version_string.encode("ascii") - self.default_timeout = 60 + self.default_timeout = hs.config.federation.client_timeout + + self.max_long_retry_delay = hs.config.federation.max_long_retry_delay + self.max_short_retry_delay = hs.config.federation.max_short_retry_delay + self.max_long_retries = hs.config.federation.max_long_retries + self.max_short_retries = hs.config.federation.max_short_retries self._cooperator = Cooperator(scheduler=_make_scheduler(self.reactor)) @@ -583,9 +586,9 @@ class MatrixFederationHttpClient: # XXX: Would be much nicer to retry only at the transaction-layer # (once we have reliable transactions in place) if long_retries: - retries_left = MAX_LONG_RETRIES + retries_left = self.max_long_retries else: - retries_left = MAX_SHORT_RETRIES + retries_left = self.max_short_retries url_bytes = request.uri url_str = url_bytes.decode("ascii") @@ -730,12 +733,12 @@ class MatrixFederationHttpClient: if retries_left and not timeout: if long_retries: - delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left) - delay = min(delay, 60) + delay = 4 ** (self.max_long_retries + 1 - retries_left) + delay = min(delay, self.max_long_retry_delay) delay *= random.uniform(0.8, 1.4) else: - delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left) - delay = min(delay, 2) + delay = 0.5 * 2 ** (self.max_short_retries - retries_left) + delay = min(delay, self.max_short_retry_delay) delay *= random.uniform(0.8, 1.4) logger.debug( diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py index 0dfc03ce50..8565f8ac64 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -40,7 +40,7 @@ from synapse.server import HomeServer from synapse.util import Clock from tests.server import FakeTransport -from tests.unittest import HomeserverTestCase +from tests.unittest import HomeserverTestCase, override_config def check_logcontext(context: LoggingContextOrSentinel) -> None: @@ -640,3 +640,21 @@ class FederationClientTests(HomeserverTestCase): self.cl.build_auth_headers( b"", b"GET", b"https://example.com", destination_is=b"" ) + + @override_config( + { + "federation": { + "client_timeout": 180, + "max_long_retry_delay": 100, + "max_short_retry_delay": 7, + "max_long_retries": 20, + "max_short_retries": 5, + } + } + ) + def test_configurable_retry_and_delay_values(self) -> None: + self.assertEqual(self.cl.default_timeout, 180) + self.assertEqual(self.cl.max_long_retry_delay, 100) + self.assertEqual(self.cl.max_short_retry_delay, 7) + self.assertEqual(self.cl.max_long_retries, 20) + 
self.assertEqual(self.cl.max_short_retries, 5)
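For context on what these new knobs control, the hunk in `synapse/http/matrixfederationclient.py` above plugs them into an exponential-backoff loop. The following standalone sketch is illustrative only: `retry_delays` is a made-up helper, not part of Synapse, and the values passed at the bottom mirror the documented option defaults.

```python
import random
from typing import List


def retry_delays(
    long_retries: bool,
    max_retries: int,
    max_delay: float,
) -> List[float]:
    """Return the jittered sleep (in seconds) before each retry attempt."""
    delays = []
    for retries_left in range(max_retries, 0, -1):
        if long_retries:
            # Base-4 exponential backoff: 4s, 16s, 64s, ... capped at max_delay.
            delay = 4.0 ** (max_retries + 1 - retries_left)
        else:
            # Gentler base-2 backoff: 0.5s, 1s, 2s, ... capped at max_delay.
            delay = 0.5 * 2.0 ** (max_retries - retries_left)
        delay = min(delay, max_delay)
        # The patch multiplies by a random factor in [0.8, 1.4] to avoid
        # synchronised retry storms.
        delay *= random.uniform(0.8, 1.4)
        delays.append(delay)
    return delays


# With the documented defaults: short retries sleep ~0.5s, 1s, 2s (pre-jitter);
# long retries ramp 4s, 16s, then sit at the 60s cap for remaining attempts.
print(retry_delays(long_retries=False, max_retries=3, max_delay=2.0))
print(retry_delays(long_retries=True, max_retries=10, max_delay=60.0))
```

In both modes the delay is capped before the jitter is applied, so a configured `max_*_retry_delay` bounds only the pre-jitter value.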
From 373c0c7ff7cf55b5f46aba43f4c4f9bba5c79c0e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 9 Jun 2023 15:00:30 +0100 Subject: [PATCH 117/562] Speed up typechecking CI (#15752) By restoring the rust cache before installing the project. --- .github/workflows/tests.yml | 8 ++++---- changelog.d/15752.misc | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15752.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index cf1899b580..02a4be3a24 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -92,6 +92,10 @@ jobs: - name: Checkout repository uses: actions/checkout@v3 + - name: Install Rust + uses: dtolnay/rust-toolchain@1.58.1 + - uses: Swatinem/rust-cache@v2 + - name: Setup Poetry uses: matrix-org/setup-python-poetry@v1 with: @@ -103,10 +107,6 @@ jobs: # To make CI green, err towards caution and install the project. install-project: "true" - - name: Install Rust - uses: dtolnay/rust-toolchain@1.58.1 - - uses: Swatinem/rust-cache@v2 - # Cribbed from # https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17 - name: Restore/persist mypy's cache diff --git a/changelog.d/15752.misc b/changelog.d/15752.misc new file mode 100644 index 0000000000..7e373b1275 --- /dev/null +++ b/changelog.d/15752.misc @@ -0,0 +1 @@ +Speed up typechecking CI. From fcc3ca37e1b404981d9a0d6f2708e14407775b97 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 9 Jun 2023 15:39:49 -0500 Subject: [PATCH 118/562] Backfill in the background if we're doing it "just because" (#15710) Fix https://github.com/matrix-org/synapse/issues/15702 --- changelog.d/15710.feature | 1 + synapse/handlers/federation.py | 18 ++++++++++++++---- 2 files changed, 15 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15710.feature diff --git a/changelog.d/15710.feature b/changelog.d/15710.feature new file mode 100644 index 0000000000..fe77a2fef6 --- /dev/null +++ b/changelog.d/15710.feature @@ -0,0 +1 @@ +Speed up `/messages` by backfilling in the background when there are no backward extremities where we are directly paginating. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 57d6b70cff..b7b5e21020 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -320,14 +320,21 @@ class FederationHandler: str(len(sorted_backfill_points)), ) - # If we have no backfill points lower than the `current_depth` then - # either we can a) bail or b) still attempt to backfill. We opt to try - # backfilling anyway just in case we do get relevant events. + # If we have no backfill points lower than the `current_depth` then either we + # can a) bail or b) still attempt to backfill. We opt to try backfilling anyway + # just in case we do get relevant events. This is good for eventual consistency's + # sake but we don't need to block the client for something that is just as + # likely not to return anything relevant so we backfill in the background. The + # only way this could return something relevant is if we discover a new branch + # of history that extends all the way back to where we are currently paginating + # and it's within the 100 events that are returned from `/backfill`. if not sorted_backfill_points and current_depth != MAX_DEPTH: logger.debug( "_maybe_backfill_inner: all backfill points are *after* current depth. Trying again with later backfill points." ) - return await self._maybe_backfill_inner( + run_as_background_process( + "_maybe_backfill_inner_anyway_with_max_depth", + self._maybe_backfill_inner, room_id=room_id, # We use `MAX_DEPTH` so that we find all backfill points next # time (all events are below the `MAX_DEPTH`) @@ -338,6 +345,9 @@ # overall otherwise the smaller one will throw off the results. processing_start_time=None, ) + # We return `False` because we're backfilling in the background and there are + # no new events immediately for the caller to know about yet. + return False # Even after recursing with `MAX_DEPTH`, we didn't find any # backward extremities to backfill from. From 4f2bd6be695c83007ebd6f817b74c5a97cf01e4a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jun 2023 09:17:04 +0100 Subject: [PATCH 119/562] Bump types-pyopenssl from 23.1.0.2 to 23.2.0.0 (#15766) Bumps [types-pyopenssl](https://github.com/python/typeshed) from 23.1.0.2 to 23.2.0.0. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-pyopenssl dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 166 +++------------------------------------------------- 1 file changed, 7 insertions(+), 159 deletions(-) diff --git a/poetry.lock b/poetry.lock index 1f5cb3a3a8..228fccac9c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,10 +1,9 @@ -# This file is automatically @generated by Poetry and should not be changed by hand. +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. [[package]] name = "alabaster" version = "0.7.13" description = "A configurable sidebar-enabled Sphinx theme" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -16,7 +15,6 @@ files = [ name = "astroid" version = "2.15.0" description = "An abstract syntax tree for Python with inference support." -category = "dev" optional = false python-versions = ">=3.7.2" files = [ @@ -36,7 +34,6 @@ wrapt = [ name = "attrs" version = "22.2.0" description = "Classes Without Boilerplate" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -55,7 +52,6 @@ tests-no-zope = ["cloudpickle", "cloudpickle", "hypothesis", "hypothesis", "mypy name = "authlib" version = "1.2.0" description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." -category = "main" optional = true python-versions = "*" files = [ @@ -70,7 +66,6 @@ cryptography = ">=3.2" name = "automat" version = "22.10.0" description = "Self-service finite-state machines for the programmer on the go."
-category = "main" optional = false python-versions = "*" files = [ @@ -89,7 +84,6 @@ visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"] name = "babel" version = "2.12.1" description = "Internationalization utilities" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -104,7 +98,6 @@ pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""} name = "bcrypt" version = "4.0.1" description = "Modern password hashing for your software and your servers" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -139,7 +132,6 @@ typecheck = ["mypy"] name = "beautifulsoup4" version = "4.12.0" description = "Screen-scraping library" -category = "dev" optional = false python-versions = ">=3.6.0" files = [ @@ -158,7 +150,6 @@ lxml = ["lxml"] name = "black" version = "23.3.0" description = "The uncompromising code formatter." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -209,7 +200,6 @@ uvloop = ["uvloop (>=0.15.2)"] name = "bleach" version = "6.0.0" description = "An easy safelist-based HTML-sanitizing tool." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -228,7 +218,6 @@ css = ["tinycss2 (>=1.1.0,<1.2)"] name = "canonicaljson" version = "2.0.0" description = "Canonical JSON" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -240,7 +229,6 @@ files = [ name = "certifi" version = "2022.12.7" description = "Python package for providing Mozilla's CA Bundle." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -252,7 +240,6 @@ files = [ name = "cffi" version = "1.15.1" description = "Foreign Function Interface for Python calling C code." -category = "main" optional = false python-versions = "*" files = [ @@ -329,7 +316,6 @@ pycparser = "*" name = "charset-normalizer" version = "3.1.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -category = "main" optional = false python-versions = ">=3.7.0" files = [ @@ -414,7 +400,6 @@ files = [ name = "click" version = "8.1.3" description = "Composable command line interface toolkit" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -430,7 +415,6 @@ importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} name = "click-default-group" version = "1.2.2" description = "Extends click.Group to invoke a command without explicit subcommand name" -category = "dev" optional = false python-versions = "*" files = [ @@ -444,7 +428,6 @@ click = "*" name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." -category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ @@ -456,7 +439,6 @@ files = [ name = "commonmark" version = "0.9.1" description = "Python parser for the CommonMark Markdown spec" -category = "dev" optional = false python-versions = "*" files = [ @@ -471,7 +453,6 @@ test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"] name = "constantly" version = "15.1.0" description = "Symbolic constants in Python" -category = "main" optional = false python-versions = "*" files = [ @@ -483,7 +464,6 @@ files = [ name = "cryptography" version = "40.0.2" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
-category = "main" optional = false python-versions = ">=3.6" files = [ @@ -525,7 +505,6 @@ tox = ["tox"] name = "defusedxml" version = "0.7.1" description = "XML bomb protection for Python stdlib modules" -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -537,7 +516,6 @@ files = [ name = "deprecated" version = "1.2.13" description = "Python @deprecated decorator to deprecate old python classes, functions or methods." -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -555,7 +533,6 @@ dev = ["PyTest", "PyTest (<5)", "PyTest-Cov", "PyTest-Cov (<2.6)", "bump2version name = "docutils" version = "0.19" description = "Docutils -- Python Documentation Utilities" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -567,7 +544,6 @@ files = [ name = "elementpath" version = "4.1.0" description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and lxml" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -582,7 +558,6 @@ dev = ["Sphinx", "coverage", "flake8", "lxml", "lxml-stubs", "memory-profiler", name = "furo" version = "2023.5.20" description = "A clean customisable Sphinx documentation theme." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -600,7 +575,6 @@ sphinx-basic-ng = "*" name = "gitdb" version = "4.0.10" description = "Git Object Database" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -615,7 +589,6 @@ smmap = ">=3.0.1,<6" name = "gitpython" version = "3.1.31" description = "GitPython is a Python library used to interact with Git repositories" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -631,7 +604,6 @@ typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\"" name = "hiredis" version = "2.2.3" description = "Python wrapper for hiredis" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -730,7 +702,6 @@ files = [ name = "hyperlink" version = "21.0.0" description = "A featureful, immutable, and correct URL for Python." 
-category = "main" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -745,7 +716,6 @@ idna = ">=2.5" name = "idna" version = "3.4" description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -757,7 +727,6 @@ files = [ name = "ijson" version = "3.2.0.post0" description = "Iterative JSON parser with standard Python iterator interfaces" -category = "main" optional = false python-versions = "*" files = [ @@ -845,7 +814,6 @@ files = [ name = "imagesize" version = "1.4.1" description = "Getting image size from png/jpeg/jpeg2000/gif file" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -857,7 +825,6 @@ files = [ name = "immutabledict" version = "2.2.4" description = "Immutable wrapper around dictionaries (a fork of frozendict)" -category = "main" optional = false python-versions = ">=3.7,<4.0" files = [ @@ -869,7 +836,6 @@ files = [ name = "importlib-metadata" version = "6.6.0" description = "Read metadata from Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -890,7 +856,6 @@ testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packag name = "importlib-resources" version = "5.12.0" description = "Read resources from Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -909,7 +874,6 @@ testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-chec name = "incremental" version = "22.10.0" description = "\"A small library that versions your Python projects.\"" -category = "main" optional = false python-versions = "*" files = [ @@ -925,7 +889,6 @@ scripts = ["click (>=6.0)", "twisted (>=16.4.0)"] name = "isort" version = "5.11.5" description = "A Python utility / library to sort Python imports." -category = "dev" optional = false python-versions = ">=3.7.0" files = [ @@ -943,7 +906,6 @@ requirements-deprecated-finder = ["pip-api", "pipreqs"] name = "jaeger-client" version = "4.8.0" description = "Jaeger Python OpenTracing Tracer implementation" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -963,7 +925,6 @@ tests = ["codecov", "coverage", "flake8", "flake8-quotes", "flake8-typing-import name = "jaraco-classes" version = "3.2.3" description = "Utility functions for Python class constructs" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -982,7 +943,6 @@ testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-chec name = "jeepney" version = "0.8.0" description = "Low-level, pure Python DBus protocol wrapper." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -998,7 +958,6 @@ trio = ["async_generator", "trio"] name = "jinja2" version = "3.1.2" description = "A very fast and expressive template engine." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1016,7 +975,6 @@ i18n = ["Babel (>=2.7)"] name = "jsonschema" version = "4.17.3" description = "An implementation of JSON Schema validation for Python" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1040,7 +998,6 @@ format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339- name = "keyring" version = "23.13.1" description = "Store and access your passwords safely." 
-category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1065,7 +1022,6 @@ testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-chec name = "lazy-object-proxy" version = "1.9.0" description = "A fast and thorough lazy object proxy." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1111,7 +1067,6 @@ files = [ name = "ldap3" version = "2.9.1" description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library" -category = "main" optional = true python-versions = "*" files = [ @@ -1126,7 +1081,6 @@ pyasn1 = ">=0.4.6" name = "lxml" version = "4.9.2" description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" files = [ @@ -1219,7 +1173,6 @@ source = ["Cython (>=0.29.7)"] name = "lxml-stubs" version = "0.4.0" description = "Type annotations for the lxml package" -category = "dev" optional = false python-versions = "*" files = [ @@ -1234,7 +1187,6 @@ test = ["coverage[toml] (==5.2)", "pytest (>=6.0.0)", "pytest-mypy-plugins (==1. name = "markdown-it-py" version = "2.2.0" description = "Python port of markdown-it. Markdown parsing, done right!" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1260,7 +1212,6 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] name = "markupsafe" version = "2.1.2" description = "Safely add untrusted strings to HTML/XML markup." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1320,7 +1271,6 @@ files = [ name = "matrix-common" version = "1.3.0" description = "Common utilities for Synapse, Sydent and Sygnal" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1340,7 +1290,6 @@ test = ["aiounittest", "tox", "twisted"] name = "matrix-synapse-ldap3" version = "0.2.2" description = "An LDAP3 auth provider for Synapse" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -1360,7 +1309,6 @@ dev = ["black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "ldaptor", "ma name = "mdit-py-plugins" version = "0.3.5" description = "Collection of plugins for markdown-it-py" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1380,7 +1328,6 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] name = "mdurl" version = "0.1.2" description = "Markdown URL utilities" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1392,7 +1339,6 @@ files = [ name = "more-itertools" version = "9.1.0" description = "More routines for operating on iterables, beyond itertools" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1404,7 +1350,6 @@ files = [ name = "msgpack" version = "1.0.5" description = "MessagePack serializer" -category = "main" optional = false python-versions = "*" files = [ @@ -1477,7 +1422,6 @@ files = [ name = "mypy" version = "1.0.1" description = "Optional static typing for Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1525,7 +1469,6 @@ reports = ["lxml"] name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." 
-category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -1537,7 +1480,6 @@ files = [ name = "mypy-zope" version = "0.9.1" description = "Plugin for mypy to support zope interfaces" -category = "dev" optional = false python-versions = "*" files = [ @@ -1557,7 +1499,6 @@ test = ["lxml", "pytest (>=4.6)", "pytest-cov"] name = "myst-parser" version = "1.0.0" description = "An extended [CommonMark](https://spec.commonmark.org/) compliant parser," -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1584,7 +1525,6 @@ testing-docutils = ["pygments", "pytest (>=7,<8)", "pytest-param-files (>=0.3.4, name = "netaddr" version = "0.8.0" description = "A network address manipulation library for Python" -category = "main" optional = false python-versions = "*" files = [ @@ -1596,7 +1536,6 @@ files = [ name = "opentracing" version = "2.4.0" description = "OpenTracing API for Python. See documentation at http://opentracing.io" -category = "main" optional = true python-versions = "*" files = [ @@ -1610,7 +1549,6 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte name = "packaging" version = "23.1" description = "Core utilities for Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1622,7 +1560,6 @@ files = [ name = "parameterized" version = "0.9.0" description = "Parameterized testing with any Python test framework" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1637,7 +1574,6 @@ dev = ["jinja2"] name = "pathspec" version = "0.11.1" description = "Utility library for gitignore style pattern matching of file paths." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1649,7 +1585,6 @@ files = [ name = "phonenumbers" version = "8.13.11" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." -category = "main" optional = false python-versions = "*" files = [ @@ -1661,7 +1596,6 @@ files = [ name = "pillow" version = "9.4.0" description = "Python Imaging Library (Fork)" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1752,7 +1686,6 @@ tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "pa name = "pkginfo" version = "1.9.6" description = "Query metadata from sdists / bdists / installed packages." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1767,7 +1700,6 @@ testing = ["pytest", "pytest-cov"] name = "pkgutil-resolve-name" version = "1.3.10" description = "Resolve a name to an object." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1779,7 +1711,6 @@ files = [ name = "platformdirs" version = "3.1.1" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1798,7 +1729,6 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytes name = "prometheus-client" version = "0.17.0" description = "Python client for the Prometheus monitoring system." 
-category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1813,7 +1743,6 @@ twisted = ["twisted"] name = "psycopg2" version = "2.9.6" description = "psycopg2 - Python-PostgreSQL Database Adapter" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -1836,7 +1765,6 @@ files = [ name = "psycopg2cffi" version = "2.9.0" description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=master" -category = "main" optional = true python-versions = "*" files = [ @@ -1851,7 +1779,6 @@ six = "*" name = "psycopg2cffi-compat" version = "1.1" description = "A Simple library to enable psycopg2 compatability" -category = "main" optional = true python-versions = "*" files = [ @@ -1865,7 +1792,6 @@ psycopg2 = "*" name = "pyasn1" version = "0.5.0" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ @@ -1877,7 +1803,6 @@ files = [ name = "pyasn1-modules" version = "0.3.0" description = "A collection of ASN.1-based protocols modules" -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ @@ -1892,7 +1817,6 @@ pyasn1 = ">=0.4.6,<0.6.0" name = "pycparser" version = "2.21" description = "C parser in Python" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -1904,7 +1828,6 @@ files = [ name = "pydantic" version = "1.10.8" description = "Data validation and settings management using python type hints" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1957,7 +1880,6 @@ email = ["email-validator (>=1.0.3)"] name = "pygithub" version = "1.58.2" description = "Use the full Github API v3" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1975,7 +1897,6 @@ requests = ">=2.14.0" name = "pygments" version = "2.14.0" description = "Pygments is a syntax highlighting package written in Python." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1990,7 +1911,6 @@ plugins = ["importlib-metadata"] name = "pyicu" version = "2.11" description = "Python extension wrapping the ICU C++ API" -category = "main" optional = true python-versions = "*" files = [ @@ -2001,7 +1921,6 @@ files = [ name = "pyjwt" version = "2.6.0" description = "JSON Web Token implementation in Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2022,7 +1941,6 @@ tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] name = "pymacaroons" version = "0.13.0" description = "Macaroon library for Python" -category = "main" optional = false python-versions = "*" files = [ @@ -2038,7 +1956,6 @@ six = ">=1.8.0" name = "pympler" version = "1.0.1" description = "A development tool to measure, monitor and analyze the memory behavior of Python objects." 
-category = "main" optional = true python-versions = ">=3.6" files = [ @@ -2050,7 +1967,6 @@ files = [ name = "pynacl" version = "1.5.0" description = "Python binding to the Networking and Cryptography (NaCl) library" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -2077,7 +1993,6 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] name = "pyopenssl" version = "23.1.1" description = "Python wrapper module around the OpenSSL library" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -2096,7 +2011,6 @@ test = ["flaky", "pretend", "pytest (>=3.0.1)"] name = "pyrsistent" version = "0.19.3" description = "Persistent/Functional/Immutable data structures" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2133,7 +2047,6 @@ files = [ name = "pysaml2" version = "7.3.1" description = "Python implementation of SAML Version 2 Standard" -category = "main" optional = true python-versions = ">=3.6.2,<4.0.0" files = [ @@ -2159,7 +2072,6 @@ s2repoze = ["paste", "repoze.who", "zope.interface"] name = "python-dateutil" version = "2.8.2" description = "Extensions to the standard Python datetime module" -category = "main" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ @@ -2174,7 +2086,6 @@ six = ">=1.5" name = "pytz" version = "2022.7.1" description = "World timezone definitions, modern and historical" -category = "main" optional = false python-versions = "*" files = [ @@ -2186,7 +2097,6 @@ files = [ name = "pywin32-ctypes" version = "0.2.0" description = "" -category = "dev" optional = false python-versions = "*" files = [ @@ -2198,7 +2108,6 @@ files = [ name = "pyyaml" version = "6.0" description = "YAML parser and emitter for Python" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -2248,7 +2157,6 @@ files = [ name = "readme-renderer" version = "37.3" description = "readme_renderer is a library for rendering \"readme\" descriptions for Warehouse" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2268,7 +2176,6 @@ md = ["cmarkgfm (>=0.8.0)"] name = "requests" version = "2.31.0" description = "Python HTTP for Humans." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2290,7 +2197,6 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] name = "requests-toolbelt" version = "0.10.1" description = "A utility belt for advanced users of python-requests" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -2305,7 +2211,6 @@ requests = ">=2.0.1,<3.0.0" name = "rfc3986" version = "2.0.0" description = "Validating URI References per RFC 3986" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2320,7 +2225,6 @@ idna2008 = ["idna"] name = "rich" version = "13.3.2" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" -category = "dev" optional = false python-versions = ">=3.7.0" files = [ @@ -2340,7 +2244,6 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] name = "ruff" version = "0.0.265" description = "An extremely fast Python linter, written in Rust." 
-category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2367,7 +2270,6 @@ files = [ name = "secretstorage" version = "3.3.3" description = "Python bindings to FreeDesktop.org Secret Service API" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -2383,7 +2285,6 @@ jeepney = ">=0.6" name = "semantic-version" version = "2.10.0" description = "A library implementing the 'SemVer' scheme." -category = "main" optional = false python-versions = ">=2.7" files = [ @@ -2399,7 +2300,6 @@ doc = ["Sphinx", "sphinx-rtd-theme"] name = "sentry-sdk" version = "1.25.0" description = "Python client for Sentry (https://sentry.io)" -category = "main" optional = true python-versions = "*" files = [ @@ -2442,7 +2342,6 @@ tornado = ["tornado (>=5)"] name = "service-identity" version = "21.1.0" description = "Service identity verification for pyOpenSSL & cryptography." -category = "main" optional = false python-versions = "*" files = [ @@ -2467,7 +2366,6 @@ tests = ["coverage[toml] (>=5.0.2)", "pytest"] name = "setuptools" version = "67.6.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2484,7 +2382,6 @@ testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs ( name = "setuptools-rust" version = "1.6.0" description = "Setuptools Rust extension plugin" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2501,7 +2398,6 @@ typing-extensions = ">=3.7.4.3" name = "signedjson" version = "1.1.4" description = "Sign JSON with Ed25519 signatures" -category = "main" optional = false python-versions = "*" files = [ @@ -2523,7 +2419,6 @@ dev = ["typing-extensions (>=3.5)"] name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -2535,7 +2430,6 @@ files = [ name = "smmap" version = "5.0.0" description = "A pure Python implementation of a sliding window memory map manager" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -2547,7 +2441,6 @@ files = [ name = "snowballstemmer" version = "2.2.0" description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." -category = "dev" optional = false python-versions = "*" files = [ @@ -2559,7 +2452,6 @@ files = [ name = "sortedcontainers" version = "2.4.0" description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set" -category = "main" optional = false python-versions = "*" files = [ @@ -2571,7 +2463,6 @@ files = [ name = "soupsieve" version = "2.4" description = "A modern CSS selector implementation for Beautiful Soup." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2583,7 +2474,6 @@ files = [ name = "sphinx" version = "6.2.1" description = "Python documentation generator" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -2619,7 +2509,6 @@ test = ["cython", "filelock", "html5lib", "pytest (>=4.6)"] name = "sphinx-autodoc2" version = "0.4.2" description = "Analyse a python project and create documentation for it." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -2642,7 +2531,6 @@ testing = ["pytest", "pytest-cov", "pytest-regressions", "sphinx (>=4.0.0)"] name = "sphinx-basic-ng" version = "1.0.0b1" description = "A modern skeleton for Sphinx themes." 
-category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2660,7 +2548,6 @@ docs = ["furo", "ipython", "myst-parser", "sphinx-copybutton", "sphinx-inline-ta name = "sphinxcontrib-applehelp" version = "1.0.4" description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -2676,7 +2563,6 @@ test = ["pytest"] name = "sphinxcontrib-devhelp" version = "1.0.2" description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document." -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -2692,7 +2578,6 @@ test = ["pytest"] name = "sphinxcontrib-htmlhelp" version = "2.0.1" description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -2708,7 +2593,6 @@ test = ["html5lib", "pytest"] name = "sphinxcontrib-jsmath" version = "1.0.1" description = "A sphinx extension which renders display math in HTML via JavaScript" -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -2723,7 +2607,6 @@ test = ["flake8", "mypy", "pytest"] name = "sphinxcontrib-qthelp" version = "1.0.3" description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document." -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -2739,7 +2622,6 @@ test = ["pytest"] name = "sphinxcontrib-serializinghtml" version = "1.1.5" description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)." -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -2755,7 +2637,6 @@ test = ["pytest"] name = "systemd-python" version = "235" description = "Python interface for libsystemd" -category = "main" optional = true python-versions = "*" files = [ @@ -2766,7 +2647,6 @@ files = [ name = "threadloop" version = "1.0.2" description = "Tornado IOLoop Backed Concurrent Futures" -category = "main" optional = true python-versions = "*" files = [ @@ -2781,7 +2661,6 @@ tornado = "*" name = "thrift" version = "0.16.0" description = "Python bindings for the Apache Thrift RPC system" -category = "main" optional = true python-versions = "*" files = [ @@ -2800,7 +2679,6 @@ twisted = ["twisted"] name = "tomli" version = "2.0.1" description = "A lil' TOML parser" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2812,7 +2690,6 @@ files = [ name = "tornado" version = "6.2" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." -category = "main" optional = true python-versions = ">= 3.7" files = [ @@ -2833,7 +2710,6 @@ files = [ name = "towncrier" version = "22.12.0" description = "Building newsfiles for your project." 
-category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2856,7 +2732,6 @@ dev = ["furo", "packaging", "sphinx (>=5)", "twisted"] name = "treq" version = "22.2.0" description = "High-level Twisted HTTP Client API" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -2879,7 +2754,6 @@ docs = ["sphinx (>=1.4.8)"] name = "twine" version = "4.0.2" description = "Collection of utilities for publishing packages on PyPI" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2902,7 +2776,6 @@ urllib3 = ">=1.26.0" name = "twisted" version = "22.10.0" description = "An asynchronous networking framework written in Python" -category = "main" optional = false python-versions = ">=3.7.1" files = [ @@ -2944,7 +2817,6 @@ windows-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0. name = "twisted-iocpsupport" version = "1.0.2" description = "An extension for use in the twisted I/O Completion Ports reactor." -category = "main" optional = false python-versions = "*" files = [ @@ -2966,7 +2838,6 @@ files = [ name = "txredisapi" version = "1.4.9" description = "non-blocking redis client for python" -category = "main" optional = true python-versions = "*" files = [ @@ -2982,7 +2853,6 @@ twisted = "*" name = "typed-ast" version = "1.5.4" description = "a fork of Python 2 and 3 ast modules with type comment support" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -3016,7 +2886,6 @@ files = [ name = "types-bleach" version = "6.0.0.3" description = "Typing stubs for bleach" -category = "dev" optional = false python-versions = "*" files = [ @@ -3028,7 +2897,6 @@ files = [ name = "types-commonmark" version = "0.9.2.3" description = "Typing stubs for commonmark" -category = "dev" optional = false python-versions = "*" files = [ @@ -3040,7 +2908,6 @@ files = [ name = "types-jsonschema" version = "4.17.0.8" description = "Typing stubs for jsonschema" -category = "dev" optional = false python-versions = "*" files = [ @@ -3052,7 +2919,6 @@ files = [ name = "types-netaddr" version = "0.8.0.8" description = "Typing stubs for netaddr" -category = "dev" optional = false python-versions = "*" files = [ @@ -3064,7 +2930,6 @@ files = [ name = "types-opentracing" version = "2.4.10.4" description = "Typing stubs for opentracing" -category = "dev" optional = false python-versions = "*" files = [ @@ -3076,7 +2941,6 @@ files = [ name = "types-pillow" version = "9.5.0.4" description = "Typing stubs for Pillow" -category = "dev" optional = false python-versions = "*" files = [ @@ -3088,7 +2952,6 @@ files = [ name = "types-psycopg2" version = "2.9.21.10" description = "Typing stubs for psycopg2" -category = "dev" optional = false python-versions = "*" files = [ @@ -3098,14 +2961,13 @@ files = [ [[package]] name = "types-pyopenssl" -version = "23.1.0.2" +version = "23.2.0.0" description = "Typing stubs for pyOpenSSL" -category = "dev" optional = false python-versions = "*" files = [ - {file = "types-pyOpenSSL-23.1.0.2.tar.gz", hash = "sha256:20b80971b86240e8432a1832bd8124cea49c3088c7bfc77dfd23be27ffe4a517"}, - {file = "types_pyOpenSSL-23.1.0.2-py3-none-any.whl", hash = "sha256:b050641aeff6dfebf231ad719bdac12d53b8ee818d4afb67b886333484629957"}, + {file = "types-pyOpenSSL-23.2.0.0.tar.gz", hash = "sha256:43e307e8dfb3a7a8208a19874ca060305f460c529d4eaca8a2669ea89499f244"}, + {file = "types_pyOpenSSL-23.2.0.0-py3-none-any.whl", hash = "sha256:ba803a99440b0c2e9ab4e197084aeefc55bdfe8a580d367b2aa4210810a21240"}, ] [package.dependencies] 
@@ -3115,7 +2977,6 @@ cryptography = ">=35.0.0" name = "types-pyyaml" version = "6.0.12.10" description = "Typing stubs for PyYAML" -category = "dev" optional = false python-versions = "*" files = [ @@ -3127,7 +2988,6 @@ files = [ name = "types-requests" version = "2.31.0.1" description = "Typing stubs for requests" -category = "dev" optional = false python-versions = "*" files = [ @@ -3142,7 +3002,6 @@ types-urllib3 = "*" name = "types-setuptools" version = "67.8.0.0" description = "Typing stubs for setuptools" -category = "dev" optional = false python-versions = "*" files = [ @@ -3154,7 +3013,6 @@ files = [ name = "types-urllib3" version = "1.26.25.8" description = "Typing stubs for urllib3" -category = "dev" optional = false python-versions = "*" files = [ @@ -3166,7 +3024,6 @@ files = [ name = "typing-extensions" version = "4.5.0" description = "Backported and Experimental Type Hints for Python 3.7+" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -3178,7 +3035,6 @@ files = [ name = "unpaddedbase64" version = "2.1.0" description = "Encode and decode Base64 without \"=\" padding" -category = "main" optional = false python-versions = ">=3.6,<4.0" files = [ @@ -3190,7 +3046,6 @@ files = [ name = "urllib3" version = "1.26.15" description = "HTTP library with thread-safe connection pooling, file post, and more." -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ @@ -3207,7 +3062,6 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] name = "webencodings" version = "0.5.1" description = "Character encoding aliases for legacy web content" -category = "main" optional = false python-versions = "*" files = [ @@ -3219,7 +3073,6 @@ files = [ name = "wrapt" version = "1.15.0" description = "Module for decorators, wrappers and monkey patching." 
-category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" files = [ @@ -3304,7 +3157,6 @@ files = [ name = "xmlschema" version = "2.2.2" description = "An XML Schema validator and decoder" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -3324,7 +3176,6 @@ docs = ["Sphinx", "elementpath (>=4.0.0,<5.0.0)", "jinja2", "sphinx-rtd-theme"] name = "zipp" version = "3.15.0" description = "Backport of pathlib-compatible object wrapper for zip files" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -3340,7 +3191,6 @@ testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more name = "zope-event" version = "4.6" description = "Very basic event publishing system" -category = "dev" optional = false python-versions = "*" files = [ @@ -3359,7 +3209,6 @@ test = ["zope.testrunner"] name = "zope-interface" version = "6.0" description = "Interfaces for Python" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -3407,7 +3256,6 @@ testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"] name = "zope-schema" version = "7.0.1" description = "zope.interface extension for defining data schemas" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3425,18 +3273,18 @@ docs = ["Sphinx", "repoze.sphinx.autointerface"] test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"] [extras] -all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "txredisapi", "hiredis", "Pympler", "pyicu"] +all = ["Pympler", "authlib", "hiredis", "jaeger-client", "lxml", "matrix-synapse-ldap3", "opentracing", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pyicu", "pysaml2", "sentry-sdk", "txredisapi"] cache-memory = ["Pympler"] jwt = ["authlib"] matrix-synapse-ldap3 = ["matrix-synapse-ldap3"] oidc = ["authlib"] opentracing = ["jaeger-client", "opentracing"] postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"] -redis = ["txredisapi", "hiredis"] +redis = ["hiredis", "txredisapi"] saml2 = ["pysaml2"] sentry = ["sentry-sdk"] systemd = ["systemd-python"] -test = ["parameterized", "idna"] +test = ["idna", "parameterized"] url-preview = ["lxml"] user-search = ["pyicu"] From 046e7e494a11f9a23c0dcd2defae595b35a37579 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jun 2023 09:17:40 +0100 Subject: [PATCH 120/562] Bump phonenumbers from 8.13.11 to 8.13.13 (#15763) Bumps [phonenumbers](https://github.com/daviddrysdale/python-phonenumbers) from 8.13.11 to 8.13.13. - [Commits](https://github.com/daviddrysdale/python-phonenumbers/compare/v8.13.11...v8.13.13) --- updated-dependencies: - dependency-name: phonenumbers dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 228fccac9c..d726407c58 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1583,13 +1583,13 @@ files = [ [[package]] name = "phonenumbers" -version = "8.13.11" +version = "8.13.13" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." 
optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.11-py2.py3-none-any.whl", hash = "sha256:107469114fd297258a485bdf8238d0522cb392db1257faf2bf23384ecbdb0e8a"}, - {file = "phonenumbers-8.13.11.tar.gz", hash = "sha256:3e3274d88cab3609b55ff5b93417075dbca2d13064f103fbf562e0ea1dda0f9a"}, + {file = "phonenumbers-8.13.13-py2.py3-none-any.whl", hash = "sha256:55657adb607484aba6d56270b8a1f9b302f35496076e6c02051d06ed366374d9"}, + {file = "phonenumbers-8.13.13.tar.gz", hash = "sha256:4bdf8c989aff0cdb105aef170ad2c21f14b4537bcb32cf349f1f710df992a40a"}, ] [[package]] From aad7e2d0c18ee7ba87bef3750da3c962acc2fd95 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jun 2023 09:19:01 +0100 Subject: [PATCH 121/562] Bump sentry-sdk from 1.25.0 to 1.25.1 (#15764) Bumps [sentry-sdk](https://github.com/getsentry/sentry-python) from 1.25.0 to 1.25.1. - [Release notes](https://github.com/getsentry/sentry-python/releases) - [Changelog](https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md) - [Commits](https://github.com/getsentry/sentry-python/compare/1.25.0...1.25.1) --- updated-dependencies: - dependency-name: sentry-sdk dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index d726407c58..f2221680a8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2298,13 +2298,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "1.25.0" +version = "1.25.1" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = "*" files = [ - {file = "sentry-sdk-1.25.0.tar.gz", hash = "sha256:5be3296fc574fa8a4d9b213b4dcf8c8d0246c08f8bd78315c6286f386c37555a"}, - {file = "sentry_sdk-1.25.0-py2.py3-none-any.whl", hash = "sha256:fe85cf5d0b3d0aa3480df689f9f6dc487de783defb0a95043368375dc893645e"}, + {file = "sentry-sdk-1.25.1.tar.gz", hash = "sha256:aa796423eb6a2f4a8cd7a5b02ba6558cb10aab4ccdc0537f63a47b038c520c38"}, + {file = "sentry_sdk-1.25.1-py2.py3-none-any.whl", hash = "sha256:79afb7c896014038e358401ad1d36889f97a129dfa8031c49b3f238cd1aa3935"}, ] [package.dependencies] From 0aa731cb6f1e145cf399a948e14f77d4e3720190 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jun 2023 09:19:43 +0100 Subject: [PATCH 122/562] Bump pydantic from 1.10.8 to 1.10.9 (#15762) Bumps [pydantic](https://github.com/pydantic/pydantic) from 1.10.8 to 1.10.9. - [Release notes](https://github.com/pydantic/pydantic/releases) - [Changelog](https://github.com/pydantic/pydantic/blob/main/HISTORY.md) - [Commits](https://github.com/pydantic/pydantic/compare/v1.10.8...v1.10.9) --- updated-dependencies: - dependency-name: pydantic dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 74 ++++++++++++++++++++++++++--------------------------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/poetry.lock b/poetry.lock index f2221680a8..5e41682894 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1826,47 +1826,47 @@ files = [ [[package]] name = "pydantic" -version = "1.10.8" +version = "1.10.9" description = "Data validation and settings management using python type hints" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1243d28e9b05003a89d72e7915fdb26ffd1d39bdd39b00b7dbe4afae4b557f9d"}, - {file = "pydantic-1.10.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0ab53b609c11dfc0c060d94335993cc2b95b2150e25583bec37a49b2d6c6c3f"}, - {file = "pydantic-1.10.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9613fadad06b4f3bc5db2653ce2f22e0de84a7c6c293909b48f6ed37b83c61f"}, - {file = "pydantic-1.10.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df7800cb1984d8f6e249351139667a8c50a379009271ee6236138a22a0c0f319"}, - {file = "pydantic-1.10.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0c6fafa0965b539d7aab0a673a046466d23b86e4b0e8019d25fd53f4df62c277"}, - {file = "pydantic-1.10.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e82d4566fcd527eae8b244fa952d99f2ca3172b7e97add0b43e2d97ee77f81ab"}, - {file = "pydantic-1.10.8-cp310-cp310-win_amd64.whl", hash = "sha256:ab523c31e22943713d80d8d342d23b6f6ac4b792a1e54064a8d0cf78fd64e800"}, - {file = "pydantic-1.10.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:666bdf6066bf6dbc107b30d034615d2627e2121506c555f73f90b54a463d1f33"}, - {file = "pydantic-1.10.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:35db5301b82e8661fa9c505c800d0990bc14e9f36f98932bb1d248c0ac5cada5"}, - {file = "pydantic-1.10.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f90c1e29f447557e9e26afb1c4dbf8768a10cc676e3781b6a577841ade126b85"}, - {file = "pydantic-1.10.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93e766b4a8226e0708ef243e843105bf124e21331694367f95f4e3b4a92bbb3f"}, - {file = "pydantic-1.10.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:88f195f582851e8db960b4a94c3e3ad25692c1c1539e2552f3df7a9e972ef60e"}, - {file = "pydantic-1.10.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:34d327c81e68a1ecb52fe9c8d50c8a9b3e90d3c8ad991bfc8f953fb477d42fb4"}, - {file = "pydantic-1.10.8-cp311-cp311-win_amd64.whl", hash = "sha256:d532bf00f381bd6bc62cabc7d1372096b75a33bc197a312b03f5838b4fb84edd"}, - {file = "pydantic-1.10.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7d5b8641c24886d764a74ec541d2fc2c7fb19f6da2a4001e6d580ba4a38f7878"}, - {file = "pydantic-1.10.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b1f6cb446470b7ddf86c2e57cd119a24959af2b01e552f60705910663af09a4"}, - {file = "pydantic-1.10.8-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c33b60054b2136aef8cf190cd4c52a3daa20b2263917c49adad20eaf381e823b"}, - {file = "pydantic-1.10.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1952526ba40b220b912cdc43c1c32bcf4a58e3f192fa313ee665916b26befb68"}, - {file = "pydantic-1.10.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = 
"sha256:bb14388ec45a7a0dc429e87def6396f9e73c8c77818c927b6a60706603d5f2ea"}, - {file = "pydantic-1.10.8-cp37-cp37m-win_amd64.whl", hash = "sha256:16f8c3e33af1e9bb16c7a91fc7d5fa9fe27298e9f299cff6cb744d89d573d62c"}, - {file = "pydantic-1.10.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1ced8375969673929809d7f36ad322934c35de4af3b5e5b09ec967c21f9f7887"}, - {file = "pydantic-1.10.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:93e6bcfccbd831894a6a434b0aeb1947f9e70b7468f274154d03d71fabb1d7c6"}, - {file = "pydantic-1.10.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:191ba419b605f897ede9892f6c56fb182f40a15d309ef0142212200a10af4c18"}, - {file = "pydantic-1.10.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:052d8654cb65174d6f9490cc9b9a200083a82cf5c3c5d3985db765757eb3b375"}, - {file = "pydantic-1.10.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ceb6a23bf1ba4b837d0cfe378329ad3f351b5897c8d4914ce95b85fba96da5a1"}, - {file = "pydantic-1.10.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f2e754d5566f050954727c77f094e01793bcb5725b663bf628fa6743a5a9108"}, - {file = "pydantic-1.10.8-cp38-cp38-win_amd64.whl", hash = "sha256:6a82d6cda82258efca32b40040228ecf43a548671cb174a1e81477195ed3ed56"}, - {file = "pydantic-1.10.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3e59417ba8a17265e632af99cc5f35ec309de5980c440c255ab1ca3ae96a3e0e"}, - {file = "pydantic-1.10.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:84d80219c3f8d4cad44575e18404099c76851bc924ce5ab1c4c8bb5e2a2227d0"}, - {file = "pydantic-1.10.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e4148e635994d57d834be1182a44bdb07dd867fa3c2d1b37002000646cc5459"}, - {file = "pydantic-1.10.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12f7b0bf8553e310e530e9f3a2f5734c68699f42218bf3568ef49cd9b0e44df4"}, - {file = "pydantic-1.10.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:42aa0c4b5c3025483240a25b09f3c09a189481ddda2ea3a831a9d25f444e03c1"}, - {file = "pydantic-1.10.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:17aef11cc1b997f9d574b91909fed40761e13fac438d72b81f902226a69dac01"}, - {file = "pydantic-1.10.8-cp39-cp39-win_amd64.whl", hash = "sha256:66a703d1983c675a6e0fed8953b0971c44dba48a929a2000a493c3772eb61a5a"}, - {file = "pydantic-1.10.8-py3-none-any.whl", hash = "sha256:7456eb22ed9aaa24ff3e7b4757da20d9e5ce2a81018c1b3ebd81a0b88a18f3b2"}, - {file = "pydantic-1.10.8.tar.gz", hash = "sha256:1410275520dfa70effadf4c21811d755e7ef9bb1f1d077a21958153a92c8d9ca"}, + {file = "pydantic-1.10.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e692dec4a40bfb40ca530e07805b1208c1de071a18d26af4a2a0d79015b352ca"}, + {file = "pydantic-1.10.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c52eb595db83e189419bf337b59154bdcca642ee4b2a09e5d7797e41ace783f"}, + {file = "pydantic-1.10.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:939328fd539b8d0edf244327398a667b6b140afd3bf7e347cf9813c736211896"}, + {file = "pydantic-1.10.9-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b48d3d634bca23b172f47f2335c617d3fcb4b3ba18481c96b7943a4c634f5c8d"}, + {file = "pydantic-1.10.9-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:f0b7628fb8efe60fe66fd4adadd7ad2304014770cdc1f4934db41fe46cc8825f"}, + {file = "pydantic-1.10.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:e1aa5c2410769ca28aa9a7841b80d9d9a1c5f223928ca8bec7e7c9a34d26b1d4"}, + {file = "pydantic-1.10.9-cp310-cp310-win_amd64.whl", hash = "sha256:eec39224b2b2e861259d6f3c8b6290d4e0fbdce147adb797484a42278a1a486f"}, + {file = "pydantic-1.10.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d111a21bbbfd85c17248130deac02bbd9b5e20b303338e0dbe0faa78330e37e0"}, + {file = "pydantic-1.10.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e9aec8627a1a6823fc62fb96480abe3eb10168fd0d859ee3d3b395105ae19a7"}, + {file = "pydantic-1.10.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07293ab08e7b4d3c9d7de4949a0ea571f11e4557d19ea24dd3ae0c524c0c334d"}, + {file = "pydantic-1.10.9-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ee829b86ce984261d99ff2fd6e88f2230068d96c2a582f29583ed602ef3fc2c"}, + {file = "pydantic-1.10.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4b466a23009ff5cdd7076eb56aca537c745ca491293cc38e72bf1e0e00de5b91"}, + {file = "pydantic-1.10.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7847ca62e581e6088d9000f3c497267868ca2fa89432714e21a4fb33a04d52e8"}, + {file = "pydantic-1.10.9-cp311-cp311-win_amd64.whl", hash = "sha256:7845b31959468bc5b78d7b95ec52fe5be32b55d0d09983a877cca6aedc51068f"}, + {file = "pydantic-1.10.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:517a681919bf880ce1dac7e5bc0c3af1e58ba118fd774da2ffcd93c5f96eaece"}, + {file = "pydantic-1.10.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67195274fd27780f15c4c372f4ba9a5c02dad6d50647b917b6a92bf00b3d301a"}, + {file = "pydantic-1.10.9-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2196c06484da2b3fded1ab6dbe182bdabeb09f6318b7fdc412609ee2b564c49a"}, + {file = "pydantic-1.10.9-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6257bb45ad78abacda13f15bde5886efd6bf549dd71085e64b8dcf9919c38b60"}, + {file = "pydantic-1.10.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3283b574b01e8dbc982080d8287c968489d25329a463b29a90d4157de4f2baaf"}, + {file = "pydantic-1.10.9-cp37-cp37m-win_amd64.whl", hash = "sha256:5f8bbaf4013b9a50e8100333cc4e3fa2f81214033e05ac5aa44fa24a98670a29"}, + {file = "pydantic-1.10.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9cd67fb763248cbe38f0593cd8611bfe4b8ad82acb3bdf2b0898c23415a1f82"}, + {file = "pydantic-1.10.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f50e1764ce9353be67267e7fd0da08349397c7db17a562ad036aa7c8f4adfdb6"}, + {file = "pydantic-1.10.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73ef93e5e1d3c8e83f1ff2e7fdd026d9e063c7e089394869a6e2985696693766"}, + {file = "pydantic-1.10.9-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:128d9453d92e6e81e881dd7e2484e08d8b164da5507f62d06ceecf84bf2e21d3"}, + {file = "pydantic-1.10.9-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ad428e92ab68798d9326bb3e5515bc927444a3d71a93b4a2ca02a8a5d795c572"}, + {file = "pydantic-1.10.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fab81a92f42d6d525dd47ced310b0c3e10c416bbfae5d59523e63ea22f82b31e"}, + {file = "pydantic-1.10.9-cp38-cp38-win_amd64.whl", hash = "sha256:963671eda0b6ba6926d8fc759e3e10335e1dc1b71ff2a43ed2efd6996634dafb"}, + {file = "pydantic-1.10.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:970b1bdc6243ef663ba5c7e36ac9ab1f2bfecb8ad297c9824b542d41a750b298"}, + {file = "pydantic-1.10.9-cp39-cp39-macosx_11_0_arm64.whl", 
hash = "sha256:7e1d5290044f620f80cf1c969c542a5468f3656de47b41aa78100c5baa2b8276"}, + {file = "pydantic-1.10.9-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83fcff3c7df7adff880622a98022626f4f6dbce6639a88a15a3ce0f96466cb60"}, + {file = "pydantic-1.10.9-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0da48717dc9495d3a8f215e0d012599db6b8092db02acac5e0d58a65248ec5bc"}, + {file = "pydantic-1.10.9-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0a2aabdc73c2a5960e87c3ffebca6ccde88665616d1fd6d3db3178ef427b267a"}, + {file = "pydantic-1.10.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9863b9420d99dfa9c064042304868e8ba08e89081428a1c471858aa2af6f57c4"}, + {file = "pydantic-1.10.9-cp39-cp39-win_amd64.whl", hash = "sha256:e7c9900b43ac14110efa977be3da28931ffc74c27e96ee89fbcaaf0b0fe338e1"}, + {file = "pydantic-1.10.9-py3-none-any.whl", hash = "sha256:6cafde02f6699ce4ff643417d1a9223716ec25e228ddc3b436fe7e2d25a1f305"}, + {file = "pydantic-1.10.9.tar.gz", hash = "sha256:95c70da2cd3b6ddf3b9645ecaa8d98f3d80c606624b6d245558d202cd23ea3be"}, ] [package.dependencies] From 9e321e0098d069711674371c8c3a3cdc80df0c16 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jun 2023 09:20:55 +0100 Subject: [PATCH 123/562] Bump pyopenssl from 23.1.1 to 23.2.0 (#15765) Bumps [pyopenssl](https://github.com/pyca/pyopenssl) from 23.1.1 to 23.2.0. - [Changelog](https://github.com/pyca/pyopenssl/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pyca/pyopenssl/compare/23.1.1...23.2.0) --- updated-dependencies: - dependency-name: pyopenssl dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 5e41682894..cf4a89c85a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1991,17 +1991,17 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] name = "pyopenssl" -version = "23.1.1" +version = "23.2.0" description = "Python wrapper module around the OpenSSL library" optional = false python-versions = ">=3.6" files = [ - {file = "pyOpenSSL-23.1.1-py3-none-any.whl", hash = "sha256:9e0c526404a210df9d2b18cd33364beadb0dc858a739b885677bc65e105d4a4c"}, - {file = "pyOpenSSL-23.1.1.tar.gz", hash = "sha256:841498b9bec61623b1b6c47ebbc02367c07d60e0e195f19790817f10cc8db0b7"}, + {file = "pyOpenSSL-23.2.0-py3-none-any.whl", hash = "sha256:24f0dc5227396b3e831f4c7f602b950a5e9833d292c8e4a2e06b709292806ae2"}, + {file = "pyOpenSSL-23.2.0.tar.gz", hash = "sha256:276f931f55a452e7dea69c7173e984eb2a4407ce413c918aa34b55f82f9b8bac"}, ] [package.dependencies] -cryptography = ">=38.0.0,<41" +cryptography = ">=38.0.0,<40.0.0 || >40.0.0,<40.0.1 || >40.0.1,<42" [package.extras] docs = ["sphinx (!=5.2.0,!=5.2.0.post0)", "sphinx-rtd-theme"] From 42eb4fea1c671bd7a3eacf329c9afc6644081e4f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jun 2023 09:21:20 +0100 Subject: [PATCH 124/562] Bump serde from 1.0.163 to 1.0.164 (#15760) Bumps [serde](https://github.com/serde-rs/serde) from 1.0.163 to 1.0.164. 
- [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.163...v1.0.164) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 08331385c0..f34a72c269 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -320,18 +320,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.163" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" +checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.163" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" +checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ "proc-macro2", "quote", From 0b104364f9f118be0ec722894650fad9583bf59c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jun 2023 09:22:21 +0100 Subject: [PATCH 125/562] Bump pyo3-log from 0.8.1 to 0.8.2 (#15759) Bumps [pyo3-log](https://github.com/vorner/pyo3-log) from 0.8.1 to 0.8.2. - [Changelog](https://github.com/vorner/pyo3-log/blob/main/CHANGELOG.md) - [Commits](https://github.com/vorner/pyo3-log/compare/v0.8.1...v0.8.2) --- updated-dependencies: - dependency-name: pyo3-log dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f34a72c269..4f75452b3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -229,9 +229,9 @@ dependencies = [ [[package]] name = "pyo3-log" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9c8b57fe71fb5dcf38970ebedc2b1531cf1c14b1b9b4c560a182a57e115575c" +checksum = "c94ff6535a6bae58d7d0b85e60d4c53f7f84d0d0aa35d6a28c3f3e70bfe51444" dependencies = [ "arc-swap", "log", From ba97b39881e296f4775b8f6dd18edb98a3dc733f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 12 Jun 2023 14:27:11 +0100 Subject: [PATCH 126/562] Bump minimum supported Rust version (#15768) Important crates such as `log` and `regex` have bumped theirs to 1.60.0 as well. 
--- .github/workflows/tests.yml | 18 +++++++++--------- changelog.d/15768.misc | 1 + docs/upgrade.md | 8 ++++++++ rust/Cargo.toml | 2 +- 4 files changed, 19 insertions(+), 10 deletions(-) create mode 100644 changelog.d/15768.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 02a4be3a24..a0d1c24e90 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -35,7 +35,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@1.58.1 + uses: dtolnay/rust-toolchain@1.60.0 - uses: Swatinem/rust-cache@v2 - uses: matrix-org/setup-python-poetry@v1 with: @@ -93,7 +93,7 @@ jobs: uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@1.58.1 + uses: dtolnay/rust-toolchain@1.60.0 - uses: Swatinem/rust-cache@v2 - name: Setup Poetry @@ -150,7 +150,7 @@ jobs: with: ref: ${{ github.event.pull_request.head.sha }} - name: Install Rust - uses: dtolnay/rust-toolchain@1.58.1 + uses: dtolnay/rust-toolchain@1.60.0 - uses: Swatinem/rust-cache@v2 - uses: matrix-org/setup-python-poetry@v1 with: @@ -167,7 +167,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@1.58.1 + uses: dtolnay/rust-toolchain@1.60.0 with: components: clippy - uses: Swatinem/rust-cache@v2 @@ -268,7 +268,7 @@ jobs: postgres:${{ matrix.job.postgres-version }} - name: Install Rust - uses: dtolnay/rust-toolchain@1.58.1 + uses: dtolnay/rust-toolchain@1.60.0 - uses: Swatinem/rust-cache@v2 - uses: matrix-org/setup-python-poetry@v1 @@ -308,7 +308,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@1.58.1 + uses: dtolnay/rust-toolchain@1.60.0 - uses: Swatinem/rust-cache@v2 # There aren't wheels for some of the older deps, so we need to install @@ -416,7 +416,7 @@ jobs: run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers - name: Install Rust - uses: dtolnay/rust-toolchain@1.58.1 + uses: dtolnay/rust-toolchain@1.60.0 - uses: Swatinem/rust-cache@v2 - name: Run SyTest @@ -556,7 +556,7 @@ jobs: path: synapse - name: Install Rust - uses: dtolnay/rust-toolchain@1.58.1 + uses: dtolnay/rust-toolchain@1.60.0 - uses: Swatinem/rust-cache@v2 - uses: actions/setup-go@v4 @@ -584,7 +584,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@1.58.1 + uses: dtolnay/rust-toolchain@1.60.0 - uses: Swatinem/rust-cache@v2 - run: cargo test diff --git a/changelog.d/15768.misc b/changelog.d/15768.misc new file mode 100644 index 0000000000..bc4b86323c --- /dev/null +++ b/changelog.d/15768.misc @@ -0,0 +1 @@ +Bump minimum supported Rust version to 1.60.0. diff --git a/docs/upgrade.md b/docs/upgrade.md index 49ab00c057..4cd38b1393 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -87,6 +87,14 @@ process, for example: wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.86.0 + +## Minimum supported Rust version + +The minimum supported Rust version has been increased from v1.58.1 to v1.60.0. +Users building from source will need to ensure their `rustc` version is up to +date. 
+ # Upgrading to v1.85.0 diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 533a8cc677..3ead01c052 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -7,7 +7,7 @@ name = "synapse" version = "0.1.0" edition = "2021" -rust-version = "1.58.1" +rust-version = "1.60.0" [lib] name = "synapse" From 8afc9a4cda9b884bde1f6c87f7cb3087d04418a5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Jun 2023 10:05:13 +0100 Subject: [PATCH 127/562] Bump log from 0.4.18 to 0.4.19 (#15761) Bumps [log](https://github.com/rust-lang/log) from 0.4.18 to 0.4.19. - [Release notes](https://github.com/rust-lang/log/releases) - [Changelog](https://github.com/rust-lang/log/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/log/compare/0.4.18...0.4.19) --- updated-dependencies: - dependency-name: log dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4f75452b3e..9724af5dca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -132,9 +132,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.18" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de" +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "memchr" From 99c850f79821e12ad1895b9505f8612752deea52 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Jun 2023 10:05:29 +0100 Subject: [PATCH 128/562] Bump regex from 1.7.3 to 1.8.4 (#15769) Bumps [regex](https://github.com/rust-lang/regex) from 1.7.3 to 1.8.4. - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.7.3...1.8.4) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9724af5dca..9bb8225226 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "aho-corasick" -version = "0.7.19" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" dependencies = [ "memchr", ] @@ -291,9 +291,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.7.3" +version = "1.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" +checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" dependencies = [ "aho-corasick", "memchr", @@ -302,9 +302,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.29" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" +checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" [[package]] name = "ryu" From 9966eb10a3671958992b57d723fab27b57b6faff Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Tue, 13 Jun 2023 14:30:51 +0200 Subject: [PATCH 129/562] 1.86.0rc1 --- CHANGES.md | 69 +++++++++++++++++++++++++++++++++++++++ changelog.d/12504.misc | 1 - changelog.d/14213.misc | 1 - changelog.d/15388.feature | 1 - changelog.d/15450.feature | 1 - changelog.d/15520.feature | 1 - changelog.d/15582.feature | 1 - changelog.d/15609.bugfix | 1 - changelog.d/15649.misc | 1 - changelog.d/15650.misc | 1 - changelog.d/15674.feature | 1 - changelog.d/15675.misc | 1 - changelog.d/15689.misc | 1 - changelog.d/15690.misc | 1 - changelog.d/15694.misc | 1 - changelog.d/15695.bugfix | 1 - changelog.d/15697.misc | 1 - changelog.d/15705.feature | 1 - changelog.d/15709.misc | 1 - changelog.d/15710.feature | 1 - changelog.d/15721.misc | 1 - changelog.d/15723.misc | 1 - changelog.d/15725.misc | 1 - changelog.d/15726.misc | 1 - changelog.d/15729.misc | 1 - changelog.d/15731.misc | 1 - changelog.d/15732.doc | 1 - changelog.d/15733.misc | 1 - changelog.d/15740.feature | 1 - changelog.d/15752.misc | 1 - changelog.d/15768.misc | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 33 files changed, 76 insertions(+), 31 deletions(-) delete mode 100644 changelog.d/12504.misc delete mode 100644 changelog.d/14213.misc delete mode 100644 changelog.d/15388.feature delete mode 100644 changelog.d/15450.feature delete mode 100644 changelog.d/15520.feature delete mode 100644 changelog.d/15582.feature delete mode 100644 changelog.d/15609.bugfix delete mode 100644 changelog.d/15649.misc delete mode 100644 changelog.d/15650.misc delete mode 100644 changelog.d/15674.feature delete mode 100644 changelog.d/15675.misc delete mode 100644 changelog.d/15689.misc delete mode 100644 changelog.d/15690.misc delete mode 100644 changelog.d/15694.misc delete mode 100644 changelog.d/15695.bugfix delete mode 100644 changelog.d/15697.misc delete mode 100644 changelog.d/15705.feature delete mode 100644 changelog.d/15709.misc delete mode 100644 changelog.d/15710.feature delete mode 100644 changelog.d/15721.misc delete mode 100644 changelog.d/15723.misc delete mode 100644 changelog.d/15725.misc 
delete mode 100644 changelog.d/15726.misc delete mode 100644 changelog.d/15729.misc delete mode 100644 changelog.d/15731.misc delete mode 100644 changelog.d/15732.doc delete mode 100644 changelog.d/15733.misc delete mode 100644 changelog.d/15740.feature delete mode 100644 changelog.d/15752.misc delete mode 100644 changelog.d/15768.misc diff --git a/CHANGES.md b/CHANGES.md index 5debbc35b6..99c246a3bc 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,72 @@ +Synapse 1.86.0rc1 (2023-06-13) +============================== + +Features +-------- + +- Stable support for [MSC3882](https://github.com/matrix-org/matrix-spec-proposals/pull/3882) to allow an existing device/session to generate a login token for use on a new device/session. ([\#15388](https://github.com/matrix-org/synapse/issues/15388)) +- Support resolving a room's [canonical alias](https://spec.matrix.org/v1.7/client-server-api/#mroomcanonical_alias) via the module API. ([\#15450](https://github.com/matrix-org/synapse/issues/15450)) +- Enable support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. ([\#15520](https://github.com/matrix-org/synapse/issues/15520)) +- Experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support: delegate auth to an OIDC provider. ([\#15582](https://github.com/matrix-org/synapse/issues/15582)) +- Add Syanpse version deploy annotations to Grafana dashboard which enables easy correlation between behavior changes witnessed in a graph to a certain Synapse version and nail down regressions. ([\#15674](https://github.com/matrix-org/synapse/issues/15674)) +- Add a catch-all * to the supported relation types when redacting an event and its related events. This is an update to [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) implementation. ([\#15705](https://github.com/matrix-org/synapse/issues/15705)) +- Speed up `/messages` by backfilling in the background when there are no backward extremities where we are directly paginating. ([\#15710](https://github.com/matrix-org/synapse/issues/15710)) +- Expose a metric reporting the database background update status. ([\#15740](https://github.com/matrix-org/synapse/issues/15740)) + + +Bugfixes +-------- + +- Correctly clear caches when we delete a room. ([\#15609](https://github.com/matrix-org/synapse/issues/15609)) +- Check permissions for enabling encryption earlier during room creation to avoid creating broken rooms. ([\#15695](https://github.com/matrix-org/synapse/issues/15695)) + + +Improved Documentation +---------------------- + +- Simplify query to find participating servers in a room. ([\#15732](https://github.com/matrix-org/synapse/issues/15732)) + + +Internal Changes +---------------- + +- Allow for the configuration of max request retries and min/max retry delays in the matrix federation client. ([\#12504](https://github.com/matrix-org/synapse/issues/12504)) +- Log when events are (maybe unexpectedly) filtered out of responses in tests. ([\#14213](https://github.com/matrix-org/synapse/issues/14213)) +- Read from column `full_user_id` rather than `user_id` of tables `profiles` and `user_filters`. ([\#15649](https://github.com/matrix-org/synapse/issues/15649)) +- Add support for tracing functions which return `Awaitable`s. ([\#15650](https://github.com/matrix-org/synapse/issues/15650)) +- Cache requests for user's devices over federation. 
([\#15675](https://github.com/matrix-org/synapse/issues/15675)) +- Add fully qualified docker image names to Dockerfiles. ([\#15689](https://github.com/matrix-org/synapse/issues/15689)) +- Remove some unused code. ([\#15690](https://github.com/matrix-org/synapse/issues/15690)) +- Improve type hints. ([\#15694](https://github.com/matrix-org/synapse/issues/15694), [\#15697](https://github.com/matrix-org/synapse/issues/15697)) +- Update docstring and traces on `maybe_backfill()` functions. ([\#15709](https://github.com/matrix-org/synapse/issues/15709)) +- Add context for when/why to use the `long_retries` option when sending Federation requests. ([\#15721](https://github.com/matrix-org/synapse/issues/15721)) +- Removed some unused fields. ([\#15723](https://github.com/matrix-org/synapse/issues/15723)) +- Update federation error to more plainly explain we can only authorize our own membership events. ([\#15725](https://github.com/matrix-org/synapse/issues/15725)) +- Prevent the `latest_deps` and `twisted_trunk` daily GitHub Actions workflows from running on forks of the codebase. ([\#15726](https://github.com/matrix-org/synapse/issues/15726)) +- Improve performance of user directory search. ([\#15729](https://github.com/matrix-org/synapse/issues/15729)) +- Remove redundant table join with `room_memberships` when doing a `is_host_joined()`/`is_host_invited()` call (`membership` is already part of the `current_state_events`). ([\#15731](https://github.com/matrix-org/synapse/issues/15731)) +- Remove superfluous `room_memberships` join from background update. ([\#15733](https://github.com/matrix-org/synapse/issues/15733)) +- Speed up typechecking CI. ([\#15752](https://github.com/matrix-org/synapse/issues/15752)) +- Bump minimum supported Rust version to 1.60.0. ([\#15768](https://github.com/matrix-org/synapse/issues/15768)) + +### Updates to locked dependencies + +* Bump importlib-metadata from 6.1.0 to 6.6.0. ([\#15711](https://github.com/matrix-org/synapse/issues/15711)) +* Bump library/redis from 6-bullseye to 7-bullseye in /docker. ([\#15712](https://github.com/matrix-org/synapse/issues/15712)) +* Bump log from 0.4.18 to 0.4.19. ([\#15761](https://github.com/matrix-org/synapse/issues/15761)) +* Bump phonenumbers from 8.13.11 to 8.13.13. ([\#15763](https://github.com/matrix-org/synapse/issues/15763)) +* Bump pyasn1 from 0.4.8 to 0.5.0. ([\#15713](https://github.com/matrix-org/synapse/issues/15713)) +* Bump pydantic from 1.10.8 to 1.10.9. ([\#15762](https://github.com/matrix-org/synapse/issues/15762)) +* Bump pyo3-log from 0.8.1 to 0.8.2. ([\#15759](https://github.com/matrix-org/synapse/issues/15759)) +* Bump pyopenssl from 23.1.1 to 23.2.0. ([\#15765](https://github.com/matrix-org/synapse/issues/15765)) +* Bump regex from 1.7.3 to 1.8.4. ([\#15769](https://github.com/matrix-org/synapse/issues/15769)) +* Bump sentry-sdk from 1.22.1 to 1.25.0. ([\#15714](https://github.com/matrix-org/synapse/issues/15714)) +* Bump sentry-sdk from 1.25.0 to 1.25.1. ([\#15764](https://github.com/matrix-org/synapse/issues/15764)) +* Bump serde from 1.0.163 to 1.0.164. ([\#15760](https://github.com/matrix-org/synapse/issues/15760)) +* Bump types-jsonschema from 4.17.0.7 to 4.17.0.8. ([\#15716](https://github.com/matrix-org/synapse/issues/15716)) +* Bump types-pyopenssl from 23.1.0.2 to 23.2.0.0. ([\#15766](https://github.com/matrix-org/synapse/issues/15766)) +* Bump types-requests from 2.31.0.0 to 2.31.0.1. 
([\#15715](https://github.com/matrix-org/synapse/issues/15715)) + Synapse 1.85.2 (2023-06-08) =========================== diff --git a/changelog.d/12504.misc b/changelog.d/12504.misc deleted file mode 100644 index 0bebaa213d..0000000000 --- a/changelog.d/12504.misc +++ /dev/null @@ -1 +0,0 @@ -Allow for the configuration of max request retries and min/max retry delays in the matrix federation client. diff --git a/changelog.d/14213.misc b/changelog.d/14213.misc deleted file mode 100644 index b0689f3d15..0000000000 --- a/changelog.d/14213.misc +++ /dev/null @@ -1 +0,0 @@ -Log when events are (maybe unexpectedly) filtered out of responses in tests. diff --git a/changelog.d/15388.feature b/changelog.d/15388.feature deleted file mode 100644 index 6cc55cafa2..0000000000 --- a/changelog.d/15388.feature +++ /dev/null @@ -1 +0,0 @@ -Stable support for [MSC3882](https://github.com/matrix-org/matrix-spec-proposals/pull/3882) to allow an existing device/session to generate a login token for use on a new device/session. \ No newline at end of file diff --git a/changelog.d/15450.feature b/changelog.d/15450.feature deleted file mode 100644 index 2102381143..0000000000 --- a/changelog.d/15450.feature +++ /dev/null @@ -1 +0,0 @@ -Support resolving a room's [canonical alias](https://spec.matrix.org/v1.7/client-server-api/#mroomcanonical_alias) via the module API. \ No newline at end of file diff --git a/changelog.d/15520.feature b/changelog.d/15520.feature deleted file mode 100644 index f4fd40ab94..0000000000 --- a/changelog.d/15520.feature +++ /dev/null @@ -1 +0,0 @@ -Enable support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. diff --git a/changelog.d/15582.feature b/changelog.d/15582.feature deleted file mode 100644 index 00959500a5..0000000000 --- a/changelog.d/15582.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support: delegate auth to an OIDC provider. diff --git a/changelog.d/15609.bugfix b/changelog.d/15609.bugfix deleted file mode 100644 index b5a990cfec..0000000000 --- a/changelog.d/15609.bugfix +++ /dev/null @@ -1 +0,0 @@ -Correctly clear caches when we delete a room. diff --git a/changelog.d/15649.misc b/changelog.d/15649.misc deleted file mode 100644 index fca38abe0f..0000000000 --- a/changelog.d/15649.misc +++ /dev/null @@ -1 +0,0 @@ -Read from column `full_user_id` rather than `user_id` of tables `profiles` and `user_filters`. diff --git a/changelog.d/15650.misc b/changelog.d/15650.misc deleted file mode 100644 index 9bbad113e1..0000000000 --- a/changelog.d/15650.misc +++ /dev/null @@ -1 +0,0 @@ -Add support for tracing functions which return `Awaitable`s. diff --git a/changelog.d/15674.feature b/changelog.d/15674.feature deleted file mode 100644 index 68cf207dc0..0000000000 --- a/changelog.d/15674.feature +++ /dev/null @@ -1 +0,0 @@ -Add Syanpse version deploy annotations to Grafana dashboard which enables easy correlation between behavior changes witnessed in a graph to a certain Synapse version and nail down regressions. diff --git a/changelog.d/15675.misc b/changelog.d/15675.misc deleted file mode 100644 index 05538fdbef..0000000000 --- a/changelog.d/15675.misc +++ /dev/null @@ -1 +0,0 @@ -Cache requests for user's devices over federation. 
diff --git a/changelog.d/15689.misc b/changelog.d/15689.misc deleted file mode 100644 index 4262cc9515..0000000000 --- a/changelog.d/15689.misc +++ /dev/null @@ -1 +0,0 @@ -Add fully qualified docker image names to Dockerfiles. diff --git a/changelog.d/15690.misc b/changelog.d/15690.misc deleted file mode 100644 index c6c259eb7d..0000000000 --- a/changelog.d/15690.misc +++ /dev/null @@ -1 +0,0 @@ -Remove some unused code. diff --git a/changelog.d/15694.misc b/changelog.d/15694.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/15694.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15695.bugfix b/changelog.d/15695.bugfix deleted file mode 100644 index 99bf1fe05e..0000000000 --- a/changelog.d/15695.bugfix +++ /dev/null @@ -1 +0,0 @@ -Check permissions for enabling encryption earlier during room creation to avoid creating broken rooms. diff --git a/changelog.d/15697.misc b/changelog.d/15697.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/15697.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15705.feature b/changelog.d/15705.feature deleted file mode 100644 index e3cbb5a12e..0000000000 --- a/changelog.d/15705.feature +++ /dev/null @@ -1 +0,0 @@ -Add a catch-all * to the supported relation types when redacting an event and its related events. This is an update to [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) implementation. diff --git a/changelog.d/15709.misc b/changelog.d/15709.misc deleted file mode 100644 index e9ce84a940..0000000000 --- a/changelog.d/15709.misc +++ /dev/null @@ -1 +0,0 @@ -Update docstring and traces on `maybe_backfill()` functions. diff --git a/changelog.d/15710.feature b/changelog.d/15710.feature deleted file mode 100644 index fe77a2fef6..0000000000 --- a/changelog.d/15710.feature +++ /dev/null @@ -1 +0,0 @@ -Speed up `/messages` by backfilling in the background when there are no backward extremities where we are directly paginating. diff --git a/changelog.d/15721.misc b/changelog.d/15721.misc deleted file mode 100644 index f4d892daf9..0000000000 --- a/changelog.d/15721.misc +++ /dev/null @@ -1 +0,0 @@ -Add context for when/why to use the `long_retries` option when sending Federation requests. diff --git a/changelog.d/15723.misc b/changelog.d/15723.misc deleted file mode 100644 index ba331adca7..0000000000 --- a/changelog.d/15723.misc +++ /dev/null @@ -1 +0,0 @@ -Removed some unused fields. diff --git a/changelog.d/15725.misc b/changelog.d/15725.misc deleted file mode 100644 index 6c7a8a41d8..0000000000 --- a/changelog.d/15725.misc +++ /dev/null @@ -1 +0,0 @@ -Update federation error to more plainly explain we can only authorize our own membership events. diff --git a/changelog.d/15726.misc b/changelog.d/15726.misc deleted file mode 100644 index 941e541e77..0000000000 --- a/changelog.d/15726.misc +++ /dev/null @@ -1 +0,0 @@ -Prevent the `latest_deps` and `twisted_trunk` daily GitHub Actions workflows from running on forks of the codebase. \ No newline at end of file diff --git a/changelog.d/15729.misc b/changelog.d/15729.misc deleted file mode 100644 index 3940254305..0000000000 --- a/changelog.d/15729.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance of user directory search. 
diff --git a/changelog.d/15731.misc b/changelog.d/15731.misc deleted file mode 100644 index 906bc26962..0000000000 --- a/changelog.d/15731.misc +++ /dev/null @@ -1 +0,0 @@ -Remove redundant table join with `room_memberships` when doing a `is_host_joined()`/`is_host_invited()` call (`membership` is already part of the `current_state_events`). diff --git a/changelog.d/15732.doc b/changelog.d/15732.doc deleted file mode 100644 index b0e8639df7..0000000000 --- a/changelog.d/15732.doc +++ /dev/null @@ -1 +0,0 @@ -Simplify query to find participating servers in a room. diff --git a/changelog.d/15733.misc b/changelog.d/15733.misc deleted file mode 100644 index 3ae7be3c27..0000000000 --- a/changelog.d/15733.misc +++ /dev/null @@ -1 +0,0 @@ -Remove superfluous `room_memberships` join from background update. diff --git a/changelog.d/15740.feature b/changelog.d/15740.feature deleted file mode 100644 index fed342ea55..0000000000 --- a/changelog.d/15740.feature +++ /dev/null @@ -1 +0,0 @@ -Expose a metric reporting the database background update status. diff --git a/changelog.d/15752.misc b/changelog.d/15752.misc deleted file mode 100644 index 7e373b1275..0000000000 --- a/changelog.d/15752.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up typechecking CI. diff --git a/changelog.d/15768.misc b/changelog.d/15768.misc deleted file mode 100644 index bc4b86323c..0000000000 --- a/changelog.d/15768.misc +++ /dev/null @@ -1 +0,0 @@ -Bump minimum supported Rust version to 1.60.0. diff --git a/debian/changelog b/debian/changelog index a7503ea60a..1c13433c47 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.86.0~rc1) stable; urgency=medium + + * New Synapse release 1.86.0rc1. + + -- Synapse Packaging team Tue, 13 Jun 2023 14:30:45 +0200 + matrix-synapse-py3 (1.85.2) stable; urgency=medium * New Synapse release 1.85.2. diff --git a/pyproject.toml b/pyproject.toml index d42d7644d8..6bbbf95001 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.85.2" +version = "1.86.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 629115836f9d32aad8e2afcf98196753877d70fd Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Tue, 13 Jun 2023 14:38:53 +0200 Subject: [PATCH 130/562] Fix changelog typo --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 99c246a3bc..5412581eef 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -8,7 +8,7 @@ Features - Support resolving a room's [canonical alias](https://spec.matrix.org/v1.7/client-server-api/#mroomcanonical_alias) via the module API. ([\#15450](https://github.com/matrix-org/synapse/issues/15450)) - Enable support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. ([\#15520](https://github.com/matrix-org/synapse/issues/15520)) - Experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support: delegate auth to an OIDC provider. ([\#15582](https://github.com/matrix-org/synapse/issues/15582)) -- Add Syanpse version deploy annotations to Grafana dashboard which enables easy correlation between behavior changes witnessed in a graph to a certain Synapse version and nail down regressions. 
([\#15674](https://github.com/matrix-org/synapse/issues/15674)) +- Add Synapse version deploy annotations to Grafana dashboard which enables easy correlation between behavior changes witnessed in a graph to a certain Synapse version and nail down regressions. ([\#15674](https://github.com/matrix-org/synapse/issues/15674)) - Add a catch-all * to the supported relation types when redacting an event and its related events. This is an update to [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) implementation. ([\#15705](https://github.com/matrix-org/synapse/issues/15705)) - Speed up `/messages` by backfilling in the background when there are no backward extremities where we are directly paginating. ([\#15710](https://github.com/matrix-org/synapse/issues/15710)) - Expose a metric reporting the database background update status. ([\#15740](https://github.com/matrix-org/synapse/issues/15740)) From df945e0d7cd3cc78e54002115ae5e5793ed0a116 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 13 Jun 2023 12:07:55 -0400 Subject: [PATCH 131/562] Fix MSC3983 support: Use the unstable /keys/claim federation endpoint if multiple keys are requested (#15755) --- changelog.d/15755.misc | 1 + synapse/federation/federation_client.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15755.misc diff --git a/changelog.d/15755.misc b/changelog.d/15755.misc new file mode 100644 index 0000000000..a65340d380 --- /dev/null +++ b/changelog.d/15755.misc @@ -0,0 +1 @@ +Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983). diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index a2cf3a96c6..e5359ca558 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -260,7 +260,9 @@ class FederationClient(FederationBase): use_unstable = False for user_id, one_time_keys in query.items(): for device_id, algorithms in one_time_keys.items(): - if any(count > 1 for count in algorithms.values()): + # If more than one algorithm is requested, attempt to use the unstable + # endpoint. + if sum(algorithms.values()) > 1: use_unstable = True if algorithms: # For the stable query, choose only the first algorithm. @@ -296,6 +298,7 @@ class FederationClient(FederationBase): else: logger.debug("Skipping unstable claim client keys API") + # TODO Potentially attempt multiple queries and combine the results? return await self.transport_layer.claim_client_keys( user, destination, content, timeout ) From 0757d59ec4f3275e30907825b4dfb0fdbdce9006 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 13 Jun 2023 12:31:08 -0500 Subject: [PATCH 132/562] Avoid backfill when we already have messages to return (#15737) We now only block the client to backfill when we see a large gap in the events (more than 2 events missing in a row according to `depth`), more than 3 single-event holes, or not enough messages to fill the response. Otherwise, we return the messages directly to the client and backfill in the background for eventual consistency sake. 
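
To make the heuristic described above concrete, here is a minimal, self-contained sketch of the decision logic. The helper name and inputs are hypothetical rather than Synapse's actual API; only the thresholds and the shape of the depth-gap loop mirror the patch below.

    from typing import Set

    # Mirrors the constant the patch adds to synapse/handlers/pagination.py.
    BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD = 3

    def should_block_on_backfill(
        event_depths: Set[int], num_events: int, limit: int
    ) -> bool:
        """Decide whether /messages should backfill in the foreground
        (blocking the client) before responding."""
        sorted_depths = sorted(event_depths)
        found_big_gap = False
        number_of_gaps = 0
        previous_depth = sorted_depths[0] if sorted_depths else 0
        for depth in sorted_depths:
            depth_gap = abs(depth - previous_depth)
            if depth_gap > 1:
                # At least one event is missing between two returned events.
                number_of_gaps += 1
            if depth_gap > 2:
                # More than two events missing in a row: a big gap.
                found_big_gap = True
                break
            previous_depth = depth
        return (
            found_big_gap
            or number_of_gaps > BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD
            or num_events < limit  # not enough events to fill the response
        )

    # Depths {1, 2, 6} leave a gap of 4 between 2 and 6, so we block:
    assert should_block_on_backfill({1, 2, 6}, num_events=3, limit=3)
    # Depths {1, 2, 3} are continuous and fill the limit, so we do not:
    assert not should_block_on_backfill({1, 2, 3}, num_events=3, limit=3)
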
Fix https://github.com/matrix-org/synapse/issues/15696
---
 changelog.d/15737.feature      |   1 +
 synapse/handlers/pagination.py | 137 +++++++++++++++++++++++++++------
 2 files changed, 116 insertions(+), 22 deletions(-)
 create mode 100644 changelog.d/15737.feature

diff --git a/changelog.d/15737.feature b/changelog.d/15737.feature
new file mode 100644
index 0000000000..9a547b5ebd
--- /dev/null
+++ b/changelog.d/15737.feature
@@ -0,0 +1 @@
+Improve `/messages` response time by avoiding backfill when we already have messages to return.
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index d5257acb7d..19b8728db9 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -40,6 +40,11 @@ if TYPE_CHECKING:

 logger = logging.getLogger(__name__)

+# How many single event gaps we tolerate returning in a `/messages` response before we
+# backfill and try to fill in the history. This is an arbitrarily picked number so feel
+# free to tune it in the future.
+BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD = 3
+

 @attr.s(slots=True, auto_attribs=True)
 class PurgeStatus:
@@ -486,35 +491,35 @@ class PaginationHandler:
             room_id, room_token.stream
         )

-        if not use_admin_priviledge and membership == Membership.LEAVE:
-            # If they have left the room then clamp the token to be before
-            # they left the room, to save the effort of loading from the
-            # database.
+        # If they have left the room then clamp the token to be before
+        # they left the room, to save the effort of loading from the
+        # database.
+        if (
+            pagin_config.direction == Direction.BACKWARDS
+            and not use_admin_priviledge
+            and membership == Membership.LEAVE
+        ):
+            # This is only None if the room is world_readable, in which case
+            # "Membership.JOIN" would have been returned and we should never hit
+            # this branch.
+            assert member_event_id

-            # This is only None if the room is world_readable, in which
-            # case "JOIN" would have been returned.
-            assert member_event_id
-
-            leave_token = await self.store.get_topological_token_for_event(
-                member_event_id
-            )
-            assert leave_token.topological is not None
-
-            if leave_token.topological < curr_topo:
-                from_token = from_token.copy_and_replace(
-                    StreamKeyType.ROOM, leave_token
-                )
-
-            await self.hs.get_federation_handler().maybe_backfill(
-                room_id,
-                curr_topo,
-                limit=pagin_config.limit,
+            leave_token = await self.store.get_topological_token_for_event(
+                member_event_id
             )
+            assert leave_token.topological is not None
+
+            if leave_token.topological < curr_topo:
+                from_token = from_token.copy_and_replace(
+                    StreamKeyType.ROOM, leave_token
+                )

         to_room_key = None
         if pagin_config.to_token:
             to_room_key = pagin_config.to_token.room_key

+        # Initially fetch the events from the database. With any luck, we can return
+        # these without blocking on backfill (handled below).
         events, next_key = await self.store.paginate_room_events(
             room_id=room_id,
             from_key=from_token.room_key,
@@ -524,6 +529,94 @@
             event_filter=event_filter,
         )

+        if pagin_config.direction == Direction.BACKWARDS:
+            # We use a `Set` because there can be multiple events at a given depth
+            # and we only care about looking at the unique continuum of depths to
+            # find gaps.
+            event_depths: Set[int] = {event.depth for event in events}
+            sorted_event_depths = sorted(event_depths)
+
+            # Inspect the depths of the returned events to see if there are any gaps
+            found_big_gap = False
+            number_of_gaps = 0
+            previous_event_depth = (
+                sorted_event_depths[0] if len(sorted_event_depths) > 0 else 0
+            )
+            for event_depth in sorted_event_depths:
+                # We don't expect a negative depth but we'll just deal with it in
+                # any case by taking the absolute value to get the true gap between
+                # any two integers.
+                depth_gap = abs(event_depth - previous_event_depth)
+                # A `depth_gap` of 1 is a normal continuous chain to the next event
+                # (1 <-- 2 <-- 3) so anything larger indicates a missing event (it's
+                # also possible there is no event at a given depth but we can't ever
+                # know that for sure)
+                if depth_gap > 1:
+                    number_of_gaps += 1
+
+                # We only tolerate a small number of single-event long gaps in the
+                # returned events because those are most likely just events we've
+                # failed to pull in the past. Anything longer than that is probably
+                # a sign that we're missing a decent chunk of history and we should
+                # try to backfill it.
+                #
+                # XXX: It's possible we could tolerate longer gaps if we checked
+                # that a given event's `prev_events` is one that has failed pull
+                # attempts and we could just treat it like a dead branch of history
+                # for now or at least something that we don't need to block the
+                # client on to try pulling.
+                #
+                # XXX: If we had something like MSC3871 to indicate gaps in the
+                # timeline to the client, we could also get away with any sized gap
+                # and just have the client refetch the holes as they see fit.
+                if depth_gap > 2:
+                    found_big_gap = True
+                    break
+                previous_event_depth = event_depth
+
+            # Backfill in the foreground if we found a big gap, have too many holes,
+            # or we don't have enough events to fill the limit that the client asked
+            # for.
+            missing_too_many_events = (
+                number_of_gaps > BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD
+            )
+            not_enough_events_to_fill_response = len(events) < pagin_config.limit
+            if (
+                found_big_gap
+                or missing_too_many_events
+                or not_enough_events_to_fill_response
+            ):
+                did_backfill = (
+                    await self.hs.get_federation_handler().maybe_backfill(
+                        room_id,
+                        curr_topo,
+                        limit=pagin_config.limit,
+                    )
+                )
+
+                # If we did backfill something, refetch the events from the database to
+                # catch anything new that might have been added since we last fetched.
+                if did_backfill:
+                    events, next_key = await self.store.paginate_room_events(
+                        room_id=room_id,
+                        from_key=from_token.room_key,
+                        to_key=to_room_key,
+                        direction=pagin_config.direction,
+                        limit=pagin_config.limit,
+                        event_filter=event_filter,
+                    )
+                else:
+                    # Otherwise, we can backfill in the background for eventual
+                    # consistency's sake but we don't need to block the client waiting
+                    # for a costly federation call and processing.
+ run_as_background_process( + "maybe_backfill_in_the_background", + self.hs.get_federation_handler().maybe_backfill, + room_id, + curr_topo, + limit=pagin_config.limit, + ) + next_token = from_token.copy_and_replace(StreamKeyType.ROOM, next_key) # if no events are returned from pagination, that implies From 59ec4a0dc1404991935e3c29abe548affa0446bf Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Tue, 13 Jun 2023 19:51:47 +0200 Subject: [PATCH 133/562] Fix MSC3983 support: only one OTK per device was returned through federation (#15770) --- changelog.d/15770.bugfix | 1 + synapse/federation/federation_server.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15770.bugfix diff --git a/changelog.d/15770.bugfix b/changelog.d/15770.bugfix new file mode 100644 index 0000000000..a65340d380 --- /dev/null +++ b/changelog.d/15770.bugfix @@ -0,0 +1 @@ +Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983). diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 9425b32507..61fa3b30af 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -1016,7 +1016,9 @@ class FederationServer(FederationBase): for user_id, device_keys in result.items(): for device_id, keys in device_keys.items(): for key_id, key in keys.items(): - json_result.setdefault(user_id, {})[device_id] = {key_id: key} + json_result.setdefault(user_id, {}).setdefault(device_id, {})[ + key_id + ] = key logger.info( "Claimed one-time-keys: %s", From 553f2f53e7930d721d2070ffa45af6397f5ecb60 Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 13 Jun 2023 13:22:06 -0700 Subject: [PATCH 134/562] Replace `EventContext` fields `prev_group` and `delta_ids` with field `state_group_deltas` (#15233) --- changelog.d/15233.misc | 1 + synapse/events/snapshot.py | 159 ++++++++++++------ synapse/storage/controllers/persist_events.py | 5 +- tests/events/test_snapshot.py | 3 +- tests/storage/test_event_chain.py | 5 +- tests/test_state.py | 11 +- 6 files changed, 126 insertions(+), 58 deletions(-) create mode 100644 changelog.d/15233.misc diff --git a/changelog.d/15233.misc b/changelog.d/15233.misc new file mode 100644 index 0000000000..1dff00bf3c --- /dev/null +++ b/changelog.d/15233.misc @@ -0,0 +1 @@ +Replace `EventContext` fields `prev_group` and `delta_ids` with field `state_group_deltas`. diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index e7e8225b8e..a43498ed4d 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, List, Optional, Tuple +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple import attr from immutabledict import immutabledict @@ -107,33 +107,32 @@ class EventContext(UnpersistedEventContextBase): state_delta_due_to_event: If `state_group` and `state_group_before_event` are not None then this is the delta of the state between the two groups. - prev_group: If it is known, ``state_group``'s prev_group. Note that this being - None does not necessarily mean that ``state_group`` does not have - a prev_group! + state_group_deltas: If not empty, this is a dict collecting a mapping of the state + difference between state groups. 
- If the event is a state event, this is normally the same as - ``state_group_before_event``. + The keys are a tuple of two integers: the initial group and final state group. + The corresponding value is a state map representing the state delta between + these state groups. - If ``state_group`` is None (ie, the event is an outlier), ``prev_group`` - will always also be ``None``. + The dictionary is expected to have at most two entries with state groups of: - Note that this *not* (necessarily) the state group associated with - ``_prev_state_ids``. + 1. The state group before the event and after the event. + 2. The state group preceding the state group before the event and the + state group before the event. - delta_ids: If ``prev_group`` is not None, the state delta between ``prev_group`` - and ``state_group``. + This information is collected and stored as part of an optimization for persisting + events. partial_state: if True, we may be storing this event with a temporary, incomplete state. """ _storage: "StorageControllers" + state_group_deltas: Dict[Tuple[int, int], StateMap[str]] rejected: Optional[str] = None _state_group: Optional[int] = None state_group_before_event: Optional[int] = None _state_delta_due_to_event: Optional[StateMap[str]] = None - prev_group: Optional[int] = None - delta_ids: Optional[StateMap[str]] = None app_service: Optional[ApplicationService] = None partial_state: bool = False @@ -145,16 +144,14 @@ class EventContext(UnpersistedEventContextBase): state_group_before_event: Optional[int], state_delta_due_to_event: Optional[StateMap[str]], partial_state: bool, - prev_group: Optional[int] = None, - delta_ids: Optional[StateMap[str]] = None, + state_group_deltas: Dict[Tuple[int, int], StateMap[str]], ) -> "EventContext": return EventContext( storage=storage, state_group=state_group, state_group_before_event=state_group_before_event, state_delta_due_to_event=state_delta_due_to_event, - prev_group=prev_group, - delta_ids=delta_ids, + state_group_deltas=state_group_deltas, partial_state=partial_state, ) @@ -163,7 +160,7 @@ class EventContext(UnpersistedEventContextBase): storage: "StorageControllers", ) -> "EventContext": """Return an EventContext instance suitable for persisting an outlier event""" - return EventContext(storage=storage) + return EventContext(storage=storage, state_group_deltas={}) async def persist(self, event: EventBase) -> "EventContext": return self @@ -183,13 +180,15 @@ class EventContext(UnpersistedEventContextBase): "state_group": self._state_group, "state_group_before_event": self.state_group_before_event, "rejected": self.rejected, - "prev_group": self.prev_group, + "state_group_deltas": _encode_state_group_delta(self.state_group_deltas), "state_delta_due_to_event": _encode_state_dict( self._state_delta_due_to_event ), - "delta_ids": _encode_state_dict(self.delta_ids), "app_service_id": self.app_service.id if self.app_service else None, "partial_state": self.partial_state, + # add dummy delta_ids and prev_group for backwards compatibility + "delta_ids": None, + "prev_group": None, } @staticmethod @@ -204,17 +203,24 @@ class EventContext(UnpersistedEventContextBase): Returns: The event context. 
""" + # workaround for backwards/forwards compatibility: if the input doesn't have a value + # for "state_group_deltas" just assign an empty dict + state_group_deltas = input.get("state_group_deltas", None) + if state_group_deltas: + state_group_deltas = _decode_state_group_delta(state_group_deltas) + else: + state_group_deltas = {} + context = EventContext( # We use the state_group and prev_state_id stuff to pull the # current_state_ids out of the DB and construct prev_state_ids. storage=storage, state_group=input["state_group"], state_group_before_event=input["state_group_before_event"], - prev_group=input["prev_group"], + state_group_deltas=state_group_deltas, state_delta_due_to_event=_decode_state_dict( input["state_delta_due_to_event"] ), - delta_ids=_decode_state_dict(input["delta_ids"]), rejected=input["rejected"], partial_state=input.get("partial_state", False), ) @@ -349,7 +355,7 @@ class UnpersistedEventContext(UnpersistedEventContextBase): _storage: "StorageControllers" state_group_before_event: Optional[int] state_group_after_event: Optional[int] - state_delta_due_to_event: Optional[dict] + state_delta_due_to_event: Optional[StateMap[str]] prev_group_for_state_group_before_event: Optional[int] delta_ids_to_state_group_before_event: Optional[StateMap[str]] partial_state: bool @@ -380,26 +386,16 @@ class UnpersistedEventContext(UnpersistedEventContextBase): events_and_persisted_context = [] for event, unpersisted_context in amended_events_and_context: - if event.is_state(): - context = EventContext( - storage=unpersisted_context._storage, - state_group=unpersisted_context.state_group_after_event, - state_group_before_event=unpersisted_context.state_group_before_event, - state_delta_due_to_event=unpersisted_context.state_delta_due_to_event, - partial_state=unpersisted_context.partial_state, - prev_group=unpersisted_context.state_group_before_event, - delta_ids=unpersisted_context.state_delta_due_to_event, - ) - else: - context = EventContext( - storage=unpersisted_context._storage, - state_group=unpersisted_context.state_group_after_event, - state_group_before_event=unpersisted_context.state_group_before_event, - state_delta_due_to_event=unpersisted_context.state_delta_due_to_event, - partial_state=unpersisted_context.partial_state, - prev_group=unpersisted_context.prev_group_for_state_group_before_event, - delta_ids=unpersisted_context.delta_ids_to_state_group_before_event, - ) + state_group_deltas = unpersisted_context._build_state_group_deltas() + + context = EventContext( + storage=unpersisted_context._storage, + state_group=unpersisted_context.state_group_after_event, + state_group_before_event=unpersisted_context.state_group_before_event, + state_delta_due_to_event=unpersisted_context.state_delta_due_to_event, + partial_state=unpersisted_context.partial_state, + state_group_deltas=state_group_deltas, + ) events_and_persisted_context.append((event, context)) return events_and_persisted_context @@ -452,11 +448,11 @@ class UnpersistedEventContext(UnpersistedEventContextBase): # if the event isn't a state event the state group doesn't change if not self.state_delta_due_to_event: - state_group_after_event = self.state_group_before_event + self.state_group_after_event = self.state_group_before_event # otherwise if it is a state event we need to get a state group for it else: - state_group_after_event = await self._storage.state.store_state_group( + self.state_group_after_event = await self._storage.state.store_state_group( event.event_id, event.room_id, 
prev_group=self.state_group_before_event, @@ -464,16 +460,81 @@ class UnpersistedEventContext(UnpersistedEventContextBase): current_state_ids=None, ) + state_group_deltas = self._build_state_group_deltas() + return EventContext.with_state( storage=self._storage, - state_group=state_group_after_event, + state_group=self.state_group_after_event, state_group_before_event=self.state_group_before_event, state_delta_due_to_event=self.state_delta_due_to_event, + state_group_deltas=state_group_deltas, partial_state=self.partial_state, - prev_group=self.state_group_before_event, - delta_ids=self.state_delta_due_to_event, ) + def _build_state_group_deltas(self) -> Dict[Tuple[int, int], StateMap]: + """ + Collect deltas between the state groups associated with this context + """ + state_group_deltas = {} + + # if we know the state group before the event and after the event, add them and the + # state delta between them to state_group_deltas + if self.state_group_before_event and self.state_group_after_event: + # if we have the state groups we should have the delta + assert self.state_delta_due_to_event is not None + state_group_deltas[ + ( + self.state_group_before_event, + self.state_group_after_event, + ) + ] = self.state_delta_due_to_event + + # the state group before the event may also have a state group which precedes it, if + # we have that and the state group before the event, add them and the state + # delta between them to state_group_deltas + if ( + self.prev_group_for_state_group_before_event + and self.state_group_before_event + ): + # if we have both state groups we should have the delta between them + assert self.delta_ids_to_state_group_before_event is not None + state_group_deltas[ + ( + self.prev_group_for_state_group_before_event, + self.state_group_before_event, + ) + ] = self.delta_ids_to_state_group_before_event + + return state_group_deltas + + +def _encode_state_group_delta( + state_group_delta: Dict[Tuple[int, int], StateMap[str]] +) -> List[Tuple[int, int, Optional[List[Tuple[str, str, str]]]]]: + if not state_group_delta: + return [] + + state_group_delta_encoded = [] + for key, value in state_group_delta.items(): + state_group_delta_encoded.append((key[0], key[1], _encode_state_dict(value))) + + return state_group_delta_encoded + + +def _decode_state_group_delta( + input: List[Tuple[int, int, List[Tuple[str, str, str]]]] +) -> Dict[Tuple[int, int], StateMap[str]]: + if not input: + return {} + + state_group_deltas = {} + for state_group_1, state_group_2, state_dict in input: + state_map = _decode_state_dict(state_dict) + assert state_map is not None + state_group_deltas[(state_group_1, state_group_2)] = state_map + + return state_group_deltas + def _encode_state_dict( state_dict: Optional[StateMap[str]], diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index f1d2c71c91..35c0680365 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -839,9 +839,8 @@ class EventsPersistenceStorageController: "group" % (ev.event_id,) ) continue - - if ctx.prev_group: - state_group_deltas[(ctx.prev_group, ctx.state_group)] = ctx.delta_ids + if ctx.state_group_deltas: + state_group_deltas.update(ctx.state_group_deltas) # We need to map the event_ids to their state groups. 
First, let's # check if the event is one we're persisting, in which case we can diff --git a/tests/events/test_snapshot.py b/tests/events/test_snapshot.py index 6687c28e8f..b5e42f9600 100644 --- a/tests/events/test_snapshot.py +++ b/tests/events/test_snapshot.py @@ -101,8 +101,7 @@ class TestEventContext(unittest.HomeserverTestCase): self.assertEqual( context.state_group_before_event, d_context.state_group_before_event ) - self.assertEqual(context.prev_group, d_context.prev_group) - self.assertEqual(context.delta_ids, d_context.delta_ids) + self.assertEqual(context.state_group_deltas, d_context.state_group_deltas) self.assertEqual(context.app_service, d_context.app_service) self.assertEqual( diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py index e39b63edac..48ebfadaab 100644 --- a/tests/storage/test_event_chain.py +++ b/tests/storage/test_event_chain.py @@ -401,7 +401,10 @@ class EventChainStoreTestCase(HomeserverTestCase): assert persist_events_store is not None persist_events_store._store_event_txn( txn, - [(e, EventContext(self.hs.get_storage_controllers())) for e in events], + [ + (e, EventContext(self.hs.get_storage_controllers(), {})) + for e in events + ], ) # Actually call the function that calculates the auth chain stuff. diff --git a/tests/test_state.py b/tests/test_state.py index 7a49b87953..eded38c766 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -555,10 +555,15 @@ class StateTestCase(unittest.TestCase): (e.event_id for e in old_state + [event]), current_state_ids.values() ) - self.assertIsNotNone(context.state_group_before_event) + assert context.state_group_before_event is not None + assert context.state_group is not None + self.assertEqual( + context.state_group_deltas.get( + (context.state_group_before_event, context.state_group) + ), + {(event.type, event.state_key): event.event_id}, + ) self.assertNotEqual(context.state_group_before_event, context.state_group) - self.assertEqual(context.state_group_before_event, context.prev_group) - self.assertEqual({("state", ""): event.event_id}, context.delta_ids) @defer.inlineCallbacks def test_trivial_annotate_message( From 8ddb2de55387d54bac53138f374f55c7608991ce Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 13 Jun 2023 16:34:54 -0500 Subject: [PATCH 135/562] Document `looping_call()` functionality that will wait for the given function to finish before scheduling another (#15772) Thanks to @erikjohnston for clarifying, https://github.com/matrix-org/synapse/pull/15743#discussion_r1226544457 We don't have to worry about calls stacking up if the given function takes longer than the scheduled time. --- changelog.d/15772.doc | 1 + synapse/util/__init__.py | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 changelog.d/15772.doc diff --git a/changelog.d/15772.doc b/changelog.d/15772.doc new file mode 100644 index 0000000000..4d6c933c71 --- /dev/null +++ b/changelog.d/15772.doc @@ -0,0 +1 @@ +Document `looping_call()` functionality that will wait for the given function to finish before scheduling another. diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 7ea0c4c36b..9f3b8741c1 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -116,6 +116,11 @@ class Clock: Waits `msec` initially before calling `f` for the first time. + If the function given to `looping_call` returns an awaitable/deferred, the next + call isn't scheduled until after the returned awaitable has finished. 
We get + this functionality thanks to this function being a thin wrapper around + `twisted.internet.task.LoopingCall`. + Note that the function will be called with no logcontext, so if it is anything other than trivial, you probably want to wrap it in run_as_background_process. From 21fea6b7493533985f7fa14924949514b5a356e2 Mon Sep 17 00:00:00 2001 From: Jason Little Date: Wed, 14 Jun 2023 03:42:18 -0500 Subject: [PATCH 136/562] Prefill events after invalidate not before when persisting events (#15758) Fixes #15757 --- changelog.d/15758.bugfix | 1 + synapse/storage/databases/main/events.py | 15 ++++-- .../storage/databases/main/events_worker.py | 2 +- synapse/util/caches/lrucache.py | 8 ++- .../databases/main/test_events_worker.py | 49 +++++++++++++++++++ 5 files changed, 70 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15758.bugfix diff --git a/changelog.d/15758.bugfix b/changelog.d/15758.bugfix new file mode 100644 index 0000000000..cabe25ca24 --- /dev/null +++ b/changelog.d/15758.bugfix @@ -0,0 +1 @@ +Avoid invalidating a cache that was just prefilled. diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index e2e6eb479f..44af3357af 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1729,13 +1729,22 @@ class PersistEventsStore: if not row["rejects"] and not row["redacts"]: to_prefill.append(EventCacheEntry(event=event, redacted_event=None)) - async def prefill() -> None: + async def external_prefill() -> None: for cache_entry in to_prefill: - await self.store._get_event_cache.set( + await self.store._get_event_cache.set_external( (cache_entry.event.event_id,), cache_entry ) - txn.async_call_after(prefill) + def local_prefill() -> None: + for cache_entry in to_prefill: + self.store._get_event_cache.set_local( + (cache_entry.event.event_id,), cache_entry + ) + + # The order these are called here is not as important as knowing that after the + # transaction is finished, the async_call_after will run before the call_after. + txn.async_call_after(external_prefill) + txn.call_after(local_prefill) def _store_redaction(self, txn: LoggingTransaction, event: EventBase) -> None: assert event.redacts is not None diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index d93ffc4efa..7e7648c951 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -883,7 +883,7 @@ class EventsWorkerStore(SQLBaseStore): async def _invalidate_async_get_event_cache(self, event_id: str) -> None: """ - Invalidates an event in the asyncronous get event cache, which may be remote. + Invalidates an event in the asynchronous get event cache, which may be remote. Arguments: event_id: the event ID to invalidate diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 6137c85e10..be6554319a 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -842,7 +842,13 @@ class AsyncLruCache(Generic[KT, VT]): return self._lru_cache.get(key, update_metrics=update_metrics) async def set(self, key: KT, value: VT) -> None: - self._lru_cache.set(key, value) + # This will add the entries in the correct order, local first external second + self.set_local(key, value) + await self.set_external(key, value) + + async def set_external(self, key: KT, value: VT) -> None: + # This method should add an entry to any configured external cache, in this case noop. 
+ pass def set_local(self, key: KT, value: VT) -> None: self._lru_cache.set(key, value) diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py index 788500e38f..b223dc750b 100644 --- a/tests/storage/databases/main/test_events_worker.py +++ b/tests/storage/databases/main/test_events_worker.py @@ -139,6 +139,55 @@ class HaveSeenEventsTestCase(unittest.HomeserverTestCase): # That should result in a single db query to lookup self.assertEqual(ctx.get_resource_usage().db_txn_count, 1) + def test_persisting_event_prefills_get_event_cache(self) -> None: + """ + Test to make sure that the `_get_event_cache` is prefilled after we persist an + event and returns the updated value. + """ + event, event_context = self.get_success( + create_event( + self.hs, + room_id=self.room_id, + sender=self.user, + type="test_event_type", + content={"body": "conflabulation"}, + ) + ) + + # First, check `_get_event_cache` for the event we just made + # to verify it's not in the cache. + res = self.store._get_event_cache.get_local((event.event_id,)) + self.assertEqual(res, None, "Event was cached when it should not have been.") + + with LoggingContext(name="test") as ctx: + # Persist the event which should invalidate then prefill the + # `_get_event_cache` so we don't return stale values. + # Side Note: Apparently, persisting an event isn't a transaction in the + # sense that it is recorded in the LoggingContext + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None + self.get_success( + persistence.persist_event( + event, + event_context, + ) + ) + + # Check `_get_event_cache` again and we should see the updated fact + # that we now have the event cached after persisting it. + res = self.store._get_event_cache.get_local((event.event_id,)) + self.assertEqual(res.event, event, "Event not cached as expected.") # type: ignore + + # Try and fetch the event from the database. + self.get_success(self.store.get_event(event.event_id)) + + # Verify that the database hit was avoided. + self.assertEqual( + ctx.get_resource_usage().evt_db_fetch_count, + 0, + "Database was hit, which would not happen if event was cached.", + ) + def test_invalidate_cache_by_room_id(self) -> None: """ Test to make sure that all events associated with the given `(room_id,)` From 14f9d9b4520099118f009ae4f4c6b11b779af499 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Wed, 14 Jun 2023 11:53:55 +0200 Subject: [PATCH 137/562] Fix empty scope when having version mismatch between workers (#15774) --- changelog.d/15774.bugfix | 1 + synapse/types/__init__.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15774.bugfix diff --git a/changelog.d/15774.bugfix b/changelog.d/15774.bugfix new file mode 100644 index 0000000000..c24d6c25e4 --- /dev/null +++ b/changelog.d/15774.bugfix @@ -0,0 +1 @@ +Fix an error when having workers of different versions running. 
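The crash fixed here came from a hard `input["scope"]` lookup on payloads produced by older workers that predate the `scope` field. A minimal sketch of the tolerant-deserialization pattern the one-line fix below adopts — `MiniRequester` is a hypothetical, cut-down stand-in, not Synapse's real `Requester`:

```python
# Illustration only: version-tolerant deserialization between workers.
from dataclasses import dataclass
from typing import Any, Dict, Set


@dataclass(frozen=True)
class MiniRequester:
    user_id: str
    scope: Set[str]

    @staticmethod
    def deserialize(input: Dict[str, Any]) -> "MiniRequester":
        return MiniRequester(
            user_id=input["user_id"],
            # An older worker may serialize this object without the newer
            # "scope" field, so fall back to an empty list instead of
            # raising a KeyError on input["scope"].
            scope=set(input.get("scope", [])),
        )


# A payload from an old worker lacks "scope" but still deserializes:
assert MiniRequester.deserialize({"user_id": "@alice:example.com"}).scope == set()
```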
diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index dfc95e8ebb..095be070e0 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -177,7 +177,7 @@ class Requester: user=UserID.from_string(input["user_id"]), access_token_id=input["access_token_id"], is_guest=input["is_guest"], - scope=set(input["scope"]), + scope=set(input.get("scope", [])), shadow_banned=input["shadow_banned"], device_id=input["device_id"], app_service=appservice, From ef0d3d7bd941b497ad8291c58bcc53700e08b999 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Wed, 14 Jun 2023 11:55:09 +0200 Subject: [PATCH 138/562] Revert "Allow for the configuration of max request retries and min/max retry delays in the matrix federation client (#12504)" This reverts commit d84e66144dc12dacf71c987a2ba802dd59c0b68e. --- CHANGES.md | 1 - .../configuration/config_documentation.md | 26 ------------------- synapse/config/federation.py | 10 ------- synapse/http/matrixfederationclient.py | 21 +++++++-------- tests/http/test_matrixfederationclient.py | 20 +------------- 5 files changed, 10 insertions(+), 68 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 5412581eef..d898593664 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -30,7 +30,6 @@ Improved Documentation Internal Changes ---------------- -- Allow for the configuration of max request retries and min/max retry delays in the matrix federation client. ([\#12504](https://github.com/matrix-org/synapse/issues/12504)) - Log when events are (maybe unexpectedly) filtered out of responses in tests. ([\#14213](https://github.com/matrix-org/synapse/issues/14213)) - Read from column `full_user_id` rather than `user_id` of tables `profiles` and `user_filters`. ([\#15649](https://github.com/matrix-org/synapse/issues/15649)) - Add support for tracing functions which return `Awaitable`s. ([\#15650](https://github.com/matrix-org/synapse/issues/15650)) diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 8426de0417..0cf6e075ff 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1196,32 +1196,6 @@ Example configuration: allow_device_name_lookup_over_federation: true ``` --- -### `federation` - -The federation section defines some sub-options related to federation. - -The following options are related to configuring timeout and retry logic for one request, -independently of the others. -Short retry algorithm is used when something or someone will wait for the request to have an -answer, while long retry is used for requests that happen in the background, -like sending a federation transaction. - -* `client_timeout`: timeout for the federation requests in seconds. Default to 60s. -* `max_short_retry_delay`: maximum delay to be used for the short retry algo in seconds. Default to 2s. -* `max_long_retry_delay`: maximum delay to be used for the short retry algo in seconds. Default to 60s. -* `max_short_retries`: maximum number of retries for the short retry algo. Default to 3 attempts. -* `max_long_retries`: maximum number of retries for the long retry algo. Default to 10 attempts. - -Example configuration: -```yaml -federation: - client_timeout: 180 - max_short_retry_delay: 7 - max_long_retry_delay: 100 - max_short_retries: 5 - max_long_retries: 20 -``` ---- ## Caching Options related to caching. 
diff --git a/synapse/config/federation.py b/synapse/config/federation.py index d21f7fd02a..336fca578a 100644 --- a/synapse/config/federation.py +++ b/synapse/config/federation.py @@ -22,8 +22,6 @@ class FederationConfig(Config): section = "federation" def read_config(self, config: JsonDict, **kwargs: Any) -> None: - federation_config = config.setdefault("federation", {}) - # FIXME: federation_domain_whitelist needs sytests self.federation_domain_whitelist: Optional[dict] = None federation_domain_whitelist = config.get("federation_domain_whitelist", None) @@ -51,13 +49,5 @@ class FederationConfig(Config): "allow_device_name_lookup_over_federation", False ) - # Allow for the configuration of timeout, max request retries - # and min/max retry delays in the matrix federation client. - self.client_timeout = federation_config.get("client_timeout", 60) - self.max_long_retry_delay = federation_config.get("max_long_retry_delay", 60) - self.max_short_retry_delay = federation_config.get("max_short_retry_delay", 2) - self.max_long_retries = federation_config.get("max_long_retries", 10) - self.max_short_retries = federation_config.get("max_short_retries", 3) - _METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}} diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index ed36825b67..abb5ae5815 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -95,6 +95,8 @@ incoming_responses_counter = Counter( ) +MAX_LONG_RETRIES = 10 +MAX_SHORT_RETRIES = 3 MAXINT = sys.maxsize @@ -404,12 +406,7 @@ class MatrixFederationHttpClient: self.clock = hs.get_clock() self._store = hs.get_datastores().main self.version_string_bytes = hs.version_string.encode("ascii") - self.default_timeout = hs.config.federation.client_timeout - - self.max_long_retry_delay = hs.config.federation.max_long_retry_delay - self.max_short_retry_delay = hs.config.federation.max_short_retry_delay - self.max_long_retries = hs.config.federation.max_long_retries - self.max_short_retries = hs.config.federation.max_short_retries + self.default_timeout = 60 self._cooperator = Cooperator(scheduler=_make_scheduler(self.reactor)) @@ -586,9 +583,9 @@ class MatrixFederationHttpClient: # XXX: Would be much nicer to retry only at the transaction-layer # (once we have reliable transactions in place) if long_retries: - retries_left = self.max_long_retries + retries_left = MAX_LONG_RETRIES else: - retries_left = self.max_short_retries + retries_left = MAX_SHORT_RETRIES url_bytes = request.uri url_str = url_bytes.decode("ascii") @@ -733,12 +730,12 @@ class MatrixFederationHttpClient: if retries_left and not timeout: if long_retries: - delay = 4 ** (self.max_long_retries + 1 - retries_left) - delay = min(delay, self.max_long_retry_delay) + delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left) + delay = min(delay, 60) delay *= random.uniform(0.8, 1.4) else: - delay = 0.5 * 2 ** (self.max_short_retries - retries_left) - delay = min(delay, self.max_short_retry_delay) + delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left) + delay = min(delay, 2) delay *= random.uniform(0.8, 1.4) logger.debug( diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py index 8565f8ac64..0dfc03ce50 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -40,7 +40,7 @@ from synapse.server import HomeServer from synapse.util import Clock from tests.server import FakeTransport -from tests.unittest 
import HomeserverTestCase, override_config +from tests.unittest import HomeserverTestCase def check_logcontext(context: LoggingContextOrSentinel) -> None: @@ -640,21 +640,3 @@ class FederationClientTests(HomeserverTestCase): self.cl.build_auth_headers( b"", b"GET", b"https://example.com", destination_is=b"" ) - - @override_config( - { - "federation": { - "client_timeout": 180, - "max_long_retry_delay": 100, - "max_short_retry_delay": 7, - "max_long_retries": 20, - "max_short_retries": 5, - } - } - ) - def test_configurable_retry_and_delay_values(self) -> None: - self.assertEqual(self.cl.default_timeout, 180) - self.assertEqual(self.cl.max_long_retry_delay, 100) - self.assertEqual(self.cl.max_short_retry_delay, 7) - self.assertEqual(self.cl.max_long_retries, 20) - self.assertEqual(self.cl.max_short_retries, 5) From 825c5909de642c9c6494ef464684e29630d197b5 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Wed, 14 Jun 2023 12:16:41 +0200 Subject: [PATCH 139/562] 1.86.0rc2 --- CHANGES.md | 11 +++++++++++ changelog.d/15774.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 18 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/15774.bugfix diff --git a/CHANGES.md b/CHANGES.md index d898593664..f2f39c3b6e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,17 @@ +Synapse 1.86.0rc2 (2023-06-14) +============================== + +Bugfixes +-------- + +- Fix an error when having workers of different versions running. ([\#15774](https://github.com/matrix-org/synapse/issues/15774)) + + Synapse 1.86.0rc1 (2023-06-13) ============================== +This version was tagged but never released. + Features -------- diff --git a/changelog.d/15774.bugfix b/changelog.d/15774.bugfix deleted file mode 100644 index c24d6c25e4..0000000000 --- a/changelog.d/15774.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix an error when having workers of different versions running. diff --git a/debian/changelog b/debian/changelog index 1c13433c47..81b71ba342 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.86.0~rc2) stable; urgency=medium + + * New Synapse release 1.86.0rc2. + + -- Synapse Packaging team Wed, 14 Jun 2023 12:16:27 +0200 + matrix-synapse-py3 (1.86.0~rc1) stable; urgency=medium * New Synapse release 1.86.0rc1. diff --git a/pyproject.toml b/pyproject.toml index 6bbbf95001..097bd03943 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.86.0rc1" +version = "1.86.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 1404f68a03e684434dea6b2a9a5c2739c719549d Mon Sep 17 00:00:00 2001 From: Tulir Asokan Date: Wed, 14 Jun 2023 17:42:33 +0300 Subject: [PATCH 140/562] Fix joining rooms through aliases where the alias server isn't a real homeserver (#15776) --- changelog.d/15776.bugfix | 1 + synapse/handlers/room_member.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15776.bugfix diff --git a/changelog.d/15776.bugfix b/changelog.d/15776.bugfix new file mode 100644 index 0000000000..f146a85f1a --- /dev/null +++ b/changelog.d/15776.bugfix @@ -0,0 +1 @@ +Fix joining rooms through aliases where the alias server isn't a real homeserver. Contributed by @tulir @ Beeper. 
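The one-line change below is entirely about where `servers.insert(0, room_alias.domain)` sits relative to the `room_alias.domain in servers` check. A small, self-contained illustration of how the two placements behave when the alias's domain is not among the room's servers (plain lists, no Synapse types):

```python
# Illustration only: reorder-if-present versus always-prepend.
servers = ["one.example", "two.example"]
alias_domain = "alias.example"  # not among the room's servers

# Placement inside the `if`: the domain is only moved to the front when
# it is already a candidate, so an absent domain changes nothing.
a = list(servers)
if alias_domain in a:
    a.remove(alias_domain)
    a.insert(0, alias_domain)
assert a == ["one.example", "two.example"]

# Placement outside the `if`: the domain is always put first (the `if`
# now only deduplicates), so the alias's own server leads the list.
b = list(servers)
if alias_domain in b:
    b.remove(alias_domain)
b.insert(0, alias_domain)
assert b == ["alias.example", "one.example", "two.example"]
```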
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index af0ca5c26d..55df34bd06 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1498,7 +1498,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # put the server which owns the alias at the front of the server list. if room_alias.domain in servers: servers.remove(room_alias.domain) - servers.insert(0, room_alias.domain) + servers.insert(0, room_alias.domain) return RoomID.from_string(room_id), servers From d93912042191d30ff1f7aa41d9f0779a609caca8 Mon Sep 17 00:00:00 2001 From: Josh Qou <97894002+joshqou@users.noreply.github.com> Date: Thu, 15 Jun 2023 14:23:27 +0100 Subject: [PATCH 141/562] Fix unsafe hotserving behaviour for non-multimedia uploads. (#15680) * Fix unsafe hotserving behaviour for non-multimedia uploads. * invert disposition assert * test_media_storage.py: run lint * test_base.py: /inline/attachment/s * Only return attachment for disposition type, update tests * Update synapse/media/_base.py Co-authored-by: Patrick Cloke * Update changelog.d/15680.bugfix Co-authored-by: Patrick Cloke * add attribution * Update changelog. --------- Co-authored-by: Patrick Cloke --- changelog.d/15680.bugfix | 1 + synapse/media/_base.py | 15 ++++++++++++--- tests/media/test_base.py | 12 ++++++------ tests/media/test_media_storage.py | 20 ++++++++++---------- 4 files changed, 29 insertions(+), 19 deletions(-) create mode 100644 changelog.d/15680.bugfix diff --git a/changelog.d/15680.bugfix b/changelog.d/15680.bugfix new file mode 100644 index 0000000000..04ac19b4ec --- /dev/null +++ b/changelog.d/15680.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where media files were served in an unsafe manner. Contributed by @joshqou. diff --git a/synapse/media/_base.py b/synapse/media/_base.py index ef8334ae25..20cb8b9010 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py @@ -152,6 +152,9 @@ def add_file_headers( content_type = media_type request.setHeader(b"Content-Type", content_type.encode("UTF-8")) + + # Use a Content-Disposition of attachment to force download of media. + disposition = "attachment" if upload_name: # RFC6266 section 4.1 [1] defines both `filename` and `filename*`. # @@ -173,11 +176,17 @@ def add_file_headers( # correctly interpret those as of 0.99.2 and (b) they are a bit of a pain and we # may as well just do the filename* version. if _can_encode_filename_as_token(upload_name): - disposition = "inline; filename=%s" % (upload_name,) + disposition = "%s; filename=%s" % ( + disposition, + upload_name, + ) else: - disposition = "inline; filename*=utf-8''%s" % (_quote(upload_name),) + disposition = "%s; filename*=utf-8''%s" % ( + disposition, + _quote(upload_name), + ) - request.setHeader(b"Content-Disposition", disposition.encode("ascii")) + request.setHeader(b"Content-Disposition", disposition.encode("ascii")) # cache for at least a day. 
# XXX: we might want to turn this off for data we don't want to diff --git a/tests/media/test_base.py b/tests/media/test_base.py index 66498c744d..4728c80969 100644 --- a/tests/media/test_base.py +++ b/tests/media/test_base.py @@ -20,12 +20,12 @@ from tests import unittest class GetFileNameFromHeadersTests(unittest.TestCase): # input -> expected result TEST_CASES = { - b"inline; filename=abc.txt": "abc.txt", - b'inline; filename="azerty"': "azerty", - b'inline; filename="aze%20rty"': "aze%20rty", - b'inline; filename="aze"rty"': 'aze"rty', - b'inline; filename="azer;ty"': "azer;ty", - b"inline; filename*=utf-8''foo%C2%A3bar": "foo£bar", + b"attachment; filename=abc.txt": "abc.txt", + b'attachment; filename="azerty"': "azerty", + b'attachment; filename="aze%20rty"': "aze%20rty", + b'attachment; filename="aze"rty"': 'aze"rty', + b'attachment; filename="azer;ty"': "azer;ty", + b"attachment; filename*=utf-8''foo%C2%A3bar": "foo£bar", } def tests(self) -> None: diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py index f0f2da65db..ea0051dde4 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -317,7 +317,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): def test_handle_missing_content_type(self) -> None: channel = self._req( - b"inline; filename=out" + self.test_image.extension, + b"attachment; filename=out" + self.test_image.extension, include_content_type=False, ) headers = channel.headers @@ -331,7 +331,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): If the filename is filename= then Synapse will decode it as an ASCII string, and use filename= in the response. """ - channel = self._req(b"inline; filename=out" + self.test_image.extension) + channel = self._req(b"attachment; filename=out" + self.test_image.extension) headers = channel.headers self.assertEqual( @@ -339,7 +339,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): ) self.assertEqual( headers.getRawHeaders(b"Content-Disposition"), - [b"inline; filename=out" + self.test_image.extension], + [b"attachment; filename=out" + self.test_image.extension], ) def test_disposition_filenamestar_utf8escaped(self) -> None: @@ -350,7 +350,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): """ filename = parse.quote("\u2603".encode()).encode("ascii") channel = self._req( - b"inline; filename*=utf-8''" + filename + self.test_image.extension + b"attachment; filename*=utf-8''" + filename + self.test_image.extension ) headers = channel.headers @@ -359,13 +359,13 @@ class MediaRepoTests(unittest.HomeserverTestCase): ) self.assertEqual( headers.getRawHeaders(b"Content-Disposition"), - [b"inline; filename*=utf-8''" + filename + self.test_image.extension], + [b"attachment; filename*=utf-8''" + filename + self.test_image.extension], ) def test_disposition_none(self) -> None: """ - If there is no filename, one isn't passed on in the Content-Disposition - of the request. + If there is no filename, Content-Disposition should only + be a disposition type. 
""" channel = self._req(None) @@ -373,7 +373,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): self.assertEqual( headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type] ) - self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), None) + self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), [b"attachment"]) def test_thumbnail_crop(self) -> None: """Test that a cropped remote thumbnail is available.""" @@ -612,7 +612,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): Tests that the `X-Robots-Tag` header is present, which informs web crawlers to not index, archive, or follow links in media. """ - channel = self._req(b"inline; filename=out" + self.test_image.extension) + channel = self._req(b"attachment; filename=out" + self.test_image.extension) headers = channel.headers self.assertEqual( @@ -625,7 +625,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): Test that the Cross-Origin-Resource-Policy header is set to "cross-origin" allowing web clients to embed media from the downloads API. """ - channel = self._req(b"inline; filename=out" + self.test_image.extension) + channel = self._req(b"attachment; filename=out" + self.test_image.extension) headers = channel.headers From f63d4a3a65e95d3845c43a9dd2893605b06f164a Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Fri, 16 Jun 2023 12:15:12 +0200 Subject: [PATCH 142/562] Regularly try to wake up dests instead of waiting for next PDU/EDU (#15743) --- changelog.d/15743.misc | 1 + synapse/federation/sender/__init__.py | 34 +++++++++----------- tests/federation/test_federation_catch_up.py | 22 ++++++------- 3 files changed, 26 insertions(+), 31 deletions(-) create mode 100644 changelog.d/15743.misc diff --git a/changelog.d/15743.misc b/changelog.d/15743.misc new file mode 100644 index 0000000000..b95eed929e --- /dev/null +++ b/changelog.d/15743.misc @@ -0,0 +1 @@ +Regularly try to send transactions to other servers after they failed instead of waiting for a new event to be available before trying. diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index f3bdc5a4d2..97abbdee18 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -109,10 +109,8 @@ was enabled*, Catch-Up Mode is exited and we return to `_transaction_transmissio If a remote server is unreachable over federation, we back off from that server, with an exponentially-increasing retry interval. -Whilst we don't automatically retry after the interval, we prevent making new attempts -until such time as the back-off has cleared. -Once the back-off is cleared and a new PDU or EDU arrives for transmission, the transmission -loop resumes and empties the queue by making federation requests. +We automatically retry after the retry interval expires (roughly, the logic to do so +being triggered every minute). If the backoff grows too large (> 1 hour), the in-memory queue is emptied (to prevent unbounded growth) and Catch-Up Mode is entered. @@ -145,7 +143,6 @@ from prometheus_client import Counter from typing_extensions import Literal from twisted.internet import defer -from twisted.internet.interfaces import IDelayedCall import synapse.metrics from synapse.api.presence import UserPresenceState @@ -184,14 +181,18 @@ sent_pdus_destination_dist_total = Counter( "Total number of PDUs queued for sending across all destinations", ) -# Time (in s) after Synapse's startup that we will begin to wake up destinations -# that have catch-up outstanding. 
-CATCH_UP_STARTUP_DELAY_SEC = 15
+# Time (in s) to wait before trying to wake up destinations that have
+# catch-up outstanding. This will also be the delay applied at startup
+# before trying the same.
+# Please note that rate limiting still applies, so while the loop is
+# executed every X seconds the destinations may not be woken up because
+# they are being rate limited following previous attempt failures.
+WAKEUP_RETRY_PERIOD_SEC = 60

 # Time (in s) to wait in between waking up each destination, i.e. one destination
-# will be woken up every <CATCH_UP_STARTUP_INTERVAL_SEC> seconds after Synapse's startup until we have woken
-# every destination has outstanding catch-up.
-CATCH_UP_STARTUP_INTERVAL_SEC = 5
+# will be woken up every <WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC> seconds until we have woken every destination
+# that has outstanding catch-up.
+WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC = 5


 class AbstractFederationSender(metaclass=abc.ABCMeta):
@@ -415,12 +416,10 @@ class FederationSender(AbstractFederationSender):
             / hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
         )

-        # wake up destinations that have outstanding PDUs to be caught up
-        self._catchup_after_startup_timer: Optional[
-            IDelayedCall
-        ] = self.clock.call_later(
-            CATCH_UP_STARTUP_DELAY_SEC,
+        # Regularly wake up destinations that have outstanding PDUs to be caught up
+        self.clock.looping_call(
             run_as_background_process,
+            WAKEUP_RETRY_PERIOD_SEC * 1000.0,
             "wake_destinations_needing_catchup",
             self._wake_destinations_needing_catchup,
         )
@@ -966,7 +965,6 @@ class FederationSender(AbstractFederationSender):

             if not destinations_to_wake:
                 # finished waking all destinations!
-                self._catchup_after_startup_timer = None
                 break

             last_processed = destinations_to_wake[-1]
@@ -983,4 +981,4 @@ class FederationSender(AbstractFederationSender):
                 last_processed,
             )
             self.wake_destination(destination)
-            await self.clock.sleep(CATCH_UP_STARTUP_INTERVAL_SEC)
+            await self.clock.sleep(WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC)
diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py
index 391ae51707..b290b020a2 100644
--- a/tests/federation/test_federation_catch_up.py
+++ b/tests/federation/test_federation_catch_up.py
@@ -431,28 +431,24 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase):
         # ACT: call _wake_destinations_needing_catchup

         # patch wake_destination to just count the destinations instead
-        woken = []
+        woken = set()

         def wake_destination_track(destination: str) -> None:
-            woken.append(destination)
+            woken.add(destination)

         self.federation_sender.wake_destination = wake_destination_track  # type: ignore[assignment]

-        # cancel the pre-existing timer for _wake_destinations_needing_catchup
-        # this is because we are calling it manually rather than waiting for it
-        # to be called automatically
-        assert self.federation_sender._catchup_after_startup_timer is not None
-        self.federation_sender._catchup_after_startup_timer.cancel()
-
-        self.get_success(
-            self.federation_sender._wake_destinations_needing_catchup(), by=5.0
-        )
+        # We wait quite long so that all dests can be woken up, since there is a delay
+        # between them.
+        self.pump(by=5.0)

         # ASSERT (_wake_destinations_needing_catchup):
         # - all remotes are woken up, save for zzzerver
         self.assertNotIn("zzzerver", woken)
-        # - all destinations are woken exactly once; they appear once in woken.
- self.assertCountEqual(woken, server_names[:-1]) + # - all destinations are woken, potentially more than once, since the + # wake up is called regularly and we don't ack in this test that a transaction + # has been successfully sent. + self.assertCountEqual(woken, set(server_names[:-1])) def test_not_latest_event(self) -> None: """Test that we send the latest event in the room even if its not ours.""" From 0618bf94cdc56631e670b4e93e4dfaeae2162e73 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Fri, 16 Jun 2023 14:17:02 +0200 Subject: [PATCH 143/562] push rules: fix internal conversion from _type to value (#15781) Also fix wrong rule names for `is_user_mention` and `is_room_mention`. --- changelog.d/15781.bugfix | 1 + rust/src/push/base_rules.rs | 4 +- synapse/push/clientformat.py | 26 ++++----- tests/rest/client/test_push_rule_attrs.py | 67 +++++++++++++++++++++++ 4 files changed, 81 insertions(+), 17 deletions(-) create mode 100644 changelog.d/15781.bugfix diff --git a/changelog.d/15781.bugfix b/changelog.d/15781.bugfix new file mode 100644 index 0000000000..5faf59afee --- /dev/null +++ b/changelog.d/15781.bugfix @@ -0,0 +1 @@ +Fix a bug in push rules handling leading to an invalid (per spec) `is_user_mention` rule sent to clients. Also fix wrong rule names for `is_user_mention` and `is_room_mention`. \ No newline at end of file diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index 9d6c304d92..7eea9313f0 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -142,7 +142,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ default_enabled: true, }, PushRule { - rule_id: Cow::Borrowed("global/override/.m.is_user_mention"), + rule_id: Cow::Borrowed("global/override/.m.rule.is_user_mention"), priority_class: 5, conditions: Cow::Borrowed(&[Condition::Known( KnownCondition::ExactEventPropertyContainsType(EventPropertyIsTypeCondition { @@ -163,7 +163,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ default_enabled: true, }, PushRule { - rule_id: Cow::Borrowed("global/override/.m.is_room_mention"), + rule_id: Cow::Borrowed("global/override/.m.rule.is_room_mention"), priority_class: 5, conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition { diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py index 88b52c26a0..735cef0aed 100644 --- a/synapse/push/clientformat.py +++ b/synapse/push/clientformat.py @@ -41,12 +41,7 @@ def format_push_rules_for_user( rulearray.append(template_rule) - for type_key in ("pattern", "value"): - type_value = template_rule.pop(f"{type_key}_type", None) - if type_value == "user_id": - template_rule[type_key] = user.to_string() - elif type_value == "user_localpart": - template_rule[type_key] = user.localpart + _convert_type_to_value(template_rule, user) template_rule["enabled"] = enabled @@ -63,19 +58,20 @@ def format_push_rules_for_user( for c in template_rule["conditions"]: c.pop("_cache_key", None) - pattern_type = c.pop("pattern_type", None) - if pattern_type == "user_id": - c["pattern"] = user.to_string() - elif pattern_type == "user_localpart": - c["pattern"] = user.localpart - - sender_type = c.pop("sender_type", None) - if sender_type == "user_id": - c["sender"] = user.to_string() + _convert_type_to_value(c, user) return rules +def _convert_type_to_value(rule_or_cond: Dict[str, Any], user: UserID) -> None: + for type_key in ("pattern", "value"): + type_value = rule_or_cond.pop(f"{type_key}_type", None) + if type_value == "user_id": + 
rule_or_cond[type_key] = user.to_string() + elif type_value == "user_localpart": + rule_or_cond[type_key] = user.localpart + + def _add_empty_priority_class_arrays(d: Dict[str, list]) -> Dict[str, list]: for pc in PRIORITY_CLASS_MAP.keys(): d[pc] = [] diff --git a/tests/rest/client/test_push_rule_attrs.py b/tests/rest/client/test_push_rule_attrs.py index 4f875b9289..5aca74475f 100644 --- a/tests/rest/client/test_push_rule_attrs.py +++ b/tests/rest/client/test_push_rule_attrs.py @@ -412,3 +412,70 @@ class PushRuleAttributesTestCase(HomeserverTestCase): ) self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + def test_contains_user_name(self) -> None: + """ + Tests that `contains_user_name` rule is present and have proper value in `pattern`. + """ + username = "bob" + self.register_user(username, "pass") + token = self.login(username, "pass") + + channel = self.make_request( + "GET", + "/pushrules/global/content/.m.rule.contains_user_name", + access_token=token, + ) + + self.assertEqual(channel.code, 200) + + self.assertEqual( + { + "rule_id": ".m.rule.contains_user_name", + "default": True, + "enabled": True, + "pattern": username, + "actions": [ + "notify", + {"set_tweak": "highlight"}, + {"set_tweak": "sound", "value": "default"}, + ], + }, + channel.json_body, + ) + + def test_is_user_mention(self) -> None: + """ + Tests that `is_user_mention` rule is present and have proper value in `value`. + """ + user = self.register_user("bob", "pass") + token = self.login("bob", "pass") + + channel = self.make_request( + "GET", + "/pushrules/global/override/.m.rule.is_user_mention", + access_token=token, + ) + + self.assertEqual(channel.code, 200) + + self.assertEqual( + { + "rule_id": ".m.rule.is_user_mention", + "default": True, + "enabled": True, + "conditions": [ + { + "kind": "event_property_contains", + "key": "content.m\\.mentions.user_ids", + "value": user, + } + ], + "actions": [ + "notify", + {"set_tweak": "highlight"}, + {"set_tweak": "sound", "value": "default"}, + ], + }, + channel.json_body, + ) From 2ac6c3bbb535677bd62b3df425dd1755dba79b66 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 16 Jun 2023 15:25:44 +0100 Subject: [PATCH 144/562] Don't always lock "user_ips" table when performing non-native upsert (#15788) --- changelog.d/15788.bugfix | 1 + synapse/storage/database.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15788.bugfix diff --git a/changelog.d/15788.bugfix b/changelog.d/15788.bugfix new file mode 100644 index 0000000000..d22aae7baf --- /dev/null +++ b/changelog.d/15788.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in 1.57.0 where the wrong table would be locked on updating database rows when using SQLite as the database backend. \ No newline at end of file diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 10fa6c4802..7e49ae11bc 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -1529,7 +1529,7 @@ class DatabasePool: # Lock the table just once, to prevent it being done once per row. # Note that, according to Postgres' documentation, once obtained, # the lock is held for the remainder of the current transaction. 
-            self.engine.lock_table(txn, "user_ips")
+            self.engine.lock_table(txn, table)

             for keyv, valv in zip(key_values, value_values):
                 _keys = dict(zip(key_names, keyv))

From 0f02f0b4da92229e88e27a92ea3bfa523457bfc1 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Fri, 16 Jun 2023 14:12:24 -0500
Subject: [PATCH 145/562] Remove experimental MSC2716 implementation to
 incrementally import history into existing rooms (#15748)

Context for why we're removing the implementation:
- https://github.com/matrix-org/matrix-spec-proposals/pull/2716#issuecomment-1487441010
- https://github.com/matrix-org/matrix-spec-proposals/pull/2716#issuecomment-1504262734

Anyone wanting to continue MSC2716 should also address these leftover
tasks: https://github.com/matrix-org/synapse/issues/10737

Closes https://github.com/matrix-org/synapse/issues/10737 in that it is
no longer necessary to track those things.
---
 changelog.d/15748.removal                     |   1 +
 .../conf/workers-shared-extra.yaml.j2         |   2 -
 docker/configure_workers_and_start.py         |   1 -
 docs/workers.md                               |   1 -
 scripts-dev/complement.sh                     |   4 -
 synapse/api/constants.py                      |  14 -
 synapse/api/room_versions.py                  |  61 ---
 synapse/app/generic_worker.py                 |   2 -
 synapse/config/experimental.py                |   3 -
 synapse/event_auth.py                         |  40 --
 synapse/events/__init__.py                    |   9 -
 synapse/events/utils.py                       |   9 -
 synapse/handlers/federation.py                |  33 +-
 synapse/handlers/federation_event.py          | 109 ----
 synapse/handlers/message.py                   | 168 +------
 synapse/handlers/room_batch.py                | 466 ------------------
 synapse/handlers/room_member.py               |  54 +-
 synapse/push/bulk_push_rule_evaluator.py      |   1 -
 synapse/rest/__init__.py                      |   2 -
 synapse/rest/client/room_batch.py             | 254 ----------
 synapse/rest/client/versions.py               |   2 -
 synapse/server.py                             |   5 -
 synapse/storage/databases/main/__init__.py    |   2 -
 .../databases/main/event_federation.py        | 211 +-------
 synapse/storage/databases/main/events.py      | 125 -----
 synapse/storage/databases/main/room_batch.py  |  47 --
 tests/rest/client/test_room_batch.py          | 302 ------------
 tests/storage/test_event_federation.py        | 211 --------
 28 files changed, 36 insertions(+), 2103 deletions(-)
 create mode 100644 changelog.d/15748.removal
 delete mode 100644 synapse/handlers/room_batch.py
 delete mode 100644 synapse/rest/client/room_batch.py
 delete mode 100644 synapse/storage/databases/main/room_batch.py
 delete mode 100644 tests/rest/client/test_room_batch.py

diff --git a/changelog.d/15748.removal b/changelog.d/15748.removal
new file mode 100644
index 0000000000..dcb9780178
--- /dev/null
+++ b/changelog.d/15748.removal
@@ -0,0 +1 @@
+Remove experimental [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) implementation to incrementally import history into existing rooms.
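Much of what this patch removes hangs off per-room-version capability flags (`msc2716_historical`, `msc2716_redactions`) that gate event-auth and redaction behaviour. A toy sketch of that gating pattern, assuming a much-simplified stand-in for Synapse's attrs-based `RoomVersion`:

```python
# Toy sketch of room-version feature gating; names are illustrative.
from dataclasses import dataclass


@dataclass(frozen=True)
class ToyRoomVersion:
    identifier: str
    supports_historical: bool


def check_historical_allowed(
    room_version: ToyRoomVersion, user_level: int, historical_level: int
) -> None:
    # Room versions without the flag skip the check entirely, in the same
    # early-return style as the removed check_historical().
    if not room_version.supports_historical:
        return
    if user_level < historical_level:
        raise PermissionError("not allowed to send historical-related events")


# The flag, not the caller, decides whether the power-level check applies:
check_historical_allowed(ToyRoomVersion("10", False), user_level=0, historical_level=100)
```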
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2 index 63acf86a46..2b11b487f6 100644 --- a/docker/complement/conf/workers-shared-extra.yaml.j2 +++ b/docker/complement/conf/workers-shared-extra.yaml.j2 @@ -92,8 +92,6 @@ allow_device_name_lookup_over_federation: true ## Experimental Features ## experimental_features: - # Enable history backfilling support - msc2716_enabled: true # client-side support for partial state in /send_join responses faster_joins: true # Enable support for polls diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 87a740e3d4..62fb88daab 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -244,7 +244,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { "^/_matrix/client/(api/v1|r0|v3|unstable)/join/", "^/_matrix/client/(api/v1|r0|v3|unstable)/knock/", "^/_matrix/client/(api/v1|r0|v3|unstable)/profile/", - "^/_matrix/client/(v1|unstable/org.matrix.msc2716)/rooms/.*/batch_send", ], "shared_extra_conf": {}, "worker_extra_conf": "", diff --git a/docs/workers.md b/docs/workers.md index 991814c0bc..735128762a 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -232,7 +232,6 @@ information. ^/_matrix/client/v1/rooms/.*/hierarchy$ ^/_matrix/client/(v1|unstable)/rooms/.*/relations/ ^/_matrix/client/v1/rooms/.*/threads$ - ^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$ ^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$ ^/_matrix/client/(r0|v3|unstable)/account/3pid$ ^/_matrix/client/(r0|v3|unstable)/account/whoami$ diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 131f26234e..24b83cfeb6 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -246,10 +246,6 @@ else else export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite fi - - # The tests for importing historical messages (MSC2716) - # only pass with monoliths, currently. - test_tags="$test_tags,msc2716" fi if [[ -n "$ASYNCIO_REACTOR" ]]; then diff --git a/synapse/api/constants.py b/synapse/api/constants.py index faf0770c66..dc32553d0c 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -123,10 +123,6 @@ class EventTypes: SpaceChild: Final = "m.space.child" SpaceParent: Final = "m.space.parent" - MSC2716_INSERTION: Final = "org.matrix.msc2716.insertion" - MSC2716_BATCH: Final = "org.matrix.msc2716.batch" - MSC2716_MARKER: Final = "org.matrix.msc2716.marker" - Reaction: Final = "m.reaction" @@ -222,16 +218,6 @@ class EventContentFields: # Used in m.room.guest_access events. GUEST_ACCESS: Final = "guest_access" - # Used on normal messages to indicate they were historically imported after the fact - MSC2716_HISTORICAL: Final = "org.matrix.msc2716.historical" - # For "insertion" events to indicate what the next batch ID should be in - # order to connect to it - MSC2716_NEXT_BATCH_ID: Final = "next_batch_id" - # Used on "batch" events to indicate which insertion event it connects to - MSC2716_BATCH_ID: Final = "batch_id" - # For "marker" events - MSC2716_INSERTION_EVENT_REFERENCE: Final = "insertion_event_reference" - # The authorising user for joining a restricted room. 
AUTHORISING_USER: Final = "join_authorised_via_users_server" diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index c5c71e242f..25c105a4c8 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -91,11 +91,6 @@ class RoomVersion: # MSC2403: Allows join_rules to be set to 'knock', changes auth rules to allow sending # m.room.membership event with membership 'knock'. msc2403_knocking: bool - # MSC2716: Adds m.room.power_levels -> content.historical field to control - # whether "insertion", "chunk", "marker" events can be sent - msc2716_historical: bool - # MSC2716: Adds support for redacting "insertion", "chunk", and "marker" events - msc2716_redactions: bool # MSC3389: Protect relation information from redaction. msc3389_relation_redactions: bool # MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of @@ -130,8 +125,6 @@ class RoomVersions: msc3083_join_rules=False, msc3375_redaction_rules=False, msc2403_knocking=False, - msc2716_historical=False, - msc2716_redactions=False, msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, @@ -153,8 +146,6 @@ class RoomVersions: msc3083_join_rules=False, msc3375_redaction_rules=False, msc2403_knocking=False, - msc2716_historical=False, - msc2716_redactions=False, msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, @@ -176,8 +167,6 @@ class RoomVersions: msc3083_join_rules=False, msc3375_redaction_rules=False, msc2403_knocking=False, - msc2716_historical=False, - msc2716_redactions=False, msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, @@ -199,8 +188,6 @@ class RoomVersions: msc3083_join_rules=False, msc3375_redaction_rules=False, msc2403_knocking=False, - msc2716_historical=False, - msc2716_redactions=False, msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, @@ -222,8 +209,6 @@ class RoomVersions: msc3083_join_rules=False, msc3375_redaction_rules=False, msc2403_knocking=False, - msc2716_historical=False, - msc2716_redactions=False, msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, @@ -245,8 +230,6 @@ class RoomVersions: msc3083_join_rules=False, msc3375_redaction_rules=False, msc2403_knocking=False, - msc2716_historical=False, - msc2716_redactions=False, msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, @@ -268,8 +251,6 @@ class RoomVersions: msc3083_join_rules=False, msc3375_redaction_rules=False, msc2403_knocking=False, - msc2716_historical=False, - msc2716_redactions=False, msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, @@ -291,8 +272,6 @@ class RoomVersions: msc3083_join_rules=False, msc3375_redaction_rules=False, msc2403_knocking=True, - msc2716_historical=False, - msc2716_redactions=False, msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, @@ -314,8 +293,6 @@ class RoomVersions: msc3083_join_rules=True, msc3375_redaction_rules=False, msc2403_knocking=True, - msc2716_historical=False, - msc2716_redactions=False, msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, @@ -337,8 +314,6 @@ class RoomVersions: 
msc3083_join_rules=True, msc3375_redaction_rules=True, msc2403_knocking=True, - msc2716_historical=False, - msc2716_redactions=False, msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, @@ -360,8 +335,6 @@ class RoomVersions: msc3083_join_rules=True, msc3375_redaction_rules=True, msc2403_knocking=True, - msc2716_historical=False, - msc2716_redactions=False, msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=False, @@ -383,8 +356,6 @@ class RoomVersions: msc3083_join_rules=True, msc3375_redaction_rules=True, msc2403_knocking=True, - msc2716_historical=False, - msc2716_redactions=False, msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, @@ -406,8 +377,6 @@ class RoomVersions: msc3083_join_rules=True, msc3375_redaction_rules=True, msc2403_knocking=True, - msc2716_historical=False, - msc2716_redactions=False, msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=True, @@ -415,29 +384,6 @@ class RoomVersions: msc3931_push_features=(), msc3989_redaction_rules=False, ) - MSC2716v4 = RoomVersion( - "org.matrix.msc2716v4", - RoomDisposition.UNSTABLE, - EventFormatVersions.ROOM_V4_PLUS, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=False, - strict_canonicaljson=True, - limit_notifications_power_levels=True, - msc2175_implicit_room_creator=False, - msc2176_redaction_rules=False, - msc3083_join_rules=False, - msc3375_redaction_rules=False, - msc2403_knocking=True, - msc2716_historical=True, - msc2716_redactions=True, - msc3389_relation_redactions=False, - msc3787_knock_restricted_join_rule=False, - msc3667_int_only_power_levels=False, - msc3821_redaction_rules=False, - msc3931_push_features=(), - msc3989_redaction_rules=False, - ) MSC1767v10 = RoomVersion( # MSC1767 (Extensible Events) based on room version "10" "org.matrix.msc1767.10", @@ -453,8 +399,6 @@ class RoomVersions: msc3083_join_rules=True, msc3375_redaction_rules=True, msc2403_knocking=True, - msc2716_historical=False, - msc2716_redactions=False, msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=True, @@ -476,8 +420,6 @@ class RoomVersions: msc3083_join_rules=True, msc3375_redaction_rules=True, msc2403_knocking=True, - msc2716_historical=False, - msc2716_redactions=False, msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=True, @@ -500,8 +442,6 @@ class RoomVersions: msc3083_join_rules=True, msc3375_redaction_rules=True, msc2403_knocking=True, - msc2716_historical=False, - msc2716_redactions=False, msc3389_relation_redactions=False, msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=True, @@ -526,7 +466,6 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = { RoomVersions.V9, RoomVersions.MSC3787, RoomVersions.V10, - RoomVersions.MSC2716v4, RoomVersions.MSC3989, RoomVersions.MSC3820opt2, ) diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 909ebccf78..7406c3948c 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -83,7 +83,6 @@ from synapse.storage.databases.main.receipts import ReceiptsWorkerStore from synapse.storage.databases.main.registration import RegistrationWorkerStore from synapse.storage.databases.main.relations import RelationsWorkerStore from synapse.storage.databases.main.room 
import RoomWorkerStore -from synapse.storage.databases.main.room_batch import RoomBatchStore from synapse.storage.databases.main.roommember import RoomMemberWorkerStore from synapse.storage.databases.main.search import SearchStore from synapse.storage.databases.main.session import SessionStore @@ -120,7 +119,6 @@ class GenericWorkerStore( # the races it creates aren't too bad. KeyStore, RoomWorkerStore, - RoomBatchStore, DirectoryWorkerStore, PushRulesWorkerStore, ApplicationServiceTransactionWorkerStore, diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 1d5b5ded45..8e0f5356b4 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -247,9 +247,6 @@ class ExperimentalConfig(Config): # MSC3026 (busy presence state) self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False) - # MSC2716 (importing historical messages) - self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False) - # MSC3244 (room version capabilities) self.msc3244_enabled: bool = experimental.get("msc3244_enabled", True) diff --git a/synapse/event_auth.py b/synapse/event_auth.py index b4b43ec4d7..3aaf53dfbd 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -339,13 +339,6 @@ def check_state_dependent_auth_rules( if event.type == EventTypes.Redaction: check_redaction(event.room_version, event, auth_dict) - if ( - event.type == EventTypes.MSC2716_INSERTION - or event.type == EventTypes.MSC2716_BATCH - or event.type == EventTypes.MSC2716_MARKER - ): - check_historical(event.room_version, event, auth_dict) - logger.debug("Allowing! %s", event) @@ -365,7 +358,6 @@ LENIENT_EVENT_BYTE_LIMITS_ROOM_VERSIONS = { RoomVersions.V9, RoomVersions.MSC3787, RoomVersions.V10, - RoomVersions.MSC2716v4, RoomVersions.MSC1767v10, } @@ -823,38 +815,6 @@ def check_redaction( raise AuthError(403, "You don't have permission to redact events") -def check_historical( - room_version_obj: RoomVersion, - event: "EventBase", - auth_events: StateMap["EventBase"], -) -> None: - """Check whether the event sender is allowed to send historical related - events like "insertion", "batch", and "marker". - - Returns: - None - - Raises: - AuthError if the event sender is not allowed to send historical related events - ("insertion", "batch", and "marker"). 
- """ - # Ignore the auth checks in room versions that do not support historical - # events - if not room_version_obj.msc2716_historical: - return - - user_level = get_user_power_level(event.user_id, auth_events) - - historical_level = get_named_level(auth_events, "historical", 100) - - if user_level < historical_level: - raise UnstableSpecAuthError( - 403, - 'You don\'t have permission to send send historical related events ("insertion", "batch", and "marker")', - errcode=Codes.INSUFFICIENT_POWER, - ) - - def _check_power_levels( room_version_obj: RoomVersion, event: "EventBase", diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index de7e5be42b..75b62adb33 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -198,7 +198,6 @@ class _EventInternalMetadata: soft_failed: DictProperty[bool] = DictProperty("soft_failed") proactively_send: DictProperty[bool] = DictProperty("proactively_send") redacted: DictProperty[bool] = DictProperty("redacted") - historical: DictProperty[bool] = DictProperty("historical") txn_id: DictProperty[str] = DictProperty("txn_id") """The transaction ID, if it was set when the event was created.""" @@ -288,14 +287,6 @@ class _EventInternalMetadata: """ return self._dict.get("redacted", False) - def is_historical(self) -> bool: - """Whether this is a historical message. - This is used by the batchsend historical message endpoint and - is needed to and mark the event as backfilled and skip some checks - like push notifications. - """ - return self._dict.get("historical", False) - def is_notifiable(self) -> bool: """Whether this event can trigger a push notification""" return not self.is_outlier() or self.is_out_of_band_membership() diff --git a/synapse/events/utils.py b/synapse/events/utils.py index e7b7b78b84..a55efcca56 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -164,21 +164,12 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic if room_version.msc2176_redaction_rules: add_fields("invite") - if room_version.msc2716_historical: - add_fields("historical") - elif event_type == EventTypes.Aliases and room_version.special_case_aliases_auth: add_fields("aliases") elif event_type == EventTypes.RoomHistoryVisibility: add_fields("history_visibility") elif event_type == EventTypes.Redaction and room_version.msc2176_redaction_rules: add_fields("redacts") - elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_INSERTION: - add_fields(EventContentFields.MSC2716_NEXT_BATCH_ID) - elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_BATCH: - add_fields(EventContentFields.MSC2716_BATCH_ID) - elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_MARKER: - add_fields(EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE) # Protect the rel_type and event_id fields under the m.relates_to field. 
if room_version.msc3389_relation_redactions: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index b7b5e21020..cc5ed97730 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -105,14 +105,12 @@ backfill_processing_before_timer = Histogram( ) +# TODO: We can refactor this away now that there is only one backfill point again class _BackfillPointType(Enum): # a regular backwards extremity (ie, an event which we don't yet have, but which # is referred to by other events in the DAG) BACKWARDS_EXTREMITY = enum.auto() - # an MSC2716 "insertion event" - INSERTION_PONT = enum.auto() - @attr.s(slots=True, auto_attribs=True, frozen=True) class _BackfillPoint: @@ -273,32 +271,10 @@ class FederationHandler: ) ] - insertion_events_to_be_backfilled: List[_BackfillPoint] = [] - if self.hs.config.experimental.msc2716_enabled: - insertion_events_to_be_backfilled = [ - _BackfillPoint(event_id, depth, _BackfillPointType.INSERTION_PONT) - for event_id, depth in await self.store.get_insertion_event_backward_extremities_in_room( - room_id=room_id, - current_depth=current_depth, - # We only need to end up with 5 extremities combined with - # the backfill points to make the `/backfill` request ... - # (see the other comment above for more context). - limit=50, - ) - ] - logger.debug( - "_maybe_backfill_inner: backwards_extremities=%s insertion_events_to_be_backfilled=%s", - backwards_extremities, - insertion_events_to_be_backfilled, - ) - # we now have a list of potential places to backpaginate from. We prefer to # start with the most recent (ie, max depth), so let's sort the list. sorted_backfill_points: List[_BackfillPoint] = sorted( - itertools.chain( - backwards_extremities, - insertion_events_to_be_backfilled, - ), + backwards_extremities, key=lambda e: -int(e.depth), ) @@ -411,10 +387,7 @@ class FederationHandler: # event but not anything before it. This would require looking at the # state *before* the event, ignoring the special casing certain event # types have. - if bp.type == _BackfillPointType.INSERTION_PONT: - event_ids_to_check = [bp.event_id] - else: - event_ids_to_check = await self.store.get_successor_events(bp.event_id) + event_ids_to_check = await self.store.get_successor_events(bp.event_id) events_to_check = await self.store.get_events_as_list( event_ids_to_check, diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 42141d3670..d32d224d56 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -601,18 +601,6 @@ class FederationEventHandler: room_id, [(event, context)] ) - # If we're joining the room again, check if there is new marker - # state indicating that there is new history imported somewhere in - # the DAG. Multiple markers can exist in the current state with - # unique state_keys. - # - # Do this after the state from the remote join was persisted (via - # `persist_events_and_notify`). 
Otherwise we can run into a - # situation where the create event doesn't exist yet in the - # `current_state_events` - for e in state: - await self._handle_marker_event(origin, e) - return stream_id_after_persist async def update_state_for_partial_state_event( @@ -915,13 +903,6 @@ class FederationEventHandler: ) ) - # We construct the event lists in source order from `/backfill` response because - # it's a) easiest, but also b) the order in which we process things matters for - # MSC2716 historical batches because many historical events are all at the same - # `depth` and we rely on the tenuous sort that the other server gave us and hope - # they're doing their best. The brittle nature of this ordering for historical - # messages over federation is one of the reasons why we don't want to continue - # on MSC2716 until we have online topological ordering. events_with_failed_pull_attempts, fresh_events = partition( new_events, lambda e: e.event_id in event_ids_with_failed_pull_attempts ) @@ -1460,8 +1441,6 @@ class FederationEventHandler: await self._run_push_actions_and_persist_event(event, context, backfilled) - await self._handle_marker_event(origin, event) - if backfilled or context.rejected: return @@ -1559,94 +1538,6 @@ class FederationEventHandler: except Exception: logger.exception("Failed to resync device for %s", sender) - @trace - async def _handle_marker_event(self, origin: str, marker_event: EventBase) -> None: - """Handles backfilling the insertion event when we receive a marker - event that points to one. - - Args: - origin: Origin of the event. Will be called to get the insertion event - marker_event: The event to process - """ - - if marker_event.type != EventTypes.MSC2716_MARKER: - # Not a marker event - return - - if marker_event.rejected_reason is not None: - # Rejected event - return - - # Skip processing a marker event if the room version doesn't - # support it or the event is not from the room creator. 
- room_version = await self._store.get_room_version(marker_event.room_id) - create_event = await self._store.get_create_event_for_room(marker_event.room_id) - if not room_version.msc2175_implicit_room_creator: - room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR) - else: - room_creator = create_event.sender - if not room_version.msc2716_historical and ( - not self._config.experimental.msc2716_enabled - or marker_event.sender != room_creator - ): - return - - logger.debug("_handle_marker_event: received %s", marker_event) - - insertion_event_id = marker_event.content.get( - EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE - ) - - if insertion_event_id is None: - # Nothing to retrieve then (invalid marker) - return - - already_seen_insertion_event = await self._store.have_seen_event( - marker_event.room_id, insertion_event_id - ) - if already_seen_insertion_event: - # No need to process a marker again if we have already seen the - # insertion event that it was pointing to - return - - logger.debug( - "_handle_marker_event: backfilling insertion event %s", insertion_event_id - ) - - await self._get_events_and_persist( - origin, - marker_event.room_id, - [insertion_event_id], - ) - - insertion_event = await self._store.get_event( - insertion_event_id, allow_none=True - ) - if insertion_event is None: - logger.warning( - "_handle_marker_event: server %s didn't return insertion event %s for marker %s", - origin, - insertion_event_id, - marker_event.event_id, - ) - return - - logger.debug( - "_handle_marker_event: succesfully backfilled insertion event %s from marker event %s", - insertion_event, - marker_event, - ) - - await self._store.insert_insertion_extremity( - insertion_event_id, marker_event.room_id - ) - - logger.debug( - "_handle_marker_event: insertion extremity added for %s from marker event %s", - insertion_event, - marker_event, - ) - async def backfill_event_id( self, destinations: List[str], room_id: str, event_id: str ) -> PulledPduInfo: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 0b61c2272b..4292b47037 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -60,7 +60,6 @@ from synapse.replication.http.send_event import ReplicationSendEventRestServlet from synapse.replication.http.send_events import ReplicationSendEventsRestServlet from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import ( - MutableStateMap, PersistedEventPosition, Requester, RoomAlias, @@ -573,7 +572,6 @@ class EventCreationHandler: state_event_ids: Optional[List[str]] = None, require_consent: bool = True, outlier: bool = False, - historical: bool = False, depth: Optional[int] = None, state_map: Optional[StateMap[str]] = None, for_batch: bool = False, @@ -599,7 +597,7 @@ class EventCreationHandler: allow_no_prev_events: Whether to allow this event to be created an empty list of prev_events. Normally this is prohibited just because most events should have a prev_event and we should only use this in special - cases like MSC2716. + cases (previously useful for MSC2716). prev_event_ids: the forward extremities to use as the prev_events for the new event. @@ -614,13 +612,10 @@ class EventCreationHandler: If non-None, prev_event_ids must also be provided. state_event_ids: - The full state at a given event. This is used particularly by the MSC2716 - /batch_send endpoint. 
One use case is with insertion events which float at - the beginning of a historical batch and don't have any `prev_events` to - derive from; we add all of these state events as the explicit state so the - rest of the historical batch can inherit the same state and state_group. - This should normally be left as None, which will cause the auth_event_ids - to be calculated based on the room state at the prev_events. + The full state at a given event. This was previously used particularly + by the MSC2716 /batch_send endpoint. This should normally be left as + None, which will cause the auth_event_ids to be calculated based on the + room state at the prev_events. require_consent: Whether to check if the requester has consented to the privacy policy. @@ -629,10 +624,6 @@ class EventCreationHandler: it's from an arbitrary point and floating in the DAG as opposed to being inline with the current DAG. - historical: Indicates whether the message is being inserted - back in time around some existing events. This is used to skip - a few checks and mark the event as backfilled. - depth: Override the depth used to order the event in the DAG. Should normally be set to None, which will cause the depth to be calculated based on the prev_events. @@ -717,8 +708,6 @@ class EventCreationHandler: builder.internal_metadata.outlier = outlier - builder.internal_metadata.historical = historical - event, unpersisted_context = await self.create_new_client_event( builder=builder, requester=requester, @@ -947,7 +936,6 @@ class EventCreationHandler: txn_id: Optional[str] = None, ignore_shadow_ban: bool = False, outlier: bool = False, - historical: bool = False, depth: Optional[int] = None, ) -> Tuple[EventBase, int]: """ @@ -961,19 +949,16 @@ class EventCreationHandler: allow_no_prev_events: Whether to allow this event to be created an empty list of prev_events. Normally this is prohibited just because most events should have a prev_event and we should only use this in special - cases like MSC2716. + cases (previously useful for MSC2716). prev_event_ids: The event IDs to use as the prev events. Should normally be left as None to automatically request them from the database. state_event_ids: - The full state at a given event. This is used particularly by the MSC2716 - /batch_send endpoint. One use case is with insertion events which float at - the beginning of a historical batch and don't have any `prev_events` to - derive from; we add all of these state events as the explicit state so the - rest of the historical batch can inherit the same state and state_group. - This should normally be left as None, which will cause the auth_event_ids - to be calculated based on the room state at the prev_events. + The full state at a given event. This was previously used particularly + by the MSC2716 /batch_send endpoint. This should normally be left as + None, which will cause the auth_event_ids to be calculated based on the + room state at the prev_events. ratelimit: Whether to rate limit this send. txn_id: The transaction ID. ignore_shadow_ban: True if shadow-banned users should be allowed to @@ -981,9 +966,6 @@ class EventCreationHandler: outlier: Indicates whether the event is an `outlier`, i.e. if it's from an arbitrary point and floating in the DAG as opposed to being inline with the current DAG. - historical: Indicates whether the message is being inserted - back in time around some existing events. This is used to skip - a few checks and mark the event as backfilled. 
depth: Override the depth used to order the event in the DAG. Should normally be set to None, which will cause the depth to be calculated based on the prev_events. @@ -1053,7 +1035,6 @@ class EventCreationHandler: prev_event_ids=prev_event_ids, state_event_ids=state_event_ids, outlier=outlier, - historical=historical, depth=depth, ) context = await unpersisted_context.persist(event) @@ -1145,7 +1126,7 @@ class EventCreationHandler: allow_no_prev_events: Whether to allow this event to be created an empty list of prev_events. Normally this is prohibited just because most events should have a prev_event and we should only use this in special - cases like MSC2716. + cases (previously useful for MSC2716). prev_event_ids: the forward extremities to use as the prev_events for the new event. @@ -1158,13 +1139,10 @@ class EventCreationHandler: based on the room state at the prev_events. state_event_ids: - The full state at a given event. This is used particularly by the MSC2716 - /batch_send endpoint. One use case is with insertion events which float at - the beginning of a historical batch and don't have any `prev_events` to - derive from; we add all of these state events as the explicit state so the - rest of the historical batch can inherit the same state and state_group. - This should normally be left as None, which will cause the auth_event_ids - to be calculated based on the room state at the prev_events. + The full state at a given event. This was previously used particularly + by the MSC2716 /batch_send endpoint. This should normally be left as + None, which will cause the auth_event_ids to be calculated based on the + room state at the prev_events. depth: Override the depth used to order the event in the DAG. Should normally be set to None, which will cause the depth to be calculated @@ -1261,52 +1239,6 @@ class EventCreationHandler: if builder.internal_metadata.outlier: event.internal_metadata.outlier = True context = EventContext.for_outlier(self._storage_controllers) - elif ( - event.type == EventTypes.MSC2716_INSERTION - and state_event_ids - and builder.internal_metadata.is_historical() - ): - # Add explicit state to the insertion event so it has state to derive - # from even though it's floating with no `prev_events`. The rest of - # the batch can derive from this state and state_group. - # - # TODO(faster_joins): figure out how this works, and make sure that the - # old state is complete. - # https://github.com/matrix-org/synapse/issues/13003 - metadata = await self.store.get_metadata_for_events(state_event_ids) - - state_map_for_event: MutableStateMap[str] = {} - for state_id in state_event_ids: - data = metadata.get(state_id) - if data is None: - # We're trying to persist a new historical batch of events - # with the given state, e.g. via - # `RoomBatchSendEventRestServlet`. The state can be inferred - # by Synapse or set directly by the client. - # - # Either way, we should have persisted all the state before - # getting here. - raise Exception( - f"State event {state_id} not found in DB," - " Synapse should have persisted it before using it." 
- ) - - if data.state_key is None: - raise Exception( - f"Trying to set non-state event {state_id} as state" - ) - - state_map_for_event[(data.event_type, data.state_key)] = state_id - - # TODO(faster_joins): check how MSC2716 works and whether we can have - # partial state here - # https://github.com/matrix-org/synapse/issues/13003 - context = await self.state.calculate_context_info( - event, - state_ids_before_event=state_map_for_event, - partial_state=False, - ) - else: context = await self.state.calculate_context_info(event) @@ -1876,28 +1808,6 @@ class EventCreationHandler: 403, "Redacting server ACL events is not permitted" ) - # Add a little safety stop-gap to prevent people from trying to - # redact MSC2716 related events when they're in a room version - # which does not support it yet. We allow people to use MSC2716 - # events in existing room versions but only from the room - # creator since it does not require any changes to the auth - # rules and in effect, the redaction algorithm . In the - # supported room version, we add the `historical` power level to - # auth the MSC2716 related events and adjust the redaction - # algorthim to keep the `historical` field around (redacting an - # event should only strip fields which don't affect the - # structural protocol level). - is_msc2716_event = ( - original_event.type == EventTypes.MSC2716_INSERTION - or original_event.type == EventTypes.MSC2716_BATCH - or original_event.type == EventTypes.MSC2716_MARKER - ) - if not room_version_obj.msc2716_historical and is_msc2716_event: - raise AuthError( - 403, - "Redacting MSC2716 events is not supported in this room version", - ) - event_types = event_auth.auth_types_for_event(event.room_version, event) prev_state_ids = await context.get_prev_state_ids( StateFilter.from_types(event_types) @@ -1935,58 +1845,12 @@ class EventCreationHandler: if prev_state_ids: raise AuthError(403, "Changing the room create event is forbidden") - if event.type == EventTypes.MSC2716_INSERTION: - room_version = await self.store.get_room_version_id(event.room_id) - room_version_obj = KNOWN_ROOM_VERSIONS[room_version] - - create_event = await self.store.get_create_event_for_room(event.room_id) - if not room_version_obj.msc2175_implicit_room_creator: - room_creator = create_event.content.get( - EventContentFields.ROOM_CREATOR - ) - else: - room_creator = create_event.sender - - # Only check an insertion event if the room version - # supports it or the event is from the room creator. - if room_version_obj.msc2716_historical or ( - self.config.experimental.msc2716_enabled - and event.sender == room_creator - ): - next_batch_id = event.content.get( - EventContentFields.MSC2716_NEXT_BATCH_ID - ) - conflicting_insertion_event_id = None - if next_batch_id: - conflicting_insertion_event_id = ( - await self.store.get_insertion_event_id_by_batch_id( - event.room_id, next_batch_id - ) - ) - if conflicting_insertion_event_id is not None: - # The current insertion event that we're processing is invalid - # because an insertion event already exists in the room with the - # same next_batch_id. We can't allow multiple because the batch - # pointing will get weird, e.g. we can't determine which insertion - # event the batch event is pointing to. 
- raise SynapseError( - HTTPStatus.BAD_REQUEST, - "Another insertion event already exists with the same next_batch_id", - errcode=Codes.INVALID_PARAM, - ) - - # Mark any `m.historical` messages as backfilled so they don't appear - # in `/sync` and have the proper decrementing `stream_ordering` as we import - backfilled = False - if event.internal_metadata.is_historical(): - backfilled = True - assert self._storage_controllers.persistence is not None ( persisted_events, max_stream_token, ) = await self._storage_controllers.persistence.persist_events( - events_and_context, backfilled=backfilled + events_and_context, ) events_and_pos = [] diff --git a/synapse/handlers/room_batch.py b/synapse/handlers/room_batch.py deleted file mode 100644 index bf9df60218..0000000000 --- a/synapse/handlers/room_batch.py +++ /dev/null @@ -1,466 +0,0 @@ -import logging -from typing import TYPE_CHECKING, List, Tuple - -from synapse.api.constants import EventContentFields, EventTypes -from synapse.appservice import ApplicationService -from synapse.http.servlet import assert_params_in_dict -from synapse.types import JsonDict, Requester, UserID, create_requester -from synapse.util.stringutils import random_string - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -class RoomBatchHandler: - def __init__(self, hs: "HomeServer"): - self.hs = hs - self.store = hs.get_datastores().main - self._state_storage_controller = hs.get_storage_controllers().state - self.event_creation_handler = hs.get_event_creation_handler() - self.room_member_handler = hs.get_room_member_handler() - self.auth = hs.get_auth() - - async def inherit_depth_from_prev_ids(self, prev_event_ids: List[str]) -> int: - """Finds the depth which would sort it after the most-recent - prev_event_id but before the successors of those events. If no - successors are found, we assume it's an historical extremity part of the - current batch and use the same depth of the prev_event_ids. - - Args: - prev_event_ids: List of prev event IDs - - Returns: - Inherited depth - """ - ( - most_recent_prev_event_id, - most_recent_prev_event_depth, - ) = await self.store.get_max_depth_of(prev_event_ids) - - # We want to insert the historical event after the `prev_event` but before the successor event - # - # We inherit depth from the successor event instead of the `prev_event` - # because events returned from `/messages` are first sorted by `topological_ordering` - # which is just the `depth` and then tie-break with `stream_ordering`. - # - # We mark these inserted historical events as "backfilled" which gives them a - # negative `stream_ordering`. If we use the same depth as the `prev_event`, - # then our historical event will tie-break and be sorted before the `prev_event` - # when it should come after. - # - # We want to use the successor event depth so they appear after `prev_event` because - # it has a larger `depth` but before the successor event because the `stream_ordering` - # is negative before the successor event. - assert most_recent_prev_event_id is not None - successor_event_ids = await self.store.get_successor_events( - most_recent_prev_event_id - ) - - # If we can't find any successor events, then it's a forward extremity of - # historical messages and we can just inherit from the previous historical - # event which we can already assume has the correct depth where we want - # to insert into. 
- if not successor_event_ids: - depth = most_recent_prev_event_depth - else: - ( - _, - oldest_successor_depth, - ) = await self.store.get_min_depth_of(successor_event_ids) - - depth = oldest_successor_depth - - return depth - - def create_insertion_event_dict( - self, sender: str, room_id: str, origin_server_ts: int - ) -> JsonDict: - """Creates an event dict for an "insertion" event with the proper fields - and a random batch ID. - - Args: - sender: The event author MXID - room_id: The room ID that the event belongs to - origin_server_ts: Timestamp when the event was sent - - Returns: - The new event dictionary to insert. - """ - - next_batch_id = random_string(8) - insertion_event = { - "type": EventTypes.MSC2716_INSERTION, - "sender": sender, - "room_id": room_id, - "content": { - EventContentFields.MSC2716_NEXT_BATCH_ID: next_batch_id, - EventContentFields.MSC2716_HISTORICAL: True, - }, - "origin_server_ts": origin_server_ts, - } - - return insertion_event - - async def create_requester_for_user_id_from_app_service( - self, user_id: str, app_service: ApplicationService - ) -> Requester: - """Creates a new requester for the given user_id - and validates that the app service is allowed to control - the given user. - - Args: - user_id: The author MXID that the app service is controlling - app_service: The app service that controls the user - - Returns: - Requester object - """ - - await self.auth.validate_appservice_can_control_user_id(app_service, user_id) - - return create_requester(user_id, app_service=app_service) - - async def get_most_recent_full_state_ids_from_event_id_list( - self, event_ids: List[str] - ) -> List[str]: - """Find the most recent event_id and grab the full state at that event. - We will use this as a base to auth our historical messages against. - - Args: - event_ids: List of event ID's to look at - - Returns: - List of event ID's - """ - - ( - most_recent_event_id, - _, - ) = await self.store.get_max_depth_of(event_ids) - # mapping from (type, state_key) -> state_event_id - assert most_recent_event_id is not None - prev_state_map = await self._state_storage_controller.get_state_ids_for_event( - most_recent_event_id - ) - # List of state event ID's - full_state_ids = list(prev_state_map.values()) - - return full_state_ids - - async def persist_state_events_at_start( - self, - state_events_at_start: List[JsonDict], - room_id: str, - initial_state_event_ids: List[str], - app_service_requester: Requester, - ) -> List[str]: - """Takes all `state_events_at_start` event dictionaries and creates/persists - them in a floating state event chain which don't resolve into the current room - state. They are floating because they reference no prev_events which disconnects - them from the normal DAG. - - Args: - state_events_at_start: - room_id: Room where you want the events persisted in. - initial_state_event_ids: - The base set of state for the historical batch which the floating - state chain will derive from. This should probably be the state - from the `prev_event` defined by `/batch_send?prev_event_id=$abc`. - app_service_requester: The requester of an application service. - - Returns: - List of state event ID's we just persisted - """ - assert app_service_requester.app_service - - state_event_ids_at_start = [] - state_event_ids = initial_state_event_ids.copy() - - # Make the state events float off on their own by specifying no - # prev_events for the first one in the chain so we don't have a bunch of - # `@mxid joined the room` noise between each batch. 
- prev_event_ids_for_state_chain: List[str] = [] - - for index, state_event in enumerate(state_events_at_start): - assert_params_in_dict( - state_event, ["type", "origin_server_ts", "content", "sender"] - ) - - logger.debug( - "RoomBatchSendEventRestServlet inserting state_event=%s", state_event - ) - - event_dict = { - "type": state_event["type"], - "origin_server_ts": state_event["origin_server_ts"], - "content": state_event["content"], - "room_id": room_id, - "sender": state_event["sender"], - "state_key": state_event["state_key"], - } - - # Mark all events as historical - event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True - - # TODO: This is pretty much the same as some other code to handle inserting state in this file - if event_dict["type"] == EventTypes.Member: - membership = event_dict["content"].get("membership", None) - event_id, _ = await self.room_member_handler.update_membership( - await self.create_requester_for_user_id_from_app_service( - state_event["sender"], app_service_requester.app_service - ), - target=UserID.from_string(event_dict["state_key"]), - room_id=room_id, - action=membership, - content=event_dict["content"], - historical=True, - # Only the first event in the state chain should be floating. - # The rest should hang off each other in a chain. - allow_no_prev_events=index == 0, - prev_event_ids=prev_event_ids_for_state_chain, - # The first event in the state chain is floating with no - # `prev_events` which means it can't derive state from - # anywhere automatically. So we need to set some state - # explicitly. - # - # Make sure to use a copy of this list because we modify it - # later in the loop here. Otherwise it will be the same - # reference and also update in the event when we append - # later. - state_event_ids=state_event_ids.copy(), - ) - else: - ( - event, - _, - ) = await self.event_creation_handler.create_and_send_nonmember_event( - await self.create_requester_for_user_id_from_app_service( - state_event["sender"], app_service_requester.app_service - ), - event_dict, - historical=True, - # Only the first event in the state chain should be floating. - # The rest should hang off each other in a chain. - allow_no_prev_events=index == 0, - prev_event_ids=prev_event_ids_for_state_chain, - # The first event in the state chain is floating with no - # `prev_events` which means it can't derive state from - # anywhere automatically. So we need to set some state - # explicitly. - # - # Make sure to use a copy of this list because we modify it - # later in the loop here. Otherwise it will be the same - # reference and also update in the event when we append later. - state_event_ids=state_event_ids.copy(), - ) - event_id = event.event_id - - state_event_ids_at_start.append(event_id) - state_event_ids.append(event_id) - # Connect all the state in a floating chain - prev_event_ids_for_state_chain = [event_id] - - return state_event_ids_at_start - - async def persist_historical_events( - self, - events_to_create: List[JsonDict], - room_id: str, - inherited_depth: int, - initial_state_event_ids: List[str], - app_service_requester: Requester, - ) -> List[str]: - """Create and persists all events provided sequentially. Handles the - complexity of creating events in chronological order so they can - reference each other by prev_event but still persists in - reverse-chronoloical order so they have the correct - (topological_ordering, stream_ordering) and sort correctly from - /messages. 
- - Args: - events_to_create: List of historical events to create in JSON - dictionary format. - room_id: Room where you want the events persisted in. - inherited_depth: The depth to create the events at (you will - probably by calling inherit_depth_from_prev_ids(...)). - initial_state_event_ids: - This is used to set explicit state for the insertion event at - the start of the historical batch since it's floating with no - prev_events to derive state from automatically. - app_service_requester: The requester of an application service. - - Returns: - List of persisted event IDs - """ - assert app_service_requester.app_service - - # We expect the first event in a historical batch to be an insertion event - assert events_to_create[0]["type"] == EventTypes.MSC2716_INSERTION - # We expect the last event in a historical batch to be an batch event - assert events_to_create[-1]["type"] == EventTypes.MSC2716_BATCH - - # Make the historical event chain float off on its own by specifying no - # prev_events for the first event in the chain which causes the HS to - # ask for the state at the start of the batch later. - prev_event_ids: List[str] = [] - - event_ids = [] - events_to_persist = [] - for index, ev in enumerate(events_to_create): - assert_params_in_dict(ev, ["type", "origin_server_ts", "content", "sender"]) - - assert self.hs.is_mine_id(ev["sender"]), "User must be our own: %s" % ( - ev["sender"], - ) - - event_dict = { - "type": ev["type"], - "origin_server_ts": ev["origin_server_ts"], - "content": ev["content"], - "room_id": room_id, - "sender": ev["sender"], # requester.user.to_string(), - "prev_events": prev_event_ids.copy(), - } - - # Mark all events as historical - event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True - - event, unpersisted_context = await self.event_creation_handler.create_event( - await self.create_requester_for_user_id_from_app_service( - ev["sender"], app_service_requester.app_service - ), - event_dict, - # Only the first event (which is the insertion event) in the - # chain should be floating. The rest should hang off each other - # in a chain. - allow_no_prev_events=index == 0, - prev_event_ids=event_dict.get("prev_events"), - # Since the first event (which is the insertion event) in the - # chain is floating with no `prev_events`, it can't derive state - # from anywhere automatically. So we need to set some state - # explicitly. - state_event_ids=initial_state_event_ids if index == 0 else None, - historical=True, - depth=inherited_depth, - ) - context = await unpersisted_context.persist(event) - assert context._state_group - - # Normally this is done when persisting the event but we have to - # pre-emptively do it here because we create all the events first, - # then persist them in another pass below. And we want to share - # state_groups across the whole batch so this lookup needs to work - # for the next event in the batch in this loop. - await self.store.store_state_group_id_for_event_id( - event_id=event.event_id, - state_group_id=context._state_group, - ) - - logger.debug( - "RoomBatchSendEventRestServlet inserting event=%s, prev_event_ids=%s", - event, - prev_event_ids, - ) - - events_to_persist.append((event, context)) - event_id = event.event_id - - event_ids.append(event_id) - prev_event_ids = [event_id] - - # Persist events in reverse-chronological order so they have the - # correct stream_ordering as they are backfilled (which decrements). 
- # Events are sorted by (topological_ordering, stream_ordering) - # where topological_ordering is just depth. - for event, context in reversed(events_to_persist): - # This call can't raise `PartialStateConflictError` since we forbid - # use of the historical batch API during partial state - await self.event_creation_handler.handle_new_client_event( - await self.create_requester_for_user_id_from_app_service( - event.sender, app_service_requester.app_service - ), - events_and_context=[(event, context)], - ) - - return event_ids - - async def handle_batch_of_events( - self, - events_to_create: List[JsonDict], - room_id: str, - batch_id_to_connect_to: str, - inherited_depth: int, - initial_state_event_ids: List[str], - app_service_requester: Requester, - ) -> Tuple[List[str], str]: - """ - Handles creating and persisting all of the historical events as well as - insertion and batch meta events to make the batch navigable in the DAG. - - Args: - events_to_create: List of historical events to create in JSON - dictionary format. - room_id: Room where you want the events created in. - batch_id_to_connect_to: The batch_id from the insertion event you - want this batch to connect to. - inherited_depth: The depth to create the events at (you will - probably by calling inherit_depth_from_prev_ids(...)). - initial_state_event_ids: - This is used to set explicit state for the insertion event at - the start of the historical batch since it's floating with no - prev_events to derive state from automatically. This should - probably be the state from the `prev_event` defined by - `/batch_send?prev_event_id=$abc` plus the outcome of - `persist_state_events_at_start` - app_service_requester: The requester of an application service. - - Returns: - Tuple containing a list of created events and the next_batch_id - """ - - # Connect this current batch to the insertion event from the previous batch - last_event_in_batch = events_to_create[-1] - batch_event = { - "type": EventTypes.MSC2716_BATCH, - "sender": app_service_requester.user.to_string(), - "room_id": room_id, - "content": { - EventContentFields.MSC2716_BATCH_ID: batch_id_to_connect_to, - EventContentFields.MSC2716_HISTORICAL: True, - }, - # Since the batch event is put at the end of the batch, - # where the newest-in-time event is, copy the origin_server_ts from - # the last event we're inserting - "origin_server_ts": last_event_in_batch["origin_server_ts"], - } - # Add the batch event to the end of the batch (newest-in-time) - events_to_create.append(batch_event) - - # Add an "insertion" event to the start of each batch (next to the oldest-in-time - # event in the batch) so the next batch can be connected to this one. 
- insertion_event = self.create_insertion_event_dict( - sender=app_service_requester.user.to_string(), - room_id=room_id, - # Since the insertion event is put at the start of the batch, - # where the oldest-in-time event is, copy the origin_server_ts from - # the first event we're inserting - origin_server_ts=events_to_create[0]["origin_server_ts"], - ) - next_batch_id = insertion_event["content"][ - EventContentFields.MSC2716_NEXT_BATCH_ID - ] - # Prepend the insertion event to the start of the batch (oldest-in-time) - events_to_create = [insertion_event] + events_to_create - - # Create and persist all of the historical events - event_ids = await self.persist_historical_events( - events_to_create=events_to_create, - room_id=room_id, - inherited_depth=inherited_depth, - initial_state_event_ids=initial_state_event_ids, - app_service_requester=app_service_requester, - ) - - return event_ids, next_batch_id diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 55df34bd06..82e4fa7363 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -362,7 +362,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): content: Optional[dict] = None, require_consent: bool = True, outlier: bool = False, - historical: bool = False, origin_server_ts: Optional[int] = None, ) -> Tuple[str, int]: """ @@ -378,16 +377,13 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): allow_no_prev_events: Whether to allow this event to be created an empty list of prev_events. Normally this is prohibited just because most events should have a prev_event and we should only use this in special - cases like MSC2716. + cases (previously useful for MSC2716). prev_event_ids: The event IDs to use as the prev events state_event_ids: - The full state at a given event. This is used particularly by the MSC2716 - /batch_send endpoint. One use case is the historical `state_events_at_start`; - since each is marked as an `outlier`, the `EventContext.for_outlier()` won't - have any `state_ids` set and therefore can't derive any state even though the - prev_events are set so we need to set them ourself via this argument. - This should normally be left as None, which will cause the auth_event_ids - to be calculated based on the room state at the prev_events. + The full state at a given event. This was previously used particularly + by the MSC2716 /batch_send endpoint. This should normally be left as + None, which will cause the auth_event_ids to be calculated based on the + room state at the prev_events. depth: Override the depth used to order the event in the DAG. Should normally be set to None, which will cause the depth to be calculated based on the prev_events. @@ -400,9 +396,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): outlier: Indicates whether the event is an `outlier`, i.e. if it's from an arbitrary point and floating in the DAG as opposed to being inline with the current DAG. - historical: Indicates whether the message is being inserted - back in time around some existing events. This is used to skip - a few checks and mark the event as backfilled. origin_server_ts: The origin_server_ts to use if a new event is created. Uses the current timestamp if set to None. 
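As context for the batch-send machinery this patch deletes: the `handle_batch_of_events` flow above framed every historical batch with two synthetic events, an "insertion" event prepended at the oldest-in-time end and a "batch" event appended at the newest-in-time end, chained across batches via `next_batch_id`. A condensed, illustrative sketch of that shape (a hypothetical `frame_historical_batch` helper, not part of Synapse; prev_events, depth and persistence are omitted), using the same constants the removed handler used:

    from typing import List, Tuple

    from synapse.api.constants import EventContentFields, EventTypes
    from synapse.types import JsonDict
    from synapse.util.stringutils import random_string

    def frame_historical_batch(
        events: List[JsonDict],
        sender: str,
        room_id: str,
        batch_id_to_connect_to: str,
    ) -> Tuple[List[JsonDict], str]:
        # Fresh batch ID that the *next* (older) batch will connect to.
        next_batch_id = random_string(8)
        # Prepended at the oldest-in-time end of the batch, so it copies the
        # first event's timestamp.
        insertion_event: JsonDict = {
            "type": EventTypes.MSC2716_INSERTION,
            "sender": sender,
            "room_id": room_id,
            "content": {
                EventContentFields.MSC2716_NEXT_BATCH_ID: next_batch_id,
                EventContentFields.MSC2716_HISTORICAL: True,
            },
            "origin_server_ts": events[0]["origin_server_ts"],
        }
        # Appended at the newest-in-time end; points back at the previous
        # batch's insertion event via its batch ID.
        batch_event: JsonDict = {
            "type": EventTypes.MSC2716_BATCH,
            "sender": sender,
            "room_id": room_id,
            "content": {
                EventContentFields.MSC2716_BATCH_ID: batch_id_to_connect_to,
                EventContentFields.MSC2716_HISTORICAL: True,
            },
            "origin_server_ts": events[-1]["origin_server_ts"],
        }
        return [insertion_event] + events + [batch_event], next_batch_id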
@@ -477,7 +470,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): depth=depth, require_consent=require_consent, outlier=outlier, - historical=historical, ) context = await unpersisted_context.persist(event) prev_state_ids = await context.get_prev_state_ids( @@ -585,7 +577,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): new_room: bool = False, require_consent: bool = True, outlier: bool = False, - historical: bool = False, allow_no_prev_events: bool = False, prev_event_ids: Optional[List[str]] = None, state_event_ids: Optional[List[str]] = None, @@ -610,22 +601,16 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): outlier: Indicates whether the event is an `outlier`, i.e. if it's from an arbitrary point and floating in the DAG as opposed to being inline with the current DAG. - historical: Indicates whether the message is being inserted - back in time around some existing events. This is used to skip - a few checks and mark the event as backfilled. allow_no_prev_events: Whether to allow this event to be created an empty list of prev_events. Normally this is prohibited just because most events should have a prev_event and we should only use this in special - cases like MSC2716. + cases (previously useful for MSC2716). prev_event_ids: The event IDs to use as the prev events state_event_ids: - The full state at a given event. This is used particularly by the MSC2716 - /batch_send endpoint. One use case is the historical `state_events_at_start`; - since each is marked as an `outlier`, the `EventContext.for_outlier()` won't - have any `state_ids` set and therefore can't derive any state even though the - prev_events are set so we need to set them ourself via this argument. - This should normally be left as None, which will cause the auth_event_ids - to be calculated based on the room state at the prev_events. + The full state at a given event. This was previously used particularly + by the MSC2716 /batch_send endpoint. This should normally be left as + None, which will cause the auth_event_ids to be calculated based on the + room state at the prev_events. depth: Override the depth used to order the event in the DAG. Should normally be set to None, which will cause the depth to be calculated based on the prev_events. @@ -667,7 +652,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): new_room=new_room, require_consent=require_consent, outlier=outlier, - historical=historical, allow_no_prev_events=allow_no_prev_events, prev_event_ids=prev_event_ids, state_event_ids=state_event_ids, @@ -691,7 +675,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): new_room: bool = False, require_consent: bool = True, outlier: bool = False, - historical: bool = False, allow_no_prev_events: bool = False, prev_event_ids: Optional[List[str]] = None, state_event_ids: Optional[List[str]] = None, @@ -718,22 +701,16 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): outlier: Indicates whether the event is an `outlier`, i.e. if it's from an arbitrary point and floating in the DAG as opposed to being inline with the current DAG. - historical: Indicates whether the message is being inserted - back in time around some existing events. This is used to skip - a few checks and mark the event as backfilled. allow_no_prev_events: Whether to allow this event to be created an empty list of prev_events. Normally this is prohibited just because most events should have a prev_event and we should only use this in special - cases like MSC2716. + cases (previously useful for MSC2716). 
prev_event_ids: The event IDs to use as the prev events state_event_ids: - The full state at a given event. This is used particularly by the MSC2716 - /batch_send endpoint. One use case is the historical `state_events_at_start`; - since each is marked as an `outlier`, the `EventContext.for_outlier()` won't - have any `state_ids` set and therefore can't derive any state even though the - prev_events are set so we need to set them ourself via this argument. - This should normally be left as None, which will cause the auth_event_ids - to be calculated based on the room state at the prev_events. + The full state at a given event. This was previously used particularly + by the MSC2716 /batch_send endpoint. This should normally be left as + None, which will cause the auth_event_ids to be calculated based on the + room state at the prev_events. depth: Override the depth used to order the event in the DAG. Should normally be set to None, which will cause the depth to be calculated based on the prev_events. @@ -877,7 +854,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): content=content, require_consent=require_consent, outlier=outlier, - historical=historical, origin_server_ts=origin_server_ts, ) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 33002cc0f2..67377c647b 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -322,7 +322,6 @@ class BulkPushRuleEvaluator: ) -> None: if ( not event.internal_metadata.is_notifiable() - or event.internal_metadata.is_historical() or event.room_id in self.hs.config.server.rooms_to_exclude_from_sync ): # Push rules for events that aren't notifiable can't be processed by this and diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 1af8d99d20..df0845edb2 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -48,7 +48,6 @@ from synapse.rest.client import ( rendezvous, report_event, room, - room_batch, room_keys, room_upgrade_rest_servlet, sendtodevice, @@ -132,7 +131,6 @@ class ClientRestResource(JsonResource): user_directory.register_servlets(hs, client_resource) if is_main_process: room_upgrade_rest_servlet.register_servlets(hs, client_resource) - room_batch.register_servlets(hs, client_resource) capabilities.register_servlets(hs, client_resource) if is_main_process: account_validity.register_servlets(hs, client_resource) diff --git a/synapse/rest/client/room_batch.py b/synapse/rest/client/room_batch.py deleted file mode 100644 index 69f85112d8..0000000000 --- a/synapse/rest/client/room_batch.py +++ /dev/null @@ -1,254 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import logging -import re -from http import HTTPStatus -from typing import TYPE_CHECKING, Tuple - -from synapse.api.constants import EventContentFields -from synapse.api.errors import AuthError, Codes, SynapseError -from synapse.http.server import HttpServer -from synapse.http.servlet import ( - RestServlet, - assert_params_in_dict, - parse_json_object_from_request, - parse_string, - parse_strings_from_args, -) -from synapse.http.site import SynapseRequest -from synapse.types import JsonDict - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -class RoomBatchSendEventRestServlet(RestServlet): - """ - API endpoint which can insert a batch of events historically back in time - next to the given `prev_event`. - - `batch_id` comes from `next_batch_id` in the response of the batch send - endpoint and is derived from the "insertion" events added to each batch. - It's not required for the first batch send. - - `state_events_at_start` is used to define the historical state events - needed to auth the events like join events. These events will float - outside of the normal DAG as outliers and won't be visible in the chat - history which also allows us to insert multiple batches without having a bunch - of `@mxid joined the room` noise between each batch. - - `events` is a chronological list of events you want to insert. - There is a reverse-chronological constraint on batches so once you insert - some messages, you can only insert older ones after that. - tldr; Insert batches from your most recent history -> oldest history. - - POST /_matrix/client/unstable/org.matrix.msc2716/rooms/<roomID>/batch_send?prev_event_id=<eventID>&batch_id=<batchID> - { - "events": [ ... ], - "state_events_at_start": [ ... ] - } - """ - - PATTERNS = ( - re.compile( - "^/_matrix/client/unstable/org.matrix.msc2716" - "/rooms/(?P<room_id>[^/]*)/batch_send$" - ), - ) - CATEGORY = "Client API requests" - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.store = hs.get_datastores().main - self.event_creation_handler = hs.get_event_creation_handler() - self.auth = hs.get_auth() - self.room_batch_handler = hs.get_room_batch_handler() - - async def on_POST( - self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request, allow_guest=False) - - if not requester.app_service: - raise AuthError( - HTTPStatus.FORBIDDEN, - "Only application services can use the /batchsend endpoint", - ) - - body = parse_json_object_from_request(request) - assert_params_in_dict(body, ["state_events_at_start", "events"]) - - assert request.args is not None - prev_event_ids_from_query = parse_strings_from_args( - request.args, "prev_event_id" - ) - batch_id_from_query = parse_string(request, "batch_id") - - if prev_event_ids_from_query is None: - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "prev_event query parameter is required when inserting historical messages back in time", - errcode=Codes.MISSING_PARAM, - ) - - if await self.store.is_partial_state_room(room_id): - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "Cannot insert history batches until we have fully joined the room", - errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE, - ) - - # Verify that the batch_id_from_query corresponds to an actual insertion event - # so the batch can be connected.
- if batch_id_from_query: - corresponding_insertion_event_id = ( - await self.store.get_insertion_event_id_by_batch_id( - room_id, batch_id_from_query - ) - ) - if corresponding_insertion_event_id is None: - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "No insertion event corresponds to the given ?batch_id", - errcode=Codes.INVALID_PARAM, - ) - - # Make sure that the prev_event_ids exist and aren't outliers - ie, they are - # regular parts of the room DAG where we know the state. - non_outlier_prev_events = await self.store.have_events_in_timeline( - prev_event_ids_from_query - ) - for prev_event_id in prev_event_ids_from_query: - if prev_event_id not in non_outlier_prev_events: - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "prev_event %s does not exist, or is an outlier" % (prev_event_id,), - errcode=Codes.INVALID_PARAM, - ) - - # For the event we are inserting next to (`prev_event_ids_from_query`), - # find the most recent state events that allowed that message to be - # sent. We will use that as a base to auth our historical messages - # against. - state_event_ids = await self.room_batch_handler.get_most_recent_full_state_ids_from_event_id_list( - prev_event_ids_from_query - ) - - state_event_ids_at_start = [] - # Create and persist all of the state events that float off on their own - # before the batch. These will most likely be all of the invite/member - # state events used to auth the upcoming historical messages. - if body["state_events_at_start"]: - state_event_ids_at_start = ( - await self.room_batch_handler.persist_state_events_at_start( - state_events_at_start=body["state_events_at_start"], - room_id=room_id, - initial_state_event_ids=state_event_ids, - app_service_requester=requester, - ) - ) - # Update our ongoing auth event ID list with all of the new state we - # just created - state_event_ids.extend(state_event_ids_at_start) - - inherited_depth = await self.room_batch_handler.inherit_depth_from_prev_ids( - prev_event_ids_from_query - ) - - events_to_create = body["events"] - - # Figure out which batch to connect to. If they passed in - # batch_id_from_query let's use it. The batch ID passed in comes - # from the batch_id in the "insertion" event from the previous batch. - last_event_in_batch = events_to_create[-1] - base_insertion_event = None - if batch_id_from_query: - batch_id_to_connect_to = batch_id_from_query - # Otherwise, create an insertion event to act as a starting point. - # - # We don't always have an insertion event to start hanging more history - # off of (ideally there would be one in the main DAG, but that's not the - # case if we're wanting to add history to e.g. existing rooms without - # an insertion event), in which case we just create a new insertion event - # that can then get pointed to by a "marker" event later. 
- else: - base_insertion_event_dict = ( - self.room_batch_handler.create_insertion_event_dict( - sender=requester.user.to_string(), - room_id=room_id, - origin_server_ts=last_event_in_batch["origin_server_ts"], - ) - ) - base_insertion_event_dict["prev_events"] = prev_event_ids_from_query.copy() - - ( - base_insertion_event, - _, - ) = await self.event_creation_handler.create_and_send_nonmember_event( - await self.room_batch_handler.create_requester_for_user_id_from_app_service( - base_insertion_event_dict["sender"], - requester.app_service, - ), - base_insertion_event_dict, - prev_event_ids=base_insertion_event_dict.get("prev_events"), - # Also set the explicit state here because we want to resolve - # any `state_events_at_start` here too. It's not strictly - # necessary to accomplish anything but if someone asks for the - # state at this point, we probably want to show them the - # historical state that was part of this batch. - state_event_ids=state_event_ids, - historical=True, - depth=inherited_depth, - ) - - batch_id_to_connect_to = base_insertion_event.content[ - EventContentFields.MSC2716_NEXT_BATCH_ID - ] - - # Create and persist all of the historical events as well as insertion - # and batch meta events to make the batch navigable in the DAG. - event_ids, next_batch_id = await self.room_batch_handler.handle_batch_of_events( - events_to_create=events_to_create, - room_id=room_id, - batch_id_to_connect_to=batch_id_to_connect_to, - inherited_depth=inherited_depth, - initial_state_event_ids=state_event_ids, - app_service_requester=requester, - ) - - insertion_event_id = event_ids[0] - batch_event_id = event_ids[-1] - historical_event_ids = event_ids[1:-1] - - response_dict = { - "state_event_ids": state_event_ids_at_start, - "event_ids": historical_event_ids, - "next_batch_id": next_batch_id, - "insertion_event_id": insertion_event_id, - "batch_event_id": batch_event_id, - } - if base_insertion_event is not None: - response_dict["base_insertion_event_id"] = base_insertion_event.event_id - - return HTTPStatus.OK, response_dict - - -def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - msc2716_enabled = hs.config.experimental.msc2716_enabled - - if msc2716_enabled: - RoomBatchSendEventRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 1910648755..95400ba570 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -102,8 +102,6 @@ class VersionsRestServlet(RestServlet): "org.matrix.msc2285.stable": True, # TODO: Remove when MSC2285 becomes a part of the spec # Supports filtering of /publicRooms by room type as per MSC3827 "org.matrix.msc3827.stable": True, - # Adds support for importing historical messages as per MSC2716 - "org.matrix.msc2716": self.config.experimental.msc2716_enabled, # Adds support for thread relations, per MSC3440. "org.matrix.msc3440.stable": True, # TODO: remove when "v1.3" is added above # Support for thread read receipts & notification counts. 
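With the `org.matrix.msc2716` entry gone from `unstable_features`, clients lose the ability to feature-detect batch sending via `GET /_matrix/client/versions`. For illustration, a hypothetical probe (assuming the third-party `requests` library; `server_supports_batch_send` is not a real Synapse or SDK helper) of the flag that used to be advertised:

    import requests

    def server_supports_batch_send(base_url: str) -> bool:
        # Ask the homeserver which unstable features it advertises.
        resp = requests.get(f"{base_url}/_matrix/client/versions", timeout=10)
        resp.raise_for_status()
        unstable_features = resp.json().get("unstable_features", {})
        # Synapse only advertised this key while msc2716_enabled was set;
        # after this patch the key is never present, so this returns False.
        return unstable_features.get("org.matrix.msc2716", False)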
diff --git a/synapse/server.py b/synapse/server.py
index 0f36ef69cb..b72b76a38b 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -91,7 +91,6 @@ from synapse.handlers.room import (
     RoomShutdownHandler,
     TimestampLookupHandler,
 )
-from synapse.handlers.room_batch import RoomBatchHandler
 from synapse.handlers.room_list import RoomListHandler
 from synapse.handlers.room_member import (
     RoomForgetterHandler,
@@ -492,10 +491,6 @@ class HomeServer(metaclass=abc.ABCMeta):
     def get_room_creation_handler(self) -> RoomCreationHandler:
         return RoomCreationHandler(self)
 
-    @cache_in_self
-    def get_room_batch_handler(self) -> RoomBatchHandler:
-        return RoomBatchHandler(self)
-
     @cache_in_self
     def get_room_shutdown_handler(self) -> RoomShutdownHandler:
         return RoomShutdownHandler(self)
diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py
index 0032a92f49..3a10c265c9 100644
--- a/synapse/storage/databases/main/__init__.py
+++ b/synapse/storage/databases/main/__init__.py
@@ -61,7 +61,6 @@ from .registration import RegistrationStore
 from .rejections import RejectionsStore
 from .relations import RelationsStore
 from .room import RoomStore
-from .room_batch import RoomBatchStore
 from .roommember import RoomMemberStore
 from .search import SearchStore
 from .session import SessionStore
@@ -87,7 +86,6 @@ class DataStore(
     DeviceStore,
     RoomMemberStore,
     RoomStore,
-    RoomBatchStore,
     RegistrationStore,
     ProfileStore,
     PresenceStore,
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index 2681917d0b..8b6e3c1dc7 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -31,7 +31,7 @@ from typing import (
 import attr
 from prometheus_client import Counter, Gauge
 
-from synapse.api.constants import MAX_DEPTH, EventTypes
+from synapse.api.constants import MAX_DEPTH
 from synapse.api.errors import StoreError
 from synapse.api.room_versions import EventFormatVersions, RoomVersion
 from synapse.events import EventBase, make_event_from_dict
@@ -891,124 +891,6 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
             room_id,
         )
 
-    @trace
-    async def get_insertion_event_backward_extremities_in_room(
-        self,
-        room_id: str,
-        current_depth: int,
-        limit: int,
-    ) -> List[Tuple[str, int]]:
-        """
-        Get the insertion events we know about that we haven't backfilled yet
-        along with the approximate depth. Only returns insertion events that are
-        at a depth lower than or equal to the `current_depth`. Sorted by depth,
-        highest to lowest (descending) so the closest events to the
-        `current_depth` are first in the list.
-
-        We ignore insertion events that are newer than the user's current scroll
-        position (ie, those with depth greater than `current_depth`) as:
-        1. we don't really care about getting events that have happened
-           after our current position; and
-        2. by the nature of paginating and scrolling back, we have likely
-           previously tried and failed to backfill from that insertion event, so
-           to avoid getting "stuck" requesting the same backfill repeatedly,
-           we drop those insertion events.
-
-        Args:
-            room_id: Room where we want to find the oldest events
-            current_depth: The depth at the user's current scrollback position
-            limit: The max number of insertion event extremities to return
-
-        Returns:
-            List of (event_id, depth) tuples.
Sorted by depth, highest to lowest - (descending) so the closest events to the `current_depth` are first - in the list. - """ - - def get_insertion_event_backward_extremities_in_room_txn( - txn: LoggingTransaction, room_id: str - ) -> List[Tuple[str, int]]: - if isinstance(self.database_engine, PostgresEngine): - least_function = "LEAST" - elif isinstance(self.database_engine, Sqlite3Engine): - least_function = "MIN" - else: - raise RuntimeError("Unknown database engine") - - sql = f""" - SELECT - insertion_event_extremity.event_id, event.depth - /* We only want insertion events that are also marked as backwards extremities */ - FROM insertion_event_extremities AS insertion_event_extremity - /* Get the depth of the insertion event from the events table */ - INNER JOIN events AS event USING (event_id) - /** - * We use this info to make sure we don't retry to use a backfill point - * if we've already attempted to backfill from it recently. - */ - LEFT JOIN event_failed_pull_attempts AS failed_backfill_attempt_info - ON - failed_backfill_attempt_info.room_id = insertion_event_extremity.room_id - AND failed_backfill_attempt_info.event_id = insertion_event_extremity.event_id - WHERE - insertion_event_extremity.room_id = ? - /** - * We only want extremities that are older than or at - * the same position of the given `current_depth` (where older - * means less than the given depth) because we're looking backwards - * from the `current_depth` when backfilling. - * - * current_depth (ignore events that come after this, ignore 2-4) - * | - * ▼ - * [0]<--[1]<--[2]<--[3]<--[4] - */ - AND event.depth <= ? /* current_depth */ - /** - * Exponential back-off (up to the upper bound) so we don't retry the - * same backfill point over and over. ex. 2hr, 4hr, 8hr, 16hr, etc - * - * We use `1 << n` as a power of 2 equivalent for compatibility - * with older SQLites. The left shift equivalent only works with - * powers of 2 because left shift is a binary operation (base-2). - * Otherwise, we would use `power(2, n)` or the power operator, `2^n`. - */ - AND ( - failed_backfill_attempt_info.event_id IS NULL - OR ? /* current_time */ >= failed_backfill_attempt_info.last_attempt_ts + ( - (1 << {least_function}(failed_backfill_attempt_info.num_attempts, ? /* max doubling steps */)) - * ? /* step */ - ) - ) - /** - * Sort from highest (closest to the `current_depth`) to the lowest depth - * because the closest are most relevant to backfill from first. - * Then tie-break on alphabetical order of the event_ids so we get a - * consistent ordering which is nice when asserting things in tests. - */ - ORDER BY event.depth DESC, insertion_event_extremity.event_id DESC - LIMIT ? 
-            """
-
-            txn.execute(
-                sql,
-                (
-                    room_id,
-                    current_depth,
-                    self._clock.time_msec(),
-                    BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS,
-                    BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_MILLISECONDS,
-                    limit,
-                ),
-            )
-            return cast(List[Tuple[str, int]], txn.fetchall())
-
-        return await self.db_pool.runInteraction(
-            "get_insertion_event_backward_extremities_in_room",
-            get_insertion_event_backward_extremities_in_room_txn,
-            room_id,
-        )
-
     async def get_max_depth_of(
         self, event_ids: Collection[str]
     ) -> Tuple[Optional[str], int]:
@@ -1280,50 +1162,6 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
 
         return event_ids
 
-    def _get_connected_batch_event_backfill_results_txn(
-        self, txn: LoggingTransaction, insertion_event_id: str, limit: int
-    ) -> List[BackfillQueueNavigationItem]:
-        """
-        Find any batch connections of a given insertion event.
-        A batch event points at an insertion event via:
-        batch_event.content[MSC2716_BATCH_ID] -> insertion_event.content[MSC2716_NEXT_BATCH_ID]
-
-        Args:
-            txn: The database transaction to use
-            insertion_event_id: The event ID to navigate from. We will find
-                batch events that point back at this insertion event.
-            limit: Max number of event IDs to query for and return
-
-        Returns:
-            List of batch events that the backfill queue can process
-        """
-        batch_connection_query = """
-            SELECT e.depth, e.stream_ordering, c.event_id, e.type FROM insertion_events AS i
-            /* Find the batch that connects to the given insertion event */
-            INNER JOIN batch_events AS c
-            ON i.next_batch_id = c.batch_id
-            /* Get the depth of the batch start event from the events table */
-            INNER JOIN events AS e ON c.event_id = e.event_id
-            /* Find an insertion event which matches the given event_id */
-            WHERE i.event_id = ?
-            LIMIT ?
-        """
-
-        # Find any batch connections for the given insertion event
-        txn.execute(
-            batch_connection_query,
-            (insertion_event_id, limit),
-        )
-        return [
-            BackfillQueueNavigationItem(
-                depth=row[0],
-                stream_ordering=row[1],
-                event_id=row[2],
-                type=row[3],
-            )
-            for row in txn
-        ]
-
     def _get_connected_prev_event_backfill_results_txn(
         self, txn: LoggingTransaction, event_id: str, limit: int
     ) -> List[BackfillQueueNavigationItem]:
@@ -1472,40 +1310,6 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
 
             event_id_results.add(event_id)
 
-            # Try and find any potential historical batches of message history.
-            if self.hs.config.experimental.msc2716_enabled:
-                # We need to go and try to find any batch events connected
-                # to a given insertion event (by batch_id). If we find any, we'll
-                # add them to the queue and navigate up the DAG like normal in the
-                # next iteration of the loop.
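The exponential back-off condition in the `get_insertion_event_backward_extremities_in_room` query above is easier to follow in plain Python. A minimal sketch of the same arithmetic — `is_retry_allowed` and its defaults are illustrative stand-ins, not Synapse code; only the `1 << min(...)` expression mirrors the SQL:

    def is_retry_allowed(
        now_ms: int,
        last_attempt_ts_ms: int,
        num_attempts: int,
        step_ms: int = 60 * 60 * 1000,  # assumed one-hour step, giving the 2hr/4hr/8hr/16hr series
        max_doubling_steps: int = 8,  # assumed cap on the doubling
    ) -> bool:
        # The query uses `1 << n` instead of `power(2, n)` for compatibility
        # with older SQLite; in Python this is simply 2**n.
        backoff_ms = (1 << min(num_attempts, max_doubling_steps)) * step_ms
        return now_ms >= last_attempt_ts_ms + backoff_ms

With one recorded failure a backfill point becomes retryable after two hours; after four failures, sixteen hours — the same thresholds the storage tests further down advance the clock past.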
-                if event_type == EventTypes.MSC2716_INSERTION:
-                    # Find any batch connections for the given insertion event
-                    connected_batch_event_backfill_results = (
-                        self._get_connected_batch_event_backfill_results_txn(
-                            txn, event_id, limit - len(event_id_results)
-                        )
-                    )
-                    logger.debug(
-                        "_get_backfill_events(room_id=%s): connected_batch_event_backfill_results=%s",
-                        room_id,
-                        connected_batch_event_backfill_results,
-                    )
-                    for (
-                        connected_batch_event_backfill_item
-                    ) in connected_batch_event_backfill_results:
-                        if (
-                            connected_batch_event_backfill_item.event_id
-                            not in event_id_results
-                        ):
-                            queue.put(
-                                (
-                                    -connected_batch_event_backfill_item.depth,
-                                    -connected_batch_event_backfill_item.stream_ordering,
-                                    connected_batch_event_backfill_item.event_id,
-                                    connected_batch_event_backfill_item.type,
-                                )
-                            )
-
             # Now we just look up the DAG by prev_events as normal
             connected_prev_event_backfill_results = (
                 self._get_connected_prev_event_backfill_results_txn(
@@ -1748,19 +1552,6 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
             _delete_old_forward_extrem_cache_txn,
         )
 
-    @trace
-    async def insert_insertion_extremity(self, event_id: str, room_id: str) -> None:
-        await self.db_pool.simple_upsert(
-            table="insertion_event_extremities",
-            keyvalues={"event_id": event_id},
-            values={
-                "event_id": event_id,
-                "room_id": room_id,
-            },
-            insertion_values={},
-            desc="insert_insertion_extremity",
-        )
-
     async def insert_received_event_to_staging(
         self, origin: str, event: EventBase
     ) -> None:
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 44af3357af..5c9db7554e 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1664,9 +1664,6 @@ class PersistEventsStore:
 
         self._handle_event_relations(txn, event)
 
-        self._handle_insertion_event(txn, event)
-        self._handle_batch_event(txn, event)
-
         # Store the labels for this event.
         labels = event.content.get(EventContentFields.LABELS)
         if labels:
@@ -1927,128 +1924,6 @@ class PersistEventsStore:
             ),
         )
 
-    def _handle_insertion_event(
-        self, txn: LoggingTransaction, event: EventBase
-    ) -> None:
-        """Handles keeping track of insertion events and edges/connections.
-        Part of MSC2716.
-
-        Args:
-            txn: The database transaction object
-            event: The event to process
-        """
-
-        if event.type != EventTypes.MSC2716_INSERTION:
-            # Not an insertion event
-            return
-
-        # Skip processing an insertion event if the room version doesn't
-        # support it or the event is not from the room creator.
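The guard that follows reads more clearly as a positive predicate. A sketch of the same boolean logic (the flat function below is a hypothetical rewrite for illustration, not a Synapse API): insertion and batch events are processed when the room version marks them as historical, or when the experimental flag is on and the sender is the room creator.

    def should_process_msc2716_event(
        room_version_is_historical: bool,  # stands in for room_version.msc2716_historical
        msc2716_enabled: bool,  # stands in for hs.config.experimental.msc2716_enabled
        sender: str,
        room_creator: str,
    ) -> bool:
        # Equivalent, by De Morgan, to the `if not ... and (not ... or ...)`
        # early-return guard in the code below.
        return room_version_is_historical or (
            msc2716_enabled and sender == room_creator
        )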
- room_version = self.store.get_room_version_txn(txn, event.room_id) - room_creator = self.db_pool.simple_select_one_onecol_txn( - txn, - table="rooms", - keyvalues={"room_id": event.room_id}, - retcol="creator", - allow_none=True, - ) - if not room_version.msc2716_historical and ( - not self.hs.config.experimental.msc2716_enabled - or event.sender != room_creator - ): - return - - next_batch_id = event.content.get(EventContentFields.MSC2716_NEXT_BATCH_ID) - if next_batch_id is None: - # Invalid insertion event without next batch ID - return - - logger.debug( - "_handle_insertion_event (next_batch_id=%s) %s", next_batch_id, event - ) - - # Keep track of the insertion event and the batch ID - self.db_pool.simple_insert_txn( - txn, - table="insertion_events", - values={ - "event_id": event.event_id, - "room_id": event.room_id, - "next_batch_id": next_batch_id, - }, - ) - - # Insert an edge for every prev_event connection - for prev_event_id in event.prev_event_ids(): - self.db_pool.simple_insert_txn( - txn, - table="insertion_event_edges", - values={ - "event_id": event.event_id, - "room_id": event.room_id, - "insertion_prev_event_id": prev_event_id, - }, - ) - - def _handle_batch_event(self, txn: LoggingTransaction, event: EventBase) -> None: - """Handles inserting the batch edges/connections between the batch event - and an insertion event. Part of MSC2716. - - Args: - txn: The database transaction object - event: The event to process - """ - - if event.type != EventTypes.MSC2716_BATCH: - # Not a batch event - return - - # Skip processing a batch event if the room version doesn't - # support it or the event is not from the room creator. - room_version = self.store.get_room_version_txn(txn, event.room_id) - room_creator = self.db_pool.simple_select_one_onecol_txn( - txn, - table="rooms", - keyvalues={"room_id": event.room_id}, - retcol="creator", - allow_none=True, - ) - if not room_version.msc2716_historical and ( - not self.hs.config.experimental.msc2716_enabled - or event.sender != room_creator - ): - return - - batch_id = event.content.get(EventContentFields.MSC2716_BATCH_ID) - if batch_id is None: - # Invalid batch event without a batch ID - return - - logger.debug("_handle_batch_event batch_id=%s %s", batch_id, event) - - # Keep track of the insertion event and the batch ID - self.db_pool.simple_insert_txn( - txn, - table="batch_events", - values={ - "event_id": event.event_id, - "room_id": event.room_id, - "batch_id": batch_id, - }, - ) - - # When we receive an event with a `batch_id` referencing the - # `next_batch_id` of the insertion event, we can remove it from the - # `insertion_event_extremities` table. - sql = """ - DELETE FROM insertion_event_extremities WHERE event_id IN ( - SELECT event_id FROM insertion_events - WHERE next_batch_id = ? - ) - """ - - txn.execute(sql, (batch_id,)) - def _handle_redact_relations( self, txn: LoggingTransaction, room_id: str, redacted_event_id: str ) -> None: diff --git a/synapse/storage/databases/main/room_batch.py b/synapse/storage/databases/main/room_batch.py deleted file mode 100644 index 131f357d04..0000000000 --- a/synapse/storage/databases/main/room_batch.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2021 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import Optional
-
-from synapse.storage._base import SQLBaseStore
-
-
-class RoomBatchStore(SQLBaseStore):
-    async def get_insertion_event_id_by_batch_id(
-        self, room_id: str, batch_id: str
-    ) -> Optional[str]:
-        """Retrieve an insertion event ID.
-
-        Args:
-            batch_id: The batch ID of the insertion event to retrieve.
-
-        Returns:
-            The event_id of an insertion event, or None if there is no known
-            insertion event for the given batch ID.
-        """
-        return await self.db_pool.simple_select_one_onecol(
-            table="insertion_events",
-            keyvalues={"room_id": room_id, "next_batch_id": batch_id},
-            retcol="event_id",
-            allow_none=True,
-        )
-
-    async def store_state_group_id_for_event_id(
-        self, event_id: str, state_group_id: int
-    ) -> None:
-        await self.db_pool.simple_upsert(
-            table="event_to_state_groups",
-            keyvalues={"event_id": event_id},
-            values={"state_group": state_group_id, "event_id": event_id},
-        )
diff --git a/tests/rest/client/test_room_batch.py b/tests/rest/client/test_room_batch.py
deleted file mode 100644
index 9d5cb60d16..0000000000
--- a/tests/rest/client/test_room_batch.py
+++ /dev/null
@@ -1,302 +0,0 @@
-import logging
-from typing import List, Tuple
-from unittest.mock import Mock, patch
-
-from twisted.test.proto_helpers import MemoryReactor
-
-from synapse.api.constants import EventContentFields, EventTypes
-from synapse.appservice import ApplicationService
-from synapse.rest import admin
-from synapse.rest.client import login, register, room, room_batch, sync
-from synapse.server import HomeServer
-from synapse.types import JsonDict, RoomStreamToken
-from synapse.util import Clock
-
-from tests import unittest
-
-logger = logging.getLogger(__name__)
-
-
-def _create_join_state_events_for_batch_send_request(
-    virtual_user_ids: List[str],
-    insert_time: int,
-) -> List[JsonDict]:
-    return [
-        {
-            "type": EventTypes.Member,
-            "sender": virtual_user_id,
-            "origin_server_ts": insert_time,
-            "content": {
-                "membership": "join",
-                "displayname": "display-name-for-%s" % (virtual_user_id,),
-            },
-            "state_key": virtual_user_id,
-        }
-        for virtual_user_id in virtual_user_ids
-    ]
-
-
-def _create_message_events_for_batch_send_request(
-    virtual_user_id: str, insert_time: int, count: int
-) -> List[JsonDict]:
-    return [
-        {
-            "type": EventTypes.Message,
-            "sender": virtual_user_id,
-            "origin_server_ts": insert_time,
-            "content": {
-                "msgtype": "m.text",
-                "body": "Historical %d" % (i),
-                EventContentFields.MSC2716_HISTORICAL: True,
-            },
-        }
-        for i in range(count)
-    ]
-
-
-class RoomBatchTestCase(unittest.HomeserverTestCase):
-    """Test importing batches of historical messages."""
-
-    servlets = [
-        admin.register_servlets_for_client_rest_resource,
-        room_batch.register_servlets,
-        room.register_servlets,
-        register.register_servlets,
-        login.register_servlets,
-        sync.register_servlets,
-    ]
-
-    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-        config = self.default_config()
-
-        self.appservice = ApplicationService(
-            token="i_am_an_app_service",
-            id="1234",
-            namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]},
-            # Note: this user does not have to match the regex above
-            sender="@as_main:test",
-        )
-
-        mock_load_appservices = Mock(return_value=[self.appservice])
-        with patch(
-            "synapse.storage.databases.main.appservice.load_appservices",
-            mock_load_appservices,
-        ):
-            hs = self.setup_test_homeserver(config=config)
-        return hs
-
-    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
-        self.clock = clock
-        self._storage_controllers = hs.get_storage_controllers()
-
-        self.virtual_user_id, _ = self.register_appservice_user(
-            "as_user_potato", self.appservice.token
-        )
-
-    def _create_test_room(self) -> Tuple[str, str, str, str]:
-        room_id = self.helper.create_room_as(
-            self.appservice.sender, tok=self.appservice.token
-        )
-
-        res_a = self.helper.send_event(
-            room_id=room_id,
-            type=EventTypes.Message,
-            content={
-                "msgtype": "m.text",
-                "body": "A",
-            },
-            tok=self.appservice.token,
-        )
-        event_id_a = res_a["event_id"]
-
-        res_b = self.helper.send_event(
-            room_id=room_id,
-            type=EventTypes.Message,
-            content={
-                "msgtype": "m.text",
-                "body": "B",
-            },
-            tok=self.appservice.token,
-        )
-        event_id_b = res_b["event_id"]
-
-        res_c = self.helper.send_event(
-            room_id=room_id,
-            type=EventTypes.Message,
-            content={
-                "msgtype": "m.text",
-                "body": "C",
-            },
-            tok=self.appservice.token,
-        )
-        event_id_c = res_c["event_id"]
-
-        return room_id, event_id_a, event_id_b, event_id_c
-
-    @unittest.override_config({"experimental_features": {"msc2716_enabled": True}})
-    def test_same_state_groups_for_whole_historical_batch(self) -> None:
-        """Make sure that when using the `/batch_send` endpoint to import a
-        bunch of historical messages, it re-uses the same `state_group` across
-        the whole batch. This is an easy optimization to get right because
-        the state for the whole batch is contained in `state_events_at_start`
-        and can be shared across everything.
-        """
-
-        time_before_room = int(self.clock.time_msec())
-        room_id, event_id_a, _, _ = self._create_test_room()
-
-        channel = self.make_request(
-            "POST",
-            "/_matrix/client/unstable/org.matrix.msc2716/rooms/%s/batch_send?prev_event_id=%s"
-            % (room_id, event_id_a),
-            content={
-                "events": _create_message_events_for_batch_send_request(
-                    self.virtual_user_id, time_before_room, 3
-                ),
-                "state_events_at_start": _create_join_state_events_for_batch_send_request(
-                    [self.virtual_user_id], time_before_room
-                ),
-            },
-            access_token=self.appservice.token,
-        )
-        self.assertEqual(channel.code, 200, channel.result)
-
-        # Get the historical event IDs that we just imported
-        historical_event_ids = channel.json_body["event_ids"]
-        self.assertEqual(len(historical_event_ids), 3)
-
-        # Fetch the state_groups
-        state_group_map = self.get_success(
-            self._storage_controllers.state.get_state_groups_ids(
-                room_id, historical_event_ids
-            )
-        )
-
-        # We expect all of the historical events to be using the same state_group
-        # so there should only be a single state_group here!
-        self.assertEqual(
-            len(state_group_map.keys()),
-            1,
-            "Expected a single state_group to be returned but saw state_groups=%s"
-            % (state_group_map.keys(),),
-        )
-
-    @unittest.override_config({"experimental_features": {"msc2716_enabled": True}})
-    def test_sync_while_batch_importing(self) -> None:
-        """
-        Make sure that /sync correctly returns full room state when a user joins
-        during ongoing batch backfilling.
-        See: https://github.com/matrix-org/synapse/issues/12281
-        """
-        # Create user who will be invited & join room
-        user_id = self.register_user("beep", "test")
-        user_tok = self.login("beep", "test")
-
-        time_before_room = int(self.clock.time_msec())
-
-        # Create a room with some events
-        room_id, _, _, _ = self._create_test_room()
-        # Invite the user
-        self.helper.invite(
-            room_id, src=self.appservice.sender, tok=self.appservice.token, targ=user_id
-        )
-
-        # Create another room, send a bunch of events to advance the stream token
-        other_room_id = self.helper.create_room_as(
-            self.appservice.sender, tok=self.appservice.token
-        )
-        for _ in range(5):
-            self.helper.send_event(
-                room_id=other_room_id,
-                type=EventTypes.Message,
-                content={"msgtype": "m.text", "body": "C"},
-                tok=self.appservice.token,
-            )
-
-        # Join the room as the normal user
-        self.helper.join(room_id, user_id, tok=user_tok)
-
-        # Create an event to hang the historical batch from - In order to see
-        # the failure case originally reported in #12281, the historical batch
-        # must be hung from the most recent event in the room so the base
-        # insertion event ends up with the highest `topological_ordering`
-        # (`depth`) in the room but will have a negative `stream_ordering`
-        # because it's a `historical` event. Previously, when assembling the
-        # `state` for the `/sync` response, the bugged logic would sort by
-        # `topological_ordering` descending and pick up the base insertion
-        # event because it has a negative `stream_ordering` below the given
-        # pagination token. Now we properly sort by `stream_ordering`
-        # descending, which puts `historical` events with a negative
-        # `stream_ordering` at the bottom, so they aren't selected, as expected.
-        response = self.helper.send_event(
-            room_id=room_id,
-            type=EventTypes.Message,
-            content={
-                "msgtype": "m.text",
-                "body": "C",
-            },
-            tok=self.appservice.token,
-        )
-        event_to_hang_id = response["event_id"]
-
-        channel = self.make_request(
-            "POST",
-            "/_matrix/client/unstable/org.matrix.msc2716/rooms/%s/batch_send?prev_event_id=%s"
-            % (room_id, event_to_hang_id),
-            content={
-                "events": _create_message_events_for_batch_send_request(
-                    self.virtual_user_id, time_before_room, 3
-                ),
-                "state_events_at_start": _create_join_state_events_for_batch_send_request(
-                    [self.virtual_user_id], time_before_room
-                ),
-            },
-            access_token=self.appservice.token,
-        )
-        self.assertEqual(channel.code, 200, channel.result)
-
-        # Now we need to find the invite + join events stream tokens so we can sync between
-        main_store = self.hs.get_datastores().main
-        events, next_key = self.get_success(
-            main_store.get_recent_events_for_room(
-                room_id,
-                50,
-                end_token=main_store.get_room_max_token(),
-            ),
-        )
-        invite_event_position = None
-        for event in events:
-            if (
-                event.type == "m.room.member"
-                and event.content["membership"] == "invite"
-            ):
-                invite_event_position = self.get_success(
-                    main_store.get_topological_token_for_event(event.event_id)
-                )
-                break
-
-        assert invite_event_position is not None, "No invite event found"
-
-        # Remove the topological order from the token by re-creating w/stream only
-        invite_event_position = RoomStreamToken(None, invite_event_position.stream)
-
-        # Sync everything after this token
-        since_token = self.get_success(invite_event_position.to_string(main_store))
-        sync_response = self.make_request(
-            "GET",
-            f"/sync?since={since_token}",
-            access_token=user_tok,
-        )
-
-        # Assert that, for this room, the user was considered to have joined and thus
-        # receives the
full state history - state_event_types = [ - event["type"] - for event in sync_response.json_body["rooms"]["join"][room_id]["state"][ - "events" - ] - ] - - assert ( - "m.room.create" in state_event_types - ), "Missing room full state in sync response" diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 4b8d8328d7..0f3b0744f1 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -20,7 +20,6 @@ from parameterized import parameterized from twisted.test.proto_helpers import MemoryReactor -from synapse.api.constants import EventTypes from synapse.api.room_versions import ( KNOWN_ROOM_VERSIONS, EventFormatVersions, @@ -924,216 +923,6 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] self.assertEqual(backfill_event_ids, ["b3", "b2", "b1"]) - def _setup_room_for_insertion_backfill_tests(self) -> _BackfillSetupInfo: - """ - Sets up a room with various insertion event backward extremities to test - backfill functions against. - - Returns: - _BackfillSetupInfo including the `room_id` to test against and - `depth_map` of events in the room - """ - room_id = "!backfill-room-test:some-host" - - depth_map: Dict[str, int] = { - "1": 1, - "2": 2, - "insertion_eventA": 3, - "3": 4, - "insertion_eventB": 5, - "4": 6, - "5": 7, - } - - def populate_db(txn: LoggingTransaction) -> None: - # Insert the room to satisfy the foreign key constraint of - # `event_failed_pull_attempts` - self.store.db_pool.simple_insert_txn( - txn, - "rooms", - { - "room_id": room_id, - "creator": "room_creator_user_id", - "is_public": True, - "room_version": "6", - }, - ) - - # Insert our server events - stream_ordering = 0 - for event_id, depth in depth_map.items(): - self.store.db_pool.simple_insert_txn( - txn, - table="events", - values={ - "event_id": event_id, - "type": EventTypes.MSC2716_INSERTION - if event_id.startswith("insertion_event") - else "test_regular_type", - "room_id": room_id, - "depth": depth, - "topological_ordering": depth, - "stream_ordering": stream_ordering, - "processed": True, - "outlier": False, - }, - ) - - if event_id.startswith("insertion_event"): - self.store.db_pool.simple_insert_txn( - txn, - table="insertion_event_extremities", - values={ - "event_id": event_id, - "room_id": room_id, - }, - ) - - stream_ordering += 1 - - self.get_success( - self.store.db_pool.runInteraction( - "_setup_room_for_insertion_backfill_tests_populate_db", - populate_db, - ) - ) - - return _BackfillSetupInfo(room_id=room_id, depth_map=depth_map) - - def test_get_insertion_event_backward_extremities_in_room(self) -> None: - """ - Test to make sure only insertion event backward extremities that are - older and come before the `current_depth` are returned. 
-        """
-        setup_info = self._setup_room_for_insertion_backfill_tests()
-        room_id = setup_info.room_id
-        depth_map = setup_info.depth_map
-
-        # Try at "insertion_eventB"
-        backfill_points = self.get_success(
-            self.store.get_insertion_event_backward_extremities_in_room(
-                room_id, depth_map["insertion_eventB"], limit=100
-            )
-        )
-        backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
-        self.assertEqual(backfill_event_ids, ["insertion_eventB", "insertion_eventA"])
-
-        # Try at "insertion_eventA"
-        backfill_points = self.get_success(
-            self.store.get_insertion_event_backward_extremities_in_room(
-                room_id, depth_map["insertion_eventA"], limit=100
-            )
-        )
-        backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
-        # Event "2" has a depth of 2 but is not included here because we only
-        # know the approximate depth of 5 from our event "3".
-        self.assertListEqual(backfill_event_ids, ["insertion_eventA"])
-
-    def test_get_insertion_event_backward_extremities_in_room_excludes_events_we_have_attempted(
-        self,
-    ) -> None:
-        """
-        Test to make sure that insertion events we have attempted to backfill
-        (and within backoff timeout duration) do not show up as an event to
-        backfill again.
-        """
-        setup_info = self._setup_room_for_insertion_backfill_tests()
-        room_id = setup_info.room_id
-        depth_map = setup_info.depth_map
-
-        # Record some attempts to backfill these events which will make
-        # `get_insertion_event_backward_extremities_in_room` exclude them
-        # because we haven't passed the backoff interval.
-        self.get_success(
-            self.store.record_event_failed_pull_attempt(
-                room_id, "insertion_eventA", "fake cause"
-            )
-        )
-
-        # No time has passed since we attempted to backfill ^
-
-        # Try at "insertion_eventB"
-        backfill_points = self.get_success(
-            self.store.get_insertion_event_backward_extremities_in_room(
-                room_id, depth_map["insertion_eventB"], limit=100
-            )
-        )
-        backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
-        # Only the backfill points that we didn't record earlier exist here.
-        self.assertEqual(backfill_event_ids, ["insertion_eventB"])
-
-    def test_get_insertion_event_backward_extremities_in_room_attempted_event_retry_after_backoff_duration(
-        self,
-    ) -> None:
-        """
-        Test to make sure that, after we fake-attempt to backfill event
-        "insertion_eventA" many times, we can retry and see "insertion_eventA"
-        again after the backoff timeout duration has been exceeded.
-        """
-        setup_info = self._setup_room_for_insertion_backfill_tests()
-        room_id = setup_info.room_id
-        depth_map = setup_info.depth_map
-
-        # Record some attempts to backfill these events which will make
-        # `get_backfill_points_in_room` exclude them because we
-        # haven't passed the backoff interval.
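The clock advances in this test lean on the backoff arithmetic sketched earlier. Worked through (illustrative code only, assuming the one-hour step):

    def hours_until_retry(num_attempts: int, max_doubling_steps: int = 8) -> int:
        # Same `1 << min(...)` arithmetic as the backfill query.
        return 1 << min(num_attempts, max_doubling_steps)

    assert hours_until_retry(1) == 2  # one failed attempt: retryable after 2 hours
    assert hours_until_retry(4) == 16  # four failed attempts: retryable after 16 hours

Hence the test advances two hours to free "insertion_eventB" (one recorded attempt) and a further twenty to exceed the sixteen-hour threshold for "insertion_eventA" (four recorded attempts).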
-        self.get_success(
-            self.store.record_event_failed_pull_attempt(
-                room_id, "insertion_eventB", "fake cause"
-            )
-        )
-        self.get_success(
-            self.store.record_event_failed_pull_attempt(
-                room_id, "insertion_eventA", "fake cause"
-            )
-        )
-        self.get_success(
-            self.store.record_event_failed_pull_attempt(
-                room_id, "insertion_eventA", "fake cause"
-            )
-        )
-        self.get_success(
-            self.store.record_event_failed_pull_attempt(
-                room_id, "insertion_eventA", "fake cause"
-            )
-        )
-        self.get_success(
-            self.store.record_event_failed_pull_attempt(
-                room_id, "insertion_eventA", "fake cause"
-            )
-        )
-
-        # Now advance time by 2 hours and we should only be able to see
-        # "insertion_eventB" because we have waited long enough for the single
-        # attempt (2^1 hours) but we still shouldn't see "insertion_eventA"
-        # because we haven't waited long enough for this many attempts.
-        self.reactor.advance(datetime.timedelta(hours=2).total_seconds())
-
-        # Try at "insertion_eventA" and make sure that "insertion_eventA" is not
-        # in the list because we've already attempted many times
-        backfill_points = self.get_success(
-            self.store.get_insertion_event_backward_extremities_in_room(
-                room_id, depth_map["insertion_eventA"], limit=100
-            )
-        )
-        backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
-        self.assertEqual(backfill_event_ids, [])
-
-        # Now advance time by 20 hours (above 2^4 because we made 4 attempts) and
-        # see if we can now backfill it
-        self.reactor.advance(datetime.timedelta(hours=20).total_seconds())
-
-        # Try at "insertion_eventA" again after we advanced enough time and we
-        # should see "insertion_eventA" again
-        backfill_points = self.get_success(
-            self.store.get_insertion_event_backward_extremities_in_room(
-                room_id, depth_map["insertion_eventA"], limit=100
-            )
-        )
-        backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
-        self.assertEqual(backfill_event_ids, ["insertion_eventA"])
-
     def test_get_event_ids_with_failed_pull_attempts(self) -> None:
         """
         Test to make sure we properly get event_ids based on whether they have any
From 10c509425fb6e36cba63de94402a9a5772cba54c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 19 Jun 2023 10:28:43 +0100
Subject: [PATCH 146/562] Bump serde_json from 1.0.96 to 1.0.97 (#15797)

Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.96 to 1.0.97.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.96...v1.0.97)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9bb8225226..51ff26ec1b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -340,9 +340,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.96" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" +checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" dependencies = [ "itoa", "ryu", From d3cd9881c0446526b7f71762e2ca8d464eb60b18 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Jun 2023 10:28:57 +0100 Subject: [PATCH 147/562] Bump ruff from 0.0.265 to 0.0.272 (#15799) Bumps [ruff](https://github.com/charliermarsh/ruff) from 0.0.265 to 0.0.272. - [Release notes](https://github.com/charliermarsh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/BREAKING_CHANGES.md) - [Commits](https://github.com/charliermarsh/ruff/compare/v0.0.265...v0.0.272) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 38 +++++++++++++++++++------------------- pyproject.toml | 2 +- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/poetry.lock b/poetry.lock index cf4a89c85a..385963301e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2242,28 +2242,28 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "ruff" -version = "0.0.265" +version = "0.0.272" description = "An extremely fast Python linter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.265-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:30ddfe22de6ce4eb1260408f4480bbbce998f954dbf470228a21a9b2c45955e4"}, - {file = "ruff-0.0.265-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:a11bd0889e88d3342e7bc514554bb4461bf6cc30ec115821c2425cfaac0b1b6a"}, - {file = "ruff-0.0.265-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a9b38bdb40a998cbc677db55b6225a6c4fadcf8819eb30695e1b8470942426b"}, - {file = "ruff-0.0.265-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a8b44a245b60512403a6a03a5b5212da274d33862225c5eed3bcf12037eb19bb"}, - {file = "ruff-0.0.265-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b279fa55ea175ef953208a6d8bfbcdcffac1c39b38cdb8c2bfafe9222add70bb"}, - {file = "ruff-0.0.265-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5028950f7af9b119d43d91b215d5044976e43b96a0d1458d193ef0dd3c587bf8"}, - {file = "ruff-0.0.265-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4057eb539a1d88eb84e9f6a36e0a999e0f261ed850ae5d5817e68968e7b89ed9"}, - {file = "ruff-0.0.265-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d586e69ab5cbf521a1910b733412a5735936f6a610d805b89d35b6647e2a66aa"}, - {file = "ruff-0.0.265-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa17b13cd3f29fc57d06bf34c31f21d043735cc9a681203d634549b0e41047d1"}, - {file = "ruff-0.0.265-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:9ac13b11d9ad3001de9d637974ec5402a67cefdf9fffc3929ab44c2fcbb850a1"}, - {file = "ruff-0.0.265-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:62a9578b48cfd292c64ea3d28681dc16b1aa7445b7a7709a2884510fc0822118"}, - {file = "ruff-0.0.265-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d0f9967f84da42d28e3d9d9354cc1575f96ed69e6e40a7d4b780a7a0418d9409"}, - {file = "ruff-0.0.265-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1d5a8de2fbaf91ea5699451a06f4074e7a312accfa774ad9327cde3e4fda2081"}, - {file = "ruff-0.0.265-py3-none-win32.whl", hash = "sha256:9e9db5ccb810742d621f93272e3cc23b5f277d8d00c4a79668835d26ccbe48dd"}, - {file = "ruff-0.0.265-py3-none-win_amd64.whl", hash = "sha256:f54facf286103006171a00ce20388d88ed1d6732db3b49c11feb9bf3d46f90e9"}, - {file = "ruff-0.0.265-py3-none-win_arm64.whl", hash = "sha256:c78470656e33d32ddc54e8482b1b0fc6de58f1195586731e5ff1405d74421499"}, - {file = "ruff-0.0.265.tar.gz", hash = "sha256:53c17f0dab19ddc22b254b087d1381b601b155acfa8feed514f0d6a413d0ab3a"}, + {file = "ruff-0.0.272-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:ae9b57546e118660175d45d264b87e9b4c19405c75b587b6e4d21e6a17bf4fdf"}, + {file = "ruff-0.0.272-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:1609b864a8d7ee75a8c07578bdea0a7db75a144404e75ef3162e0042bfdc100d"}, + {file = "ruff-0.0.272-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee76b4f05fcfff37bd6ac209d1370520d509ea70b5a637bdf0a04d0c99e13dff"}, + {file = "ruff-0.0.272-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:48eccf225615e106341a641f826b15224b8a4240b84269ead62f0afd6d7e2d95"}, + {file = "ruff-0.0.272-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:677284430ac539bb23421a2b431b4ebc588097ef3ef918d0e0a8d8ed31fea216"}, + {file = "ruff-0.0.272-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = 
"sha256:9c4bfb75456a8e1efe14c52fcefb89cfb8f2a0d31ed8d804b82c6cf2dc29c42c"}, + {file = "ruff-0.0.272-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86bc788245361a8148ff98667da938a01e1606b28a45e50ac977b09d3ad2c538"}, + {file = "ruff-0.0.272-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:27b2ea68d2aa69fff1b20b67636b1e3e22a6a39e476c880da1282c3e4bf6ee5a"}, + {file = "ruff-0.0.272-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd2bbe337a3f84958f796c77820d55ac2db1e6753f39d1d1baed44e07f13f96d"}, + {file = "ruff-0.0.272-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d5a208f8ef0e51d4746930589f54f9f92f84bb69a7d15b1de34ce80a7681bc00"}, + {file = "ruff-0.0.272-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:905ff8f3d6206ad56fcd70674453527b9011c8b0dc73ead27618426feff6908e"}, + {file = "ruff-0.0.272-py3-none-musllinux_1_2_i686.whl", hash = "sha256:19643d448f76b1eb8a764719072e9c885968971bfba872e14e7257e08bc2f2b7"}, + {file = "ruff-0.0.272-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:691d72a00a99707a4e0b2846690961157aef7b17b6b884f6b4420a9f25cd39b5"}, + {file = "ruff-0.0.272-py3-none-win32.whl", hash = "sha256:dc406e5d756d932da95f3af082814d2467943631a587339ee65e5a4f4fbe83eb"}, + {file = "ruff-0.0.272-py3-none-win_amd64.whl", hash = "sha256:a37ec80e238ead2969b746d7d1b6b0d31aa799498e9ba4281ab505b93e1f4b28"}, + {file = "ruff-0.0.272-py3-none-win_arm64.whl", hash = "sha256:06b8ee4eb8711ab119db51028dd9f5384b44728c23586424fd6e241a5b9c4a3b"}, + {file = "ruff-0.0.272.tar.gz", hash = "sha256:273a01dc8c3c4fd4c2af7ea7a67c8d39bb09bce466e640dd170034da75d14cab"}, ] [[package]] @@ -3291,4 +3291,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.7.1" -content-hash = "7ad11e62a675e09444cf33ca2de3216fc4efc5874a2575e54d95d577a52439d3" +content-hash = "090924370b17fd265407b5a3f9cbc00997308f575b455399b39a48e3ca1a5a8e" diff --git a/pyproject.toml b/pyproject.toml index 097bd03943..7f2f7927c9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -311,7 +311,7 @@ all = [ # We pin black so that our tests don't start failing on new releases. isort = ">=5.10.1" black = ">=22.3.0" -ruff = "0.0.265" +ruff = "0.0.272" # Typechecking lxml-stubs = ">=0.4.0" From 207cbe519dff0d97ec6e1d2134c9cc82dd944aa2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Jun 2023 10:29:10 +0100 Subject: [PATCH 148/562] Bump phonenumbers from 8.13.13 to 8.13.14 (#15798) Bumps [phonenumbers](https://github.com/daviddrysdale/python-phonenumbers) from 8.13.13 to 8.13.14. - [Commits](https://github.com/daviddrysdale/python-phonenumbers/compare/v8.13.13...v8.13.14) --- updated-dependencies: - dependency-name: phonenumbers dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 385963301e..9549e1c6a0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1583,13 +1583,13 @@ files = [ [[package]] name = "phonenumbers" -version = "8.13.13" +version = "8.13.14" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." 
optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.13-py2.py3-none-any.whl", hash = "sha256:55657adb607484aba6d56270b8a1f9b302f35496076e6c02051d06ed366374d9"}, - {file = "phonenumbers-8.13.13.tar.gz", hash = "sha256:4bdf8c989aff0cdb105aef170ad2c21f14b4537bcb32cf349f1f710df992a40a"}, + {file = "phonenumbers-8.13.14-py2.py3-none-any.whl", hash = "sha256:a4b20b6ba7dd402728f5cc8e86e1f29b1a873af45f5381dbee7e3083af497ff6"}, + {file = "phonenumbers-8.13.14.tar.gz", hash = "sha256:5fa952b4abf9fccdaf1f130d96114a520c48890d4091b50a064e22c0fdc12dec"}, ] [[package]] From 5f9d5190aa214c4ef119293a8368e9f589c5ed35 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Jun 2023 10:30:03 +0100 Subject: [PATCH 149/562] Bump attrs from 22.2.0 to 23.1.0 (#15801) Bumps [attrs](https://github.com/python-attrs/attrs) from 22.2.0 to 23.1.0. - [Release notes](https://github.com/python-attrs/attrs/releases) - [Changelog](https://github.com/python-attrs/attrs/blob/main/CHANGELOG.md) - [Commits](https://github.com/python-attrs/attrs/compare/22.2.0...23.1.0) --- updated-dependencies: - dependency-name: attrs dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/poetry.lock b/poetry.lock index 9549e1c6a0..88c8960b0a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -32,21 +32,24 @@ wrapt = [ [[package]] name = "attrs" -version = "22.2.0" +version = "23.1.0" description = "Classes Without Boilerplate" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"}, - {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"}, + {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, + {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, ] +[package.dependencies] +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} + [package.extras] -cov = ["attrs[tests]", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"] -dev = ["attrs[docs,tests]"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope.interface"] -tests = ["attrs[tests-no-zope]", "zope.interface"] -tests-no-zope = ["cloudpickle", "cloudpickle", "hypothesis", "hypothesis", "mypy (>=0.971,<0.990)", "mypy (>=0.971,<0.990)", "pympler", "pympler", "pytest (>=4.3.0)", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-mypy-plugins", "pytest-xdist[psutil]", "pytest-xdist[psutil]"] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[docs,tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] [[package]] name = "authlib" From 4ba528d9c31e1af75f3532ac2a52c1b12365f592 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Jun 2023 10:30:17 +0100 Subject: [PATCH 150/562] Bump ijson from 3.2.0.post0 to 3.2.1 (#15802) Bumps [ijson](https://github.com/ICRAR/ijson) from 3.2.0.post0 to 3.2.1. - [Changelog](https://github.com/ICRAR/ijson/blob/master/CHANGELOG.md) - [Commits](https://github.com/ICRAR/ijson/compare/v3.2.0.post0...v3.2.1) --- updated-dependencies: - dependency-name: ijson dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 158 ++++++++++++++++++++++++++-------------------------- 1 file changed, 79 insertions(+), 79 deletions(-) diff --git a/poetry.lock b/poetry.lock index 88c8960b0a..88f1e9548a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -728,89 +728,89 @@ files = [ [[package]] name = "ijson" -version = "3.2.0.post0" +version = "3.2.1" description = "Iterative JSON parser with standard Python iterator interfaces" optional = false python-versions = "*" files = [ - {file = "ijson-3.2.0.post0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5809752045ef74c26adf159ed03df7fb7e7a8d656992fd7562663ed47d6d39d9"}, - {file = "ijson-3.2.0.post0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ce4be2beece2629bd24bcab147741d1532bd5ed40fb52f2b4fcde5c5bf606df0"}, - {file = "ijson-3.2.0.post0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5d365df54d18076f1d5f2ffb1eef2ac7f0d067789838f13d393b5586fbb77b02"}, - {file = "ijson-3.2.0.post0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c93ae4d49d8cf8accfedc8a8e7815851f56ceb6e399b0c186754a68fed22844"}, - {file = "ijson-3.2.0.post0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47a56e3628c227081a2aa58569cbf2af378bad8af648aa904080e87cd6644cfb"}, - {file = "ijson-3.2.0.post0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8af68fe579f6f0b9a8b3f033d10caacfed6a4b89b8c7a1d9478a8f5d8aba4a1"}, - {file = "ijson-3.2.0.post0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6eed1ddd3147de49226db4f213851cf7860493a7b6c7bd5e62516941c007094c"}, - {file = "ijson-3.2.0.post0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9ecbf85a6d73fc72f6534c38f7d92ed15d212e29e0dbe9810a465d61c8a66d23"}, - {file = "ijson-3.2.0.post0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd218b338ac68213c997d4c88437c0e726f16d301616bf837e1468901934042c"}, - {file = "ijson-3.2.0.post0-cp310-cp310-win32.whl", hash = "sha256:4e7c4fdc7d24747c8cc7d528c145afda4de23210bf4054bd98cd63bf07e4882d"}, - {file = "ijson-3.2.0.post0-cp310-cp310-win_amd64.whl", hash = "sha256:4d4e143908f47307042c9678803d27706e0e2099d0a6c1988c6cae1da07760bf"}, - {file = "ijson-3.2.0.post0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:56500dac8f52989ef7c0075257a8b471cbea8ef77f1044822742b3cbf2246e8b"}, - {file = "ijson-3.2.0.post0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:535665a77408b6bea56eb828806fae125846dff2e2e0ed4cb2e0a8e36244d753"}, - {file = "ijson-3.2.0.post0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a4465c90b25ca7903410fabe4145e7b45493295cc3b84ec1216653fbe9021276"}, - {file = "ijson-3.2.0.post0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efee1e9b4f691e1086730f3010e31c55625bc2e0f7db292a38a2cdf2774c2e13"}, - {file = "ijson-3.2.0.post0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:6fd55f7a46429de95383fc0d0158c1bfb798e976d59d52830337343c2d9bda5c"}, - {file = "ijson-3.2.0.post0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25919b444426f58dcc62f763d1c6be6297f309da85ecab55f51da6ca86fc9fdf"}, - {file = "ijson-3.2.0.post0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c85892d68895ba7a0b16a0e6b7d9f9a0e30e86f2b1e0f6986243473ba8735432"}, - {file = "ijson-3.2.0.post0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:27409ba44cfd006901971063d37699f72e092b5efaa1586288b5067d80c6b5bd"}, - {file = "ijson-3.2.0.post0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:11dfd64633fe1382c4237477ac3836f682ca17e25e0d0799e84737795b0611df"}, - {file = "ijson-3.2.0.post0-cp311-cp311-win32.whl", hash = "sha256:41e955e173f77f54337fecaaa58a35c464b75e232b1f939b282497134a4d4f0e"}, - {file = "ijson-3.2.0.post0-cp311-cp311-win_amd64.whl", hash = "sha256:b3bdd2e12d9b9a18713dd6f3c5ef3734fdab25b79b177054ba9e35ecc746cb6e"}, - {file = "ijson-3.2.0.post0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:26b57838e712b8852c40ec6d74c6de8bb226446440e1af1354c077a6f81b9142"}, - {file = "ijson-3.2.0.post0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6464242f7895268d3086d7829ef031b05c77870dad1e13e51ef79d0a9cfe029"}, - {file = "ijson-3.2.0.post0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3c6cf18b61b94db9590f86af0dd60edbccb36e151643152b8688066f677fbc9"}, - {file = "ijson-3.2.0.post0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:992e9e68003df32e2aa0f31eb82c0a94f21286203ab2f2b2c666410e17b59d2f"}, - {file = "ijson-3.2.0.post0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:d3e255ef05b434f20fc9d4b18ea15733d1038bec3e4960d772b06216fa79e82d"}, - {file = "ijson-3.2.0.post0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:424232c2bf3e8181f1b572db92c179c2376b57eba9fc8931453fba975f48cb80"}, - {file = "ijson-3.2.0.post0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bced6cd5b09d4d002dda9f37292dd58d26eb1c4d0d179b820d3708d776300bb4"}, - {file = "ijson-3.2.0.post0-cp36-cp36m-win32.whl", hash = "sha256:a8c84dff2d60ae06d5280ec87cd63050bbd74a90c02bfc7c390c803cfc8ac8fc"}, - {file = "ijson-3.2.0.post0-cp36-cp36m-win_amd64.whl", hash = "sha256:a340413a9bf307fafd99254a4dd4ac6c567b91a205bf896dde18888315fd7fcd"}, - {file = "ijson-3.2.0.post0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b3456cd5b16ec9db3ef23dd27f37bf5a14f765e8272e9af3e3de9ee9a4cba867"}, - {file = "ijson-3.2.0.post0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0eb838b4e4360e65c00aa13c78b35afc2477759d423b602b60335af5bed3de5b"}, - {file = "ijson-3.2.0.post0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe7f414edd69dd9199b0dfffa0ada22f23d8009e10fe2a719e0993b7dcc2e6e2"}, - {file = "ijson-3.2.0.post0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:183841b8d033ca95457f61fb0719185dc7f51a616070bdf1dcaf03473bed05b2"}, - {file = "ijson-3.2.0.post0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1302dc6490da7d44c3a76a5f0b87d8bec9f918454c6d6e6bf4ed922e47da58bb"}, - {file = "ijson-3.2.0.post0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3b21b1ecd20ed2f918f6f99cdfa68284a416c0f015ffa64b68fa933df1b24d40"}, - {file = "ijson-3.2.0.post0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e97e6e07851cefe7baa41f1ebf5c0899d2d00d94bfef59825752e4c784bebbe8"}, - {file = "ijson-3.2.0.post0-cp37-cp37m-win32.whl", hash = 
"sha256:cd0450e76b9c629b7f86e7d5b91b7cc9c281dd719630160a992b19a856f7bdbd"}, - {file = "ijson-3.2.0.post0-cp37-cp37m-win_amd64.whl", hash = "sha256:bed8dcb7dbfdb98e647ad47676045e0891f610d38095dcfdae468e1e1efb2766"}, - {file = "ijson-3.2.0.post0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a7698bc480df76073067017f73ba4139dbaae20f7a6c9a0c7855b9c5e9a62124"}, - {file = "ijson-3.2.0.post0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2f204f6d4cedeb28326c230a0b046968b5263c234c65a5b18cee22865800fff7"}, - {file = "ijson-3.2.0.post0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9829a17f6f78d7f4d0aeff28c126926a1e5f86828ebb60d6a0acfa0d08457f9f"}, - {file = "ijson-3.2.0.post0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f470f3d750e00df86e03254fdcb422d2f726f4fb3a0d8eeee35e81343985e58a"}, - {file = "ijson-3.2.0.post0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb167ee21d9c413d6b0ab65ec12f3e7ea0122879da8b3569fa1063526f9f03a8"}, - {file = "ijson-3.2.0.post0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84eed88177f6c243c52b280cb094f751de600d98d2221e0dec331920894889ec"}, - {file = "ijson-3.2.0.post0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:53f1a13eb99ab514c562869513172135d4b55a914b344e6518ba09ad3ef1e503"}, - {file = "ijson-3.2.0.post0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f6785ba0f65eb64b1ce3b7fcfec101085faf98f4e77b234f14287fd4138ffb25"}, - {file = "ijson-3.2.0.post0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:79b94662c2e9d366ab362c2c5858097eae0da100dea0dfd340db09ab28c8d5e8"}, - {file = "ijson-3.2.0.post0-cp38-cp38-win32.whl", hash = "sha256:5242cb2313ba3ece307b426efa56424ac13cc291c36f292b501d412a98ad0703"}, - {file = "ijson-3.2.0.post0-cp38-cp38-win_amd64.whl", hash = "sha256:775444a3b647350158d0b3c6c39c88b4a0995643a076cb104bf25042c9aedcf8"}, - {file = "ijson-3.2.0.post0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1d64ffaab1d006a4fa9584a4c723e95cc9609bf6c3365478e250cd0bffaaadf3"}, - {file = "ijson-3.2.0.post0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:434e57e7ec5c334ccb0e67bb4d9e60c264dcb2a3843713dbeb12cb19fe42a668"}, - {file = "ijson-3.2.0.post0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:158494bfe89ccb32618d0e53b471364080ceb975462ec464d9f9f37d9832b653"}, - {file = "ijson-3.2.0.post0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f20072376e338af0e51ccecb02335b4e242d55a9218a640f545be7fc64cca99"}, - {file = "ijson-3.2.0.post0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3e8d46c1004afcf2bf513a8fb575ee2ec3d8009a2668566b5926a2dcf7f1a45"}, - {file = "ijson-3.2.0.post0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:986a0347fe19e5117a5241276b72add570839e5bcdc7a6dac4b538c5928eeff5"}, - {file = "ijson-3.2.0.post0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:535a59d61b9aef6fc2a3d01564c1151e38e5a44b92cd6583cb4e8ccf0f58043f"}, - {file = "ijson-3.2.0.post0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:830de03f391f7e72b8587bb178c22d534da31153e9ee4234d54ef82cde5ace5e"}, - {file = "ijson-3.2.0.post0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6def9ac8d73b76cb02e9e9837763f27f71e5e67ec0afae5f1f4cf8f61c39b1ac"}, - {file = "ijson-3.2.0.post0-cp39-cp39-win32.whl", hash = "sha256:11bb84a53c37e227e733c6dffad2037391cf0b3474bff78596dc4373b02008a0"}, - {file = "ijson-3.2.0.post0-cp39-cp39-win_amd64.whl", hash = 
"sha256:f349bee14d0a4a72ba41e1b1cce52af324ebf704f5066c09e3dd04cfa6f545f0"}, - {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5418066666b25b05f2b8ae2698408daa0afa68f07b0b217f2ab24465b7e9cbd9"}, - {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ccc4d4b947549f9c431651c02b95ef571412c78f88ded198612a41d5c5701a0"}, - {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dcec67fc15e5978ad286e8cc2a3f9347076e28e0e01673b5ace18c73da64e3ff"}, - {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ee9537e8a8aa15dd2d0912737aeb6265e781e74f7f7cad8165048fcb5f39230"}, - {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:03dfd4c8ed19e704d04b0ad4f34f598dc569fd3f73089f80eed698e7f6069233"}, - {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2d50b2ad9c6c51ca160aa60de7f4dacd1357c38d0e503f51aed95c1c1945ff53"}, - {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51c1db80d7791fb761ad9a6c70f521acd2c4b0e5afa2fe0d813beb2140d16c37"}, - {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:13f2939db983327dd0492f6c1c0e77be3f2cbf9b620c92c7547d1d2cd6ef0486"}, - {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f9d449f86f8971c24609e319811f7f3b6b734f0218c4a0e799debe19300d15b"}, - {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7e0d1713a9074a7677eb8e43f424b731589d1c689d4676e2f57a5ce59d089e89"}, - {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c8646eb81eec559d7d8b1e51a5087299d06ecab3bc7da54c01f7df94350df135"}, - {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fe3a53e00c59de33b825ba8d6d39f544a7d7180983cd3d6bd2c3794ae35442"}, - {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93aaec00cbde65c192f15c21f3ee44d2ab0c11eb1a35020b5c4c2676f7fe01d0"}, - {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00594ed3ef2218fee8c652d9e7f862fb39f8251b67c6379ef12f7e044bf6bbf3"}, - {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1a75cfb34217b41136b714985be645f12269e4345da35d7b48aabd317c82fd10"}, - {file = "ijson-3.2.0.post0.tar.gz", hash = "sha256:80a5bd7e9923cab200701f67ad2372104328b99ddf249dbbe8834102c852d316"}, + {file = "ijson-3.2.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6f827f6961f093e1055a2be0c3137f0e7d667979da455ac9648f72d4a2bb8970"}, + {file = "ijson-3.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b6e51f4497065cd0d09f5e906cd538a8d22609eab716e3c883769acf147ab1b6"}, + {file = "ijson-3.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f022686c40bff3e340627a5a0c9212718d529e787ada3b76ba546d47a9ecdbbd"}, + {file = "ijson-3.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4105c15a13fa1dc24ebd3bf2e679fa14dcbfcc48bc39138a0fa3f4ddf6cc09b"}, + {file = "ijson-3.2.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:404423e666f185dfb753ddc92705c84dffdc4cc872aaf825bbe0607893cb5b02"}, + {file = "ijson-3.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:39e71f32830827cf21d0233a814092e5a23668e18f52eca5cac4f670d9df1240"}, + {file = "ijson-3.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43af7ed5292caa1452747e2b62485b6c0ece4bcbc5bf6f2758abd547e4124a14"}, + {file = "ijson-3.2.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e805aa6897a11b0f73f1f6bca078981df8960aeeccf527a214f240409c742bab"}, + {file = "ijson-3.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5b2df0bd84889e9017a670328fe3e82ec509fd6744c7ac2c99c7ee2300d76afa"}, + {file = "ijson-3.2.1-cp310-cp310-win32.whl", hash = "sha256:675259c7ea7f51ffaf8cb9e79bf875e28bb09622892943f4f415588fd7ab7bec"}, + {file = "ijson-3.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:90d4b2eb771a3585c8186820fe50e3282ef62477b865e765a50a8295674abeac"}, + {file = "ijson-3.2.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fc581a61e210bf6013c1fa6536566e51127be1cfbd69539b63d8b813206d2fe0"}, + {file = "ijson-3.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75cdf7ad4c00a8f5ac94ff27e3b7c1bf5ac463f125bca2be1744c5bc9600db5c"}, + {file = "ijson-3.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:85a2bf4636ace4d92e7c5d857a1c5694f42407c868953cf2927f18127bcd0d58"}, + {file = "ijson-3.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fe0cb66e7dd4aa11da5fff60bdf5ee04819a5e6a57acf7ca12c65f7fc009afc"}, + {file = "ijson-3.2.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c6f7957ad38cb714378944032f2c2ee9c6531b5b0b38c5ccd08cedbb0ceddd02"}, + {file = "ijson-3.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13283d264cca8a63e5bad91e82eec39711e95893e7e8d4a419799a8c5f85203a"}, + {file = "ijson-3.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:12c24cde850fe79bc806be0e9fc38b47dd5ac0a223070ccb12e9b695425e2936"}, + {file = "ijson-3.2.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:2ce8eed838e5a0791cb5948117b5453f2b3b3c28d93d06ee2bbf2c198c47881c"}, + {file = "ijson-3.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b81c2589f191b0dc741f532be00b4bea617297dd9698431c8053e2d28272d4db"}, + {file = "ijson-3.2.1-cp311-cp311-win32.whl", hash = "sha256:ba2beac56ac96f728d0f2430e4c667c66819a423d321bb9db9ebdebd803e1b5b"}, + {file = "ijson-3.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:c71614ed4bbc6a32ff1e42d7ce92a176fb67d658913343792d2c4567aa130817"}, + {file = "ijson-3.2.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:683fc8b0ea085e75ea34044fdc70649b37367d494f132a2bd1e59d7135054d89"}, + {file = "ijson-3.2.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deeaecec2f4e20e8bec20b0a5cdc34daebe7903f2e700f7dcaef68b5925d35ea"}, + {file = "ijson-3.2.1-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11923ac3188877f19dbb7051f7345202701cc39bf8e5ac44f8ae536c9eca8c82"}, + {file = "ijson-3.2.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:400deefcdae21e90fc39c1dcfc6ba2df24537e8c65bd57b763ed5256b73ba64d"}, + {file = "ijson-3.2.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:56bc4bad53770710a3a91944fe640fdeb269987a14352b74ebbad2aa55801c00"}, + {file = "ijson-3.2.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:f5a179523e085126844c6161aabcd193dbb5747bd01fadb68e92abf048f32ec9"}, + {file = "ijson-3.2.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ee24655986e4415fbb7a0cf51445fff3072ceac0e219f4bbbd5c53535a3c5680"}, + {file = "ijson-3.2.1-cp36-cp36m-win32.whl", hash = 
"sha256:4a5c672b0540005c1bb0bba97aa559a87a2e4ee409fc68e2f5ba5b30f009ac99"}, + {file = "ijson-3.2.1-cp36-cp36m-win_amd64.whl", hash = "sha256:cfaf1d89b0e122e69c87a15db6d6f44feb9db96d2af7fe88cdc464177a257b5d"}, + {file = "ijson-3.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1cbd052eb67c1b3611f25974ba967886e89391faaf55afec93808c19f06ca612"}, + {file = "ijson-3.2.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f13ffc491886e5d7bde7d68712d168bce0141b2a918db1164bc8599c0123e293"}, + {file = "ijson-3.2.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc4c4fc6bafc777f8422fe36edb1cbd72a13cb29695893a064c9c95776a4bdf9"}, + {file = "ijson-3.2.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a42fcb2bf9748c26f004690b2feb6e13e4875bb7c9d83535f887c21e0a982a7c"}, + {file = "ijson-3.2.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0c92f7bc2f3a947c2ba7f7aa48382c36079f8259c930e81d9164341f9b853c45"}, + {file = "ijson-3.2.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:fd497042db562379339660e787bc8679ed3abaa740768d39bc3746e769e7c7a5"}, + {file = "ijson-3.2.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7d61c7cd8ddd75dcef818ff5a111a31b902a6a0e410ee0c2b2ecaa6dac92658a"}, + {file = "ijson-3.2.1-cp37-cp37m-win32.whl", hash = "sha256:36caf624d263fc40e7e805d759d09ea368d8cf497aecb3241ac2f0a286ad8eca"}, + {file = "ijson-3.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:32f9ed25ff80942e433119600bca13b86a8f9b8b0966edbc1d91a48ccbdd4d54"}, + {file = "ijson-3.2.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e89bbd747140eac3a3c9e7e5835b90d85c4a02763fc5134861bfc1ea03b66ae7"}, + {file = "ijson-3.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d69b4b1d509de36ec42a0e4af30ede39fb754e4039b2928ef7282ebc2125ffdd"}, + {file = "ijson-3.2.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e7feb0771f50deabe6ce85b210fa9e005843d3d3c60fb3315d69e1f9d0d75e0c"}, + {file = "ijson-3.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fd8148a363888054ff06eaaa1103f2f98720ab39666084a214e4fedfc13cf64"}, + {file = "ijson-3.2.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:598638dcc5141e9ae269903901877103f5362e0db4443e34721df8f8d34577b4"}, + {file = "ijson-3.2.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e979190b7d0fabca20d6b7224ab1c1aa461ad1ab72ba94f1bb1e5894cd59f342"}, + {file = "ijson-3.2.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:bc810eb80b4f486c7957201ba2a53f53ddc9b3233af67e4359e29371bf04883b"}, + {file = "ijson-3.2.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:26e758584611dfe826dd18ffd94dc0d8a062ce56e41674ad3bfa371c7b78c4b5"}, + {file = "ijson-3.2.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:24e9ae5b35b85ea094b6c36495bc856089254aed6a48bada8d7eec5a04f74439"}, + {file = "ijson-3.2.1-cp38-cp38-win32.whl", hash = "sha256:4b5dc7b5b4b8cb3087d188f37911cd67e26672d33d3571e73440de3f0a86f7e6"}, + {file = "ijson-3.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:1af94ff40609270bbb3eac47e072582bb578f5023fac8408cccd80fe5892d221"}, + {file = "ijson-3.2.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2dda67affceebc52c8bc5fe72c3a4a1e338e4d4b0497dbac5089c2d3862df214"}, + {file = "ijson-3.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bd780303ddfedc8d57cdb9f2d53a8cea2f2f4a6fb857bf8fe5a0c3ab1d4ca901"}, + {file = "ijson-3.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:4fbab6af1bab88a8e46beda08cf44610eed0adb8d157a1a60b4bb6c3a121c6de"}, + {file = "ijson-3.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c97a07988a1e0ce2bc8e8a62eb5f25195a3bd58a939ac353cbc6018a548cc08d"}, + {file = "ijson-3.2.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a65671a6826ae723837143914c823ad7bcc0d1a3e38d87c71df897a2556fb48f"}, + {file = "ijson-3.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1806372008bbed9ee92db5747e38c047fa1c4ee89cb2dd5daaa57feb46ce50a"}, + {file = "ijson-3.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:91e5a8e96f78a59e2520078c227a4fec5bf91c13adeded9e33fb13981cb823c3"}, + {file = "ijson-3.2.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1f820fce8ef093718f2319ff6f1322390664659b783775919dadccb1b470153d"}, + {file = "ijson-3.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bca3e8c91a1076a20620dbaa6a2848772b0e8a4055e86d42d3fa39221b53ed1a"}, + {file = "ijson-3.2.1-cp39-cp39-win32.whl", hash = "sha256:de87f137b7438d43840f4339a37d4e6a58c987f4bb2a70609969f854f8ae20f3"}, + {file = "ijson-3.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:0caebb8350b47266a58b766ec08e1de441d6d160702c428b5cf7504d93c832c4"}, + {file = "ijson-3.2.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:37389785c1abd27fcc24800fcfa9a6b1022743413e4056507fd32356b623ff33"}, + {file = "ijson-3.2.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b364b82231d51cbeae52468c3b27e8a042e544ab764c8f3975e912cf010603f"}, + {file = "ijson-3.2.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a5999d0ec28a8ec47cf20c736fd4f895dc077bf6441bf237b00b074315a295d"}, + {file = "ijson-3.2.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bd481857a39305517fb6f1313d558c2dc4e78c9e9384cc5bc1c3e28f1afbedf"}, + {file = "ijson-3.2.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:545f62f12f89350d4d73f2a779cb269198ae578fac080085a1927148b803e602"}, + {file = "ijson-3.2.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4d5622505d01c2f3d7b9638c1eb8c747eb550936b505225893704289ff28576f"}, + {file = "ijson-3.2.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20293bb36423b129fad3753858ccf7b2ccb5b2c0d3759efe810d0b9d79633a7e"}, + {file = "ijson-3.2.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7cd8a4921b852fd2cb5b0c985540c97ff6893139a57fe7121d510ec5d1c0ca44"}, + {file = "ijson-3.2.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc902ff1ae1efed7d526294d7a9dd3df66d29b2cdc05fb5479838fef1327a534"}, + {file = "ijson-3.2.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:2925a7978d8170146a9cb49a15a982b71fbbf21980bf2e16cd90c528545b7c02"}, + {file = "ijson-3.2.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c21c6509f6944939399f3630c5dc424d30d71d375f6cd58f9af56158fdf7251c"}, + {file = "ijson-3.2.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5729fc7648bc972d70922d7dad15459cca3a9e5ed0328eb9ae3ffa004066194"}, + {file = "ijson-3.2.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:805a2d5ed5a15d60327bc9347f2d4125ab621fb18071db98b1c598f1ee99e8f1"}, + {file = "ijson-3.2.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d0220a4b6c63f44589e429157174e3f4b8d1e534d5fb82bdb43a7f8dd77ae4b"}, + {file = 
"ijson-3.2.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:271d9b7c083f65c58ff0afd9dbb5d2f3d445f734632aebfef4a261b0a337abdb"}, + {file = "ijson-3.2.1.tar.gz", hash = "sha256:8574bf19f31fab870488769ad919a80f130825236ac8bde9a733f69c2961d7a7"}, ] [[package]] From 887fa4b66b038c886634a3eef92af108e391be34 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 20 Jun 2023 04:05:31 -0500 Subject: [PATCH 151/562] Switch from `matrix://` to `matrix-federation://` scheme for internal Synapse routing of outbound federation traffic (#15806) `matrix://` is a registered specced scheme nowadays and doesn't make sense for our internal to Synapse use case anymore. ([discussion] (https://github.com/matrix-org/synapse/pull/15773#discussion_r1227598679)) --- changelog.d/15806.misc | 1 + contrib/lnav/synapse-log-format.json | 2 +- scripts-dev/federation_client.py | 4 +- .../federation/matrix_federation_agent.py | 14 ++++--- synapse/http/matrixfederationclient.py | 9 ++++- tests/federation/test_federation_client.py | 4 +- .../test_matrix_federation_agent.py | 38 ++++++++++--------- 7 files changed, 43 insertions(+), 29 deletions(-) create mode 100644 changelog.d/15806.misc diff --git a/changelog.d/15806.misc b/changelog.d/15806.misc new file mode 100644 index 0000000000..80d0eb2f8f --- /dev/null +++ b/changelog.d/15806.misc @@ -0,0 +1 @@ +Switch from `matrix://` to `matrix-federation://` scheme for internal Synapse routing of outbound federation traffic. diff --git a/contrib/lnav/synapse-log-format.json b/contrib/lnav/synapse-log-format.json index ad7017ee5e..649cd623e8 100644 --- a/contrib/lnav/synapse-log-format.json +++ b/contrib/lnav/synapse-log-format.json @@ -29,7 +29,7 @@ "level": "error" }, { - "line": "my-matrix-server-federation-sender-1 | 2023-01-25 20:56:20,995 - synapse.http.matrixfederationclient - 709 - WARNING - federation_transaction_transmission_loop-3 - {PUT-O-3} [example.com] Request failed: PUT matrix://example.com/_matrix/federation/v1/send/1674680155797: HttpResponseException('403: Forbidden')", + "line": "my-matrix-server-federation-sender-1 | 2023-01-25 20:56:20,995 - synapse.http.matrixfederationclient - 709 - WARNING - federation_transaction_transmission_loop-3 - {PUT-O-3} [example.com] Request failed: PUT matrix-federation://example.com/_matrix/federation/v1/send/1674680155797: HttpResponseException('403: Forbidden')", "level": "warning" }, { diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index b1d5e2e616..63f0b25ddd 100755 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -136,11 +136,11 @@ def request( authorization_headers.append(header) print("Authorization: %s" % header, file=sys.stderr) - dest = "matrix://%s%s" % (destination, path) + dest = "matrix-federation://%s%s" % (destination, path) print("Requesting %s" % dest, file=sys.stderr) s = requests.Session() - s.mount("matrix://", MatrixConnectionAdapter()) + s.mount("matrix-federation://", MatrixConnectionAdapter()) headers: Dict[str, str] = { "Authorization": authorization_headers[0], diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 7e8cf31682..91a24efcd0 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -51,8 +51,10 @@ logger = logging.getLogger(__name__) @implementer(IAgent) class MatrixFederationAgent: """An Agent-like thing which provides a `request` method which correctly - handles resolving matrix server 
names when using matrix://. Handles standard - https URIs as normal. + handles resolving matrix server names when using `matrix-federation://`. Handles + standard https URIs as normal. The `matrix-federation://` scheme is internal to + Synapse and we purposely want to avoid colliding with the `matrix://` URL scheme + which is now specced. Doesn't implement any retries. (Those are done in MatrixFederationHttpClient.) @@ -167,14 +169,14 @@ class MatrixFederationAgent: # There must be a valid hostname. assert parsed_uri.hostname - # If this is a matrix:// URI check if the server has delegated matrix + # If this is a matrix-federation:// URI check if the server has delegated matrix # traffic using well-known delegation. # # We have to do this here and not in the endpoint as we need to rewrite # the host header with the delegated server name. delegated_server = None if ( - parsed_uri.scheme == b"matrix" + parsed_uri.scheme == b"matrix-federation" and not _is_ip_literal(parsed_uri.hostname) and not parsed_uri.port ): @@ -250,7 +252,7 @@ class MatrixHostnameEndpointFactory: @implementer(IStreamClientEndpoint) class MatrixHostnameEndpoint: - """An endpoint that resolves matrix:// URLs using Matrix server name + """An endpoint that resolves matrix-federation:// URLs using Matrix server name resolution (i.e. via SRV). Does not check for well-known delegation. Args: @@ -379,7 +381,7 @@ class MatrixHostnameEndpoint: connect to. """ - if self._parsed_uri.scheme != b"matrix": + if self._parsed_uri.scheme != b"matrix-federation": return [Server(host=self._parsed_uri.host, port=self._parsed_uri.port)] # Note: We don't do well-known lookup as that needs to have happened diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index abb5ae5815..fc0101808d 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -174,7 +174,14 @@ class MatrixFederationRequest: # The object is frozen so we can pre-compute this. 
uri = urllib.parse.urlunparse( - (b"matrix", destination_bytes, path_bytes, None, query_bytes, b"") + ( + b"matrix-federation", + destination_bytes, + path_bytes, + None, + query_bytes, + b"", + ) ) object.__setattr__(self, "uri", uri) diff --git a/tests/federation/test_federation_client.py b/tests/federation/test_federation_client.py index 91694e4fca..a45ab83683 100644 --- a/tests/federation/test_federation_client.py +++ b/tests/federation/test_federation_client.py @@ -124,7 +124,7 @@ class FederationClientTest(FederatingHomeserverTestCase): # check the right call got made to the agent self._mock_agent.request.assert_called_once_with( b"GET", - b"matrix://yet.another.server/_matrix/federation/v1/state/%21room_id?event_id=event_id", + b"matrix-federation://yet.another.server/_matrix/federation/v1/state/%21room_id?event_id=event_id", headers=mock.ANY, bodyProducer=None, ) @@ -232,7 +232,7 @@ class FederationClientTest(FederatingHomeserverTestCase): # check the right call got made to the agent self._mock_agent.request.assert_called_once_with( b"GET", - b"matrix://yet.another.server/_matrix/federation/v1/event/event_id", + b"matrix-federation://yet.another.server/_matrix/federation/v1/event/event_id", headers=mock.ANY, bodyProducer=None, ) diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 105b4caefa..aed2a4c07a 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -292,7 +292,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self.agent = self._make_agent() self.reactor.lookups["testserv"] = "1.2.3.4" - test_d = self._make_get_request(b"matrix://testserv:8448/foo/bar") + test_d = self._make_get_request(b"matrix-federation://testserv:8448/foo/bar") # Nothing happened yet self.assertNoResult(test_d) @@ -393,7 +393,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self.reactor.lookups["testserv"] = "1.2.3.4" self.reactor.lookups["proxy.com"] = "9.9.9.9" - test_d = self._make_get_request(b"matrix://testserv:8448/foo/bar") + test_d = self._make_get_request(b"matrix-federation://testserv:8448/foo/bar") # Nothing happened yet self.assertNoResult(test_d) @@ -532,7 +532,7 @@ class MatrixFederationAgentTests(unittest.TestCase): # there will be a getaddrinfo on the IP self.reactor.lookups["1.2.3.4"] = "1.2.3.4" - test_d = self._make_get_request(b"matrix://1.2.3.4/foo/bar") + test_d = self._make_get_request(b"matrix-federation://1.2.3.4/foo/bar") # Nothing happened yet self.assertNoResult(test_d) @@ -568,7 +568,7 @@ class MatrixFederationAgentTests(unittest.TestCase): # there will be a getaddrinfo on the IP self.reactor.lookups["::1"] = "::1" - test_d = self._make_get_request(b"matrix://[::1]/foo/bar") + test_d = self._make_get_request(b"matrix-federation://[::1]/foo/bar") # Nothing happened yet self.assertNoResult(test_d) @@ -604,7 +604,7 @@ class MatrixFederationAgentTests(unittest.TestCase): # there will be a getaddrinfo on the IP self.reactor.lookups["::1"] = "::1" - test_d = self._make_get_request(b"matrix://[::1]:80/foo/bar") + test_d = self._make_get_request(b"matrix-federation://[::1]:80/foo/bar") # Nothing happened yet self.assertNoResult(test_d) @@ -639,7 +639,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self.mock_resolver.resolve_service.side_effect = generate_resolve_service([]) self.reactor.lookups["testserv1"] = "1.2.3.4" - test_d = self._make_get_request(b"matrix://testserv1/foo/bar") + test_d = 
self._make_get_request(b"matrix-federation://testserv1/foo/bar") # Nothing happened yet self.assertNoResult(test_d) @@ -693,7 +693,7 @@ class MatrixFederationAgentTests(unittest.TestCase): # there will be a getaddrinfo on the IP self.reactor.lookups["1.2.3.5"] = "1.2.3.5" - test_d = self._make_get_request(b"matrix://1.2.3.5/foo/bar") + test_d = self._make_get_request(b"matrix-federation://1.2.3.5/foo/bar") # Nothing happened yet self.assertNoResult(test_d) @@ -725,7 +725,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self.mock_resolver.resolve_service.side_effect = generate_resolve_service([]) self.reactor.lookups["testserv"] = "1.2.3.4" - test_d = self._make_get_request(b"matrix://testserv/foo/bar") + test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") # Nothing happened yet self.assertNoResult(test_d) @@ -780,7 +780,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self.reactor.lookups["testserv"] = "1.2.3.4" self.reactor.lookups["target-server"] = "1::f" - test_d = self._make_get_request(b"matrix://testserv/foo/bar") + test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") # Nothing happened yet self.assertNoResult(test_d) @@ -844,7 +844,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self.reactor.lookups["testserv"] = "1.2.3.4" self.reactor.lookups["target-server"] = "1::f" - test_d = self._make_get_request(b"matrix://testserv/foo/bar") + test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") # Nothing happened yet self.assertNoResult(test_d) @@ -933,7 +933,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self.mock_resolver.resolve_service.side_effect = generate_resolve_service([]) self.reactor.lookups["testserv"] = "1.2.3.4" - test_d = self._make_get_request(b"matrix://testserv/foo/bar") + test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") # Nothing happened yet self.assertNoResult(test_d) @@ -1009,7 +1009,7 @@ class MatrixFederationAgentTests(unittest.TestCase): ), ) - test_d = agent.request(b"GET", b"matrix://testserv/foo/bar") + test_d = agent.request(b"GET", b"matrix-federation://testserv/foo/bar") # Nothing happened yet self.assertNoResult(test_d) @@ -1042,7 +1042,7 @@ class MatrixFederationAgentTests(unittest.TestCase): ) self.reactor.lookups["srvtarget"] = "1.2.3.4" - test_d = self._make_get_request(b"matrix://testserv/foo/bar") + test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") # Nothing happened yet self.assertNoResult(test_d) @@ -1082,7 +1082,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self.reactor.lookups["testserv"] = "1.2.3.4" self.reactor.lookups["srvtarget"] = "5.6.7.8" - test_d = self._make_get_request(b"matrix://testserv/foo/bar") + test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") # Nothing happened yet self.assertNoResult(test_d) @@ -1143,7 +1143,9 @@ class MatrixFederationAgentTests(unittest.TestCase): self.reactor.lookups["xn--bcher-kva.com"] = "1.2.3.4" # this is idna for bücher.com - test_d = self._make_get_request(b"matrix://xn--bcher-kva.com/foo/bar") + test_d = self._make_get_request( + b"matrix-federation://xn--bcher-kva.com/foo/bar" + ) # Nothing happened yet self.assertNoResult(test_d) @@ -1204,7 +1206,9 @@ class MatrixFederationAgentTests(unittest.TestCase): ) self.reactor.lookups["xn--trget-3qa.com"] = "1.2.3.4" - test_d = self._make_get_request(b"matrix://xn--bcher-kva.com/foo/bar") + test_d = self._make_get_request( + b"matrix-federation://xn--bcher-kva.com/foo/bar" + ) # 
Nothing happened yet self.assertNoResult(test_d) @@ -1411,7 +1415,7 @@ class MatrixFederationAgentTests(unittest.TestCase): ) self.reactor.lookups["target.com"] = "1.2.3.4" - test_d = self._make_get_request(b"matrix://testserv/foo/bar") + test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") # Nothing happened yet self.assertNoResult(test_d) From 2301a09d7af2a7b1c85cceb9cc60eee895784c20 Mon Sep 17 00:00:00 2001 From: ew-at-vier <136321107+ew-at-vier@users.noreply.github.com> Date: Tue, 20 Jun 2023 12:45:26 +0200 Subject: [PATCH 152/562] Fix admin api documentation typo (#15805) * Fix admin api documentation typo Signed-off-by: Eric Wolf --- changelog.d/15805.doc | 1 + docs/admin_api/rooms.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15805.doc diff --git a/changelog.d/15805.doc b/changelog.d/15805.doc new file mode 100644 index 0000000000..446f627cfc --- /dev/null +++ b/changelog.d/15805.doc @@ -0,0 +1 @@ +Fix a typo in the [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html). diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index 66b29e82dc..90b06045a8 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -419,7 +419,7 @@ The following query parameters are available: * `from` (required) - The token to start returning events from. This token can be obtained from a prev_batch or next_batch token returned by the /sync endpoint, or from an end token returned by a previous request to this endpoint. -* `to` - The token to spot returning events at. +* `to` - The token to stop returning events at. * `limit` - The maximum number of events to return. Defaults to `10`. * `filter` - A JSON RoomEventFilter to filter returned events with. * `dir` - The direction to return events from. Either `f` for forwards or `b` for backwards. Setting From 6a5cf1a759526d567b58e33ad18d6db9e3ebde25 Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 20 Jun 2023 07:55:46 -0700 Subject: [PATCH 153/562] Fix Sytest environment variable evaluation in CI (#15804) --- .github/workflows/tests.yml | 4 ++-- changelog.d/15804.bugfix | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15804.bugfix diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index a0d1c24e90..6c22984997 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -399,8 +399,8 @@ jobs: env: SYTEST_BRANCH: ${{ github.head_ref }} POSTGRES: ${{ matrix.job.postgres && 1}} - MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}} - ASYNCIO_REACTOR: ${{ (matrix.job.reactor == 'asyncio') && 1 }} + MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') || '' }} + ASYNCIO_REACTOR: ${{ (matrix.job.reactor == 'asyncio') || '' }} WORKERS: ${{ matrix.job.workers && 1 }} BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }} TOP: ${{ github.workspace }} diff --git a/changelog.d/15804.bugfix b/changelog.d/15804.bugfix new file mode 100644 index 0000000000..7c8b954397 --- /dev/null +++ b/changelog.d/15804.bugfix @@ -0,0 +1 @@ +Fix Sytest environment variable evaluation in CI.
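A note on why the workflow change above works, based on GitHub Actions expression semantics rather than anything stated in the patch itself: `a && b` evaluates to `a` when `a` is falsy, so `${{ cond && 1 }}` renders as the string `false` when the condition fails, a non-empty value that Sytest's "is this variable set?" checks treat as enabled. `${{ cond || '' }}` renders the empty string instead. A minimal sketch, using a hypothetical `flag` matrix variable:

```yaml
jobs:
  demo:
    strategy:
      matrix:
        flag: [true, false]
    runs-on: ubuntu-latest
    env:
      # `flag && 1` gives "1" when flag is true, but "false" (non-empty!) when it is false
      BROKEN: ${{ matrix.flag && 1 }}
      # `flag || ''` gives "true" when flag is true, and "" (genuinely empty) when it is false
      FIXED: ${{ matrix.flag || '' }}
    steps:
      - run: echo "BROKEN='$BROKEN' FIXED='$FIXED'"
```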
From 7d3da399dd905d2a05da5a1941e996cbf2380e99 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Tue, 20 Jun 2023 17:22:50 +0200 Subject: [PATCH 154/562] 1.86.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index f2f39c3b6e..ff4126044b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.86.0 (2023-06-20) +=========================== + +No significant changes since 1.86.0rc2. + + Synapse 1.86.0rc2 (2023-06-14) ============================== diff --git a/debian/changelog b/debian/changelog index 81b71ba342..9d057c65ef 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.86.0) stable; urgency=medium + + * New Synapse release 1.86.0. + + -- Synapse Packaging team Tue, 20 Jun 2023 17:22:46 +0200 + matrix-synapse-py3 (1.86.0~rc2) stable; urgency=medium * New Synapse release 1.86.0rc2. diff --git a/pyproject.toml b/pyproject.toml index 097bd03943..3626be9797 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.86.0rc2" +version = "1.86.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 496f73103df838795b0e98f8c1c7337468e41abc Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Wed, 21 Jun 2023 10:41:11 +0200 Subject: [PATCH 155/562] Allow for the configuration of max request retries and min/max retry delays in the matrix federation client (#15783) --- changelog.d/15783.misc | 1 + .../configuration/config_documentation.md | 26 ++++++ synapse/config/federation.py | 16 +++++ synapse/http/matrixfederationclient.py | 59 ++++++++++++------- tests/http/test_matrixfederationclient.py | 20 ++++++- 5 files changed, 100 insertions(+), 22 deletions(-) create mode 100644 changelog.d/15783.misc diff --git a/changelog.d/15783.misc b/changelog.d/15783.misc new file mode 100644 index 0000000000..0bebaa213d --- /dev/null +++ b/changelog.d/15783.misc @@ -0,0 +1 @@ +Allow for the configuration of max request retries and min/max retry delays in the matrix federation client. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 0cf6e075ff..26d7c7900c 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1196,6 +1196,32 @@ Example configuration: allow_device_name_lookup_over_federation: true ``` --- +### `federation` + +The federation section defines some sub-options related to federation. + +The following options configure timeout and retry logic for a single request, +independently of the others. +The short retry algorithm is used when something or someone is waiting for the request to +complete, while the long retry algorithm is used for requests that happen in the background, +like sending a federation transaction. + +* `client_timeout`: timeout for federation requests. Defaults to 60s. +* `max_short_retry_delay`: maximum delay to be used for the short retry algo. Defaults to 2s. +* `max_long_retry_delay`: maximum delay to be used for the long retry algo. Defaults to 60s. +* `max_short_retries`: maximum number of retries for the short retry algo. Defaults to 3 attempts. +* `max_long_retries`: maximum number of retries for the long retry algo. Defaults to 10 attempts.
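To make the retry options above concrete, here is a small standalone sketch, not Synapse code, mirroring the long-retry delay formula visible in the `synapse/http/matrixfederationclient.py` hunk below, with the default settings:

```python
import random
from typing import List

MAX_LONG_RETRIES = 10        # default `max_long_retries`
MAX_LONG_RETRY_DELAY = 60.0  # default `max_long_retry_delay`, in seconds


def long_retry_delays() -> List[float]:
    """Return the delays (in seconds) slept before each long-retry attempt."""
    delays = []
    for retries_left in range(MAX_LONG_RETRIES, 0, -1):
        # Exponential backoff: 4s, 16s, 64s, ... capped at the configured maximum.
        delay = min(4.0 ** (MAX_LONG_RETRIES + 1 - retries_left), MAX_LONG_RETRY_DELAY)
        # Jittered by a random factor between 0.8 and 1.4 so that many failing
        # requests don't all retry at the same instant.
        delays.append(delay * random.uniform(0.8, 1.4))
    return delays


print(long_retry_delays())  # e.g. [4.2, 17.5, 58.9, 63.1, ...]; capped from the third retry on
```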
+ +Example configuration: +```yaml +federation: + client_timeout: 180s + max_short_retry_delay: 7s + max_long_retry_delay: 100s + max_short_retries: 5 + max_long_retries: 20 +``` +--- ## Caching Options related to caching. diff --git a/synapse/config/federation.py b/synapse/config/federation.py index 336fca578a..0e1cb8b6e3 100644 --- a/synapse/config/federation.py +++ b/synapse/config/federation.py @@ -22,6 +22,8 @@ class FederationConfig(Config): section = "federation" def read_config(self, config: JsonDict, **kwargs: Any) -> None: + federation_config = config.setdefault("federation", {}) + # FIXME: federation_domain_whitelist needs sytests self.federation_domain_whitelist: Optional[dict] = None federation_domain_whitelist = config.get("federation_domain_whitelist", None) @@ -49,5 +51,19 @@ class FederationConfig(Config): "allow_device_name_lookup_over_federation", False ) + # Allow for the configuration of timeout, max request retries + # and min/max retry delays in the matrix federation client. + self.client_timeout_ms = Config.parse_duration( + federation_config.get("client_timeout", "60s") + ) + self.max_long_retry_delay_ms = Config.parse_duration( + federation_config.get("max_long_retry_delay", "60s") + ) + self.max_short_retry_delay_ms = Config.parse_duration( + federation_config.get("max_short_retry_delay", "2s") + ) + self.max_long_retries = federation_config.get("max_long_retries", 10) + self.max_short_retries = federation_config.get("max_short_retries", 3) + _METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}} diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index fc0101808d..cc4e258b0f 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -95,8 +95,6 @@ incoming_responses_counter = Counter( ) -MAX_LONG_RETRIES = 10 -MAX_SHORT_RETRIES = 3 MAXINT = sys.maxsize @@ -413,7 +411,16 @@ class MatrixFederationHttpClient: self.clock = hs.get_clock() self._store = hs.get_datastores().main self.version_string_bytes = hs.version_string.encode("ascii") - self.default_timeout = 60 + self.default_timeout_seconds = hs.config.federation.client_timeout_ms / 1000 + + self.max_long_retry_delay_seconds = ( + hs.config.federation.max_long_retry_delay_ms / 1000 + ) + self.max_short_retry_delay_seconds = ( + hs.config.federation.max_short_retry_delay_ms / 1000 + ) + self.max_long_retries = hs.config.federation.max_long_retries + self.max_short_retries = hs.config.federation.max_short_retries self._cooperator = Cooperator(scheduler=_make_scheduler(self.reactor)) @@ -542,10 +549,10 @@ class MatrixFederationHttpClient: logger.exception(f"Invalid destination: {request.destination}.") raise FederationDeniedError(request.destination) - if timeout: + if timeout is not None: _sec_timeout = timeout / 1000 else: - _sec_timeout = self.default_timeout + _sec_timeout = self.default_timeout_seconds if ( self.hs.config.federation.federation_domain_whitelist is not None @@ -590,9 +597,9 @@ class MatrixFederationHttpClient: # XXX: Would be much nicer to retry only at the transaction-layer # (once we have reliable transactions in place) if long_retries: - retries_left = MAX_LONG_RETRIES + retries_left = self.max_long_retries else: - retries_left = MAX_SHORT_RETRIES + retries_left = self.max_short_retries url_bytes = request.uri url_str = url_bytes.decode("ascii") @@ -737,24 +744,34 @@ class MatrixFederationHttpClient: if retries_left and not timeout: if long_retries: - delay = 4 ** (MAX_LONG_RETRIES + 1 - 
retries_left) - delay = min(delay, 60) - delay *= random.uniform(0.8, 1.4) + delay_seconds = 4 ** ( + self.max_long_retries + 1 - retries_left + ) + delay_seconds = min( + delay_seconds, self.max_long_retry_delay_seconds + ) + delay_seconds *= random.uniform(0.8, 1.4) else: - delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left) - delay = min(delay, 2) - delay *= random.uniform(0.8, 1.4) + delay_seconds = 0.5 * 2 ** ( + self.max_short_retries - retries_left + ) + delay_seconds = min( + delay_seconds, self.max_short_retry_delay_seconds + ) + delay_seconds *= random.uniform(0.8, 1.4) logger.debug( "{%s} [%s] Waiting %ss before re-sending...", request.txn_id, request.destination, - delay, + delay_seconds, ) # Sleep for the calculated delay, or wake up immediately # if we get notified that the server is back up. - await self._sleeper.sleep(request.destination, delay * 1000) + await self._sleeper.sleep( + request.destination, delay_seconds * 1000 + ) retries_left -= 1 else: raise @@ -953,7 +970,7 @@ class MatrixFederationHttpClient: if timeout is not None: _sec_timeout = timeout / 1000 else: - _sec_timeout = self.default_timeout + _sec_timeout = self.default_timeout_seconds if parser is None: parser = cast(ByteParser[T], JsonParser()) @@ -1031,10 +1048,10 @@ class MatrixFederationHttpClient: ignore_backoff=ignore_backoff, ) - if timeout: + if timeout is not None: _sec_timeout = timeout / 1000 else: - _sec_timeout = self.default_timeout + _sec_timeout = self.default_timeout_seconds body = await _handle_response( self.reactor, _sec_timeout, request, response, start_ms, parser=JsonParser() @@ -1142,7 +1159,7 @@ class MatrixFederationHttpClient: if timeout is not None: _sec_timeout = timeout / 1000 else: - _sec_timeout = self.default_timeout + _sec_timeout = self.default_timeout_seconds if parser is None: parser = cast(ByteParser[T], JsonParser()) @@ -1218,7 +1235,7 @@ class MatrixFederationHttpClient: if timeout is not None: _sec_timeout = timeout / 1000 else: - _sec_timeout = self.default_timeout + _sec_timeout = self.default_timeout_seconds body = await _handle_response( self.reactor, _sec_timeout, request, response, start_ms, parser=JsonParser() @@ -1270,7 +1287,7 @@ class MatrixFederationHttpClient: try: d = read_body_with_max_size(response, output_stream, max_size) - d.addTimeout(self.default_timeout, self.reactor) + d.addTimeout(self.default_timeout_seconds, self.reactor) length = await make_deferred_yieldable(d) except BodyExceededMaxSize: msg = "Requested file is too large > %r bytes" % (max_size,) diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py index 0dfc03ce50..b5f4a60fe5 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -40,7 +40,7 @@ from synapse.server import HomeServer from synapse.util import Clock from tests.server import FakeTransport -from tests.unittest import HomeserverTestCase +from tests.unittest import HomeserverTestCase, override_config def check_logcontext(context: LoggingContextOrSentinel) -> None: @@ -640,3 +640,21 @@ class FederationClientTests(HomeserverTestCase): self.cl.build_auth_headers( b"", b"GET", b"https://example.com", destination_is=b"" ) + + @override_config( + { + "federation": { + "client_timeout": "180s", + "max_long_retry_delay": "100s", + "max_short_retry_delay": "7s", + "max_long_retries": 20, + "max_short_retries": 5, + } + } + ) + def test_configurable_retry_and_delay_values(self) -> None: + self.assertEqual(self.cl.default_timeout_seconds, 180) 
+ self.assertEqual(self.cl.max_long_retry_delay_seconds, 100) + self.assertEqual(self.cl.max_short_retry_delay_seconds, 7) + self.assertEqual(self.cl.max_long_retries, 20) + self.assertEqual(self.cl.max_short_retries, 5) From 6c749c51244df582cce8579f99a0856f79145c2e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 21 Jun 2023 11:34:32 +0100 Subject: [PATCH 156/562] Fix typo in faster join docs (#15812) Fixes #15756 --- changelog.d/15812.doc | 1 + docs/development/synapse_architecture/faster_joins.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15812.doc diff --git a/changelog.d/15812.doc b/changelog.d/15812.doc new file mode 100644 index 0000000000..de7b64e4a2 --- /dev/null +++ b/changelog.d/15812.doc @@ -0,0 +1 @@ +Fix typo in MSC number in faster remote room join architecture doc. diff --git a/docs/development/synapse_architecture/faster_joins.md b/docs/development/synapse_architecture/faster_joins.md index c32d713b8a..1e6d585b00 100644 --- a/docs/development/synapse_architecture/faster_joins.md +++ b/docs/development/synapse_architecture/faster_joins.md @@ -6,7 +6,7 @@ This is a work-in-progress set of notes with two goals: See also [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902). -The key idea is described by [MSC706](https://github.com/matrix-org/matrix-spec-proposals/pull/3902). This allows servers to +The key idea is described by [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). This allows servers to request a lightweight response to the federation `/send_join` endpoint. This is called a **faster join**, also known as a **partial join**. In these notes we'll usually use the word "partial" as it matches the database schema. From 289ce3b8d946c4c4964fac2ff020a0535dead4f0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 21 Jun 2023 14:20:46 +0100 Subject: [PATCH 157/562] Fix harmless exception in port DB script (#15814) The port DB script would try to run database background tasks, which could fail if the data they acted on was in the process of being ported. These exceptions were non-fatal. Fixes #15789 --- changelog.d/15814.misc | 1 + synapse/_scripts/synapse_port_db.py | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/15814.misc diff --git a/changelog.d/15814.misc b/changelog.d/15814.misc new file mode 100644 index 0000000000..8e1107212f --- /dev/null +++ b/changelog.d/15814.misc @@ -0,0 +1 @@ +Fix harmless exceptions being printed when running the port DB script. diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 27fee3d9a9..a803ada8ad 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -1369,6 +1369,9 @@ def main() -> None: sys.stderr.write("Database must use the 'psycopg2' connector.\n") sys.exit(3) + # Don't run the background tasks that get started by the data stores. + hs_config["run_background_tasks_on"] = "some_other_process" + config = HomeServerConfig() config.parse_config_dict(hs_config, "", "") From e0c39d6bb526b01368393ae5d2173c8e6d39b60f Mon Sep 17 00:00:00 2001 From: Nicolas Werner <89468146+nico-famedly@users.noreply.github.com> Date: Wed, 21 Jun 2023 15:56:31 +0200 Subject: [PATCH 158/562] Fix forgotten rooms missing in initial sync (#15815) If you leave a room and forget it, then rejoin it, the room would be missing from the next initial sync.
fixes #13262 Signed-off-by: Nicolas Werner --- changelog.d/15815.bugfix | 1 + synapse/storage/databases/main/cache.py | 13 +++++++++++++ tests/handlers/test_room_member.py | 21 +++++++++++++++++++++ 3 files changed, 35 insertions(+) create mode 100644 changelog.d/15815.bugfix diff --git a/changelog.d/15815.bugfix b/changelog.d/15815.bugfix new file mode 100644 index 0000000000..e20b5acac1 --- /dev/null +++ b/changelog.d/15815.bugfix @@ -0,0 +1 @@ +Fix forgotten rooms missing from initial sync after rejoining them. Contributed by Nico from Famedly. diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 6e1c7d681f..c940f864d1 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -289,6 +289,17 @@ class CacheInvalidationWorkerStore(SQLBaseStore): ) self._attempt_to_invalidate_cache("get_rooms_for_user", (state_key,)) + self._attempt_to_invalidate_cache( + "did_forget", + ( + state_key, + room_id, + ), + ) + self._attempt_to_invalidate_cache( + "get_forgotten_rooms_for_user", (state_key,) + ) + if relates_to: self._attempt_to_invalidate_cache("get_relations_for_event", (relates_to,)) self._attempt_to_invalidate_cache("get_references_for_event", (relates_to,)) @@ -336,6 +347,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore): "get_rooms_for_user_with_stream_ordering", None ) self._attempt_to_invalidate_cache("get_rooms_for_user", None) + self._attempt_to_invalidate_cache("did_forget", None) + self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None) self._attempt_to_invalidate_cache("get_references_for_event", None) self._attempt_to_invalidate_cache("get_thread_summary", None) self._attempt_to_invalidate_cache("get_thread_participated", None) diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py index a444d822cd..41199ffa29 100644 --- a/tests/handlers/test_room_member.py +++ b/tests/handlers/test_room_member.py @@ -333,6 +333,27 @@ class RoomMemberMasterHandlerTestCase(HomeserverTestCase): self.get_success(self.store.is_locally_forgotten_room(self.room_id)) ) + def test_leave_and_unforget(self) -> None: + """Tests if rejoining a room unforgets the room, so that it shows up in sync again.""" + self.helper.join(self.room_id, user=self.bob, tok=self.bob_token) + + # alice is not the last room member that leaves and forgets the room + self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token) + self.get_success(self.handler.forget(self.alice_ID, self.room_id)) + self.assertTrue( + self.get_success(self.store.did_forget(self.alice, self.room_id)) + ) + + self.helper.join(self.room_id, user=self.alice, tok=self.alice_token) + self.assertFalse( + self.get_success(self.store.did_forget(self.alice, self.room_id)) + ) + + # the server has not forgotten the room + self.assertFalse( + self.get_success(self.store.is_locally_forgotten_room(self.room_id)) + ) + @override_config({"forget_rooms_on_leave": True}) def test_leave_and_auto_forget(self) -> None: """Tests the `forget_rooms_on_leave` config option.""" From d53547352063c3e66302f7bdc40d7ac7270e605c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 22 Jun 2023 16:32:53 +0100 Subject: [PATCH 159/562] Bump cryptography from 40.0.2 to 41.0.1 (#15800) Bumps [cryptography](https://github.com/pyca/cryptography) from 40.0.2 to 41.0.1. 
- [Changelog](https://github.com/pyca/cryptography/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pyca/cryptography/compare/40.0.2...41.0.1) --- updated-dependencies: - dependency-name: cryptography dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 50 +++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/poetry.lock b/poetry.lock index 88f1e9548a..d7b7a5aa2c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -465,30 +465,30 @@ files = [ [[package]] name = "cryptography" -version = "40.0.2" +version = "41.0.1" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "cryptography-40.0.2-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:8f79b5ff5ad9d3218afb1e7e20ea74da5f76943ee5edb7f76e56ec5161ec782b"}, - {file = "cryptography-40.0.2-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:05dc219433b14046c476f6f09d7636b92a1c3e5808b9a6536adf4932b3b2c440"}, - {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4df2af28d7bedc84fe45bd49bc35d710aede676e2a4cb7fc6d103a2adc8afe4d"}, - {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dcca15d3a19a66e63662dc8d30f8036b07be851a8680eda92d079868f106288"}, - {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:a04386fb7bc85fab9cd51b6308633a3c271e3d0d3eae917eebab2fac6219b6d2"}, - {file = "cryptography-40.0.2-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:adc0d980fd2760c9e5de537c28935cc32b9353baaf28e0814df417619c6c8c3b"}, - {file = "cryptography-40.0.2-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:d5a1bd0e9e2031465761dfa920c16b0065ad77321d8a8c1f5ee331021fda65e9"}, - {file = "cryptography-40.0.2-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:a95f4802d49faa6a674242e25bfeea6fc2acd915b5e5e29ac90a32b1139cae1c"}, - {file = "cryptography-40.0.2-cp36-abi3-win32.whl", hash = "sha256:aecbb1592b0188e030cb01f82d12556cf72e218280f621deed7d806afd2113f9"}, - {file = "cryptography-40.0.2-cp36-abi3-win_amd64.whl", hash = "sha256:b12794f01d4cacfbd3177b9042198f3af1c856eedd0a98f10f141385c809a14b"}, - {file = "cryptography-40.0.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:142bae539ef28a1c76794cca7f49729e7c54423f615cfd9b0b1fa90ebe53244b"}, - {file = "cryptography-40.0.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:956ba8701b4ffe91ba59665ed170a2ebbdc6fc0e40de5f6059195d9f2b33ca0e"}, - {file = "cryptography-40.0.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4f01c9863da784558165f5d4d916093737a75203a5c5286fde60e503e4276c7a"}, - {file = "cryptography-40.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3daf9b114213f8ba460b829a02896789751626a2a4e7a43a28ee77c04b5e4958"}, - {file = "cryptography-40.0.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48f388d0d153350f378c7f7b41497a54ff1513c816bcbbcafe5b829e59b9ce5b"}, - {file = "cryptography-40.0.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c0764e72b36a3dc065c155e5b22f93df465da9c39af65516fe04ed3c68c92636"}, - {file = "cryptography-40.0.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:cbaba590180cba88cb99a5f76f90808a624f18b169b90a4abb40c1fd8c19420e"}, - {file = "cryptography-40.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7a38250f433cd41df7fcb763caa3ee9362777fdb4dc642b9a349721d2bf47404"}, - {file = "cryptography-40.0.2.tar.gz", hash = "sha256:c33c0d32b8594fa647d2e01dbccc303478e16fdd7cf98652d5b3ed11aa5e5c99"}, + {file = "cryptography-41.0.1-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:f73bff05db2a3e5974a6fd248af2566134d8981fd7ab012e5dd4ddb1d9a70699"}, + {file = "cryptography-41.0.1-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:1a5472d40c8f8e91ff7a3d8ac6dfa363d8e3138b961529c996f3e2df0c7a411a"}, + {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fa01527046ca5facdf973eef2535a27fec4cb651e4daec4d043ef63f6ecd4ca"}, + {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b46e37db3cc267b4dea1f56da7346c9727e1209aa98487179ee8ebed09d21e43"}, + {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d198820aba55660b4d74f7b5fd1f17db3aa5eb3e6893b0a41b75e84e4f9e0e4b"}, + {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:948224d76c4b6457349d47c0c98657557f429b4e93057cf5a2f71d603e2fc3a3"}, + {file = "cryptography-41.0.1-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:059e348f9a3c1950937e1b5d7ba1f8e968508ab181e75fc32b879452f08356db"}, + {file = "cryptography-41.0.1-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:b4ceb5324b998ce2003bc17d519080b4ec8d5b7b70794cbd2836101406a9be31"}, + {file = "cryptography-41.0.1-cp37-abi3-win32.whl", hash = "sha256:8f4ab7021127a9b4323537300a2acfb450124b2def3756f64dc3a3d2160ee4b5"}, + {file = "cryptography-41.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:1fee5aacc7367487b4e22484d3c7e547992ed726d14864ee33c0176ae43b0d7c"}, + {file = "cryptography-41.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9a6c7a3c87d595608a39980ebaa04d5a37f94024c9f24eb7d10262b92f739ddb"}, + {file = "cryptography-41.0.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5d092fdfedaec4cbbffbf98cddc915ba145313a6fdaab83c6e67f4e6c218e6f3"}, + {file = "cryptography-41.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a8e6c2de6fbbcc5e14fd27fb24414507cb3333198ea9ab1258d916f00bc3039"}, + {file = "cryptography-41.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:cb33ccf15e89f7ed89b235cff9d49e2e62c6c981a6061c9c8bb47ed7951190bc"}, + {file = "cryptography-41.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5f0ff6e18d13a3de56f609dd1fd11470918f770c6bd5d00d632076c727d35485"}, + {file = "cryptography-41.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7bfc55a5eae8b86a287747053140ba221afc65eb06207bedf6e019b8934b477c"}, + {file = "cryptography-41.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:eb8163f5e549a22888c18b0d53d6bb62a20510060a22fd5a995ec8a05268df8a"}, + {file = "cryptography-41.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8dde71c4169ec5ccc1087bb7521d54251c016f126f922ab2dfe6649170a3b8c5"}, + {file = "cryptography-41.0.1.tar.gz", hash = "sha256:d34579085401d3f49762d2f7d6634d6b6c2ae1242202e860f4d26b046e3a1006"}, ] [package.dependencies] @@ -497,12 +497,12 @@ cffi = ">=1.12" [package.extras] docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] -pep8test = ["black", "check-manifest", "mypy", "ruff"] -sdist = 
["setuptools-rust (>=0.11.4)"] +nox = ["nox"] +pep8test = ["black", "check-sdist", "mypy", "ruff"] +sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-shard (>=0.1.2)", "pytest-subtests", "pytest-xdist"] +test = ["pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] -tox = ["tox"] [[package]] name = "defusedxml" From 6e65ca0b3619454bf7a9304f6deef8f08f45944e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Jun 2023 09:09:57 +0100 Subject: [PATCH 160/562] Bump types-setuptools from 67.8.0.0 to 68.0.0.0 (#15835) Bumps [types-setuptools](https://github.com/python/typeshed) from 67.8.0.0 to 68.0.0.0. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-setuptools dependency-type: direct:development update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index d7b7a5aa2c..133ffad6c3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3003,13 +3003,13 @@ types-urllib3 = "*" [[package]] name = "types-setuptools" -version = "67.8.0.0" +version = "68.0.0.0" description = "Typing stubs for setuptools" optional = false python-versions = "*" files = [ - {file = "types-setuptools-67.8.0.0.tar.gz", hash = "sha256:95c9ed61871d6c0e258433373a4e1753c0a7c3627a46f4d4058c7b5a08ab844f"}, - {file = "types_setuptools-67.8.0.0-py3-none-any.whl", hash = "sha256:6df73340d96b238a4188b7b7668814b37e8018168aef1eef94a3b1872e3f60ff"}, + {file = "types-setuptools-68.0.0.0.tar.gz", hash = "sha256:fc958b4123b155ffc069a66d3af5fe6c1f9d0600c35c0c8444b2ab4147112641"}, + {file = "types_setuptools-68.0.0.0-py3-none-any.whl", hash = "sha256:cc00e09ba8f535362cbe1ea8b8407d15d14b59c57f4190cceaf61a9e57616446"}, ] [[package]] From 53ea381ec3571b281079654f5b284034a1827919 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Jun 2023 09:14:20 +0100 Subject: [PATCH 161/562] Bump ruff from 0.0.272 to 0.0.275 (#15833) Bumps [ruff](https://github.com/astral-sh/ruff) from 0.0.272 to 0.0.275. - [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/BREAKING_CHANGES.md) - [Commits](https://github.com/astral-sh/ruff/compare/v0.0.272...v0.0.275) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 38 +++++++++++++++++++------------------- pyproject.toml | 2 +- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/poetry.lock b/poetry.lock index 133ffad6c3..7dbea889ef 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2245,28 +2245,28 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "ruff" -version = "0.0.272" +version = "0.0.275" description = "An extremely fast Python linter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.272-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:ae9b57546e118660175d45d264b87e9b4c19405c75b587b6e4d21e6a17bf4fdf"}, - {file = "ruff-0.0.272-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:1609b864a8d7ee75a8c07578bdea0a7db75a144404e75ef3162e0042bfdc100d"}, - {file = "ruff-0.0.272-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee76b4f05fcfff37bd6ac209d1370520d509ea70b5a637bdf0a04d0c99e13dff"}, - {file = "ruff-0.0.272-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:48eccf225615e106341a641f826b15224b8a4240b84269ead62f0afd6d7e2d95"}, - {file = "ruff-0.0.272-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:677284430ac539bb23421a2b431b4ebc588097ef3ef918d0e0a8d8ed31fea216"}, - {file = "ruff-0.0.272-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:9c4bfb75456a8e1efe14c52fcefb89cfb8f2a0d31ed8d804b82c6cf2dc29c42c"}, - {file = "ruff-0.0.272-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86bc788245361a8148ff98667da938a01e1606b28a45e50ac977b09d3ad2c538"}, - {file = "ruff-0.0.272-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:27b2ea68d2aa69fff1b20b67636b1e3e22a6a39e476c880da1282c3e4bf6ee5a"}, - {file = "ruff-0.0.272-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd2bbe337a3f84958f796c77820d55ac2db1e6753f39d1d1baed44e07f13f96d"}, - {file = "ruff-0.0.272-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d5a208f8ef0e51d4746930589f54f9f92f84bb69a7d15b1de34ce80a7681bc00"}, - {file = "ruff-0.0.272-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:905ff8f3d6206ad56fcd70674453527b9011c8b0dc73ead27618426feff6908e"}, - {file = "ruff-0.0.272-py3-none-musllinux_1_2_i686.whl", hash = "sha256:19643d448f76b1eb8a764719072e9c885968971bfba872e14e7257e08bc2f2b7"}, - {file = "ruff-0.0.272-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:691d72a00a99707a4e0b2846690961157aef7b17b6b884f6b4420a9f25cd39b5"}, - {file = "ruff-0.0.272-py3-none-win32.whl", hash = "sha256:dc406e5d756d932da95f3af082814d2467943631a587339ee65e5a4f4fbe83eb"}, - {file = "ruff-0.0.272-py3-none-win_amd64.whl", hash = "sha256:a37ec80e238ead2969b746d7d1b6b0d31aa799498e9ba4281ab505b93e1f4b28"}, - {file = "ruff-0.0.272-py3-none-win_arm64.whl", hash = "sha256:06b8ee4eb8711ab119db51028dd9f5384b44728c23586424fd6e241a5b9c4a3b"}, - {file = "ruff-0.0.272.tar.gz", hash = "sha256:273a01dc8c3c4fd4c2af7ea7a67c8d39bb09bce466e640dd170034da75d14cab"}, + {file = "ruff-0.0.275-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:5e6554a072e7ce81eb6f0bec1cebd3dcb0e358652c0f4900d7d630d61691e914"}, + {file = "ruff-0.0.275-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:1cc599022fe5ffb143a965b8d659eb64161ab8ab4433d208777eab018a1aab67"}, + {file = "ruff-0.0.275-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5206fc1cd8c1c1deadd2e6360c0dbcd690f1c845da588ca9d32e4a764a402c60"}, + {file = "ruff-0.0.275-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0c4e6468da26f77b90cae35319d310999f471a8c352998e9b39937a23750149e"}, + {file = "ruff-0.0.275-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0dbdea02942131dbc15dd45f431d152224f15e1dd1859fcd0c0487b658f60f1a"}, + {file = "ruff-0.0.275-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = 
"sha256:22efd9f41af27ef8fb9779462c46c35c89134d33e326c889971e10b2eaf50c63"}, + {file = "ruff-0.0.275-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2c09662112cfa22d7467a19252a546291fd0eae4f423e52b75a7a2000a1894db"}, + {file = "ruff-0.0.275-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80043726662144876a381efaab88841c88e8df8baa69559f96b22d4fa216bef1"}, + {file = "ruff-0.0.275-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5859ee543b01b7eb67835dfd505faa8bb7cc1550f0295c92c1401b45b42be399"}, + {file = "ruff-0.0.275-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c8ace4d40a57b5ea3c16555f25a6b16bc5d8b2779ae1912ce2633543d4e9b1da"}, + {file = "ruff-0.0.275-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8347fc16aa185aae275906c4ac5b770e00c896b6a0acd5ba521f158801911998"}, + {file = "ruff-0.0.275-py3-none-musllinux_1_2_i686.whl", hash = "sha256:ec43658c64bfda44fd84bbea9da8c7a3b34f65448192d1c4dd63e9f4e7abfdd4"}, + {file = "ruff-0.0.275-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:508b13f7ca37274cceaba4fb3ea5da6ca192356323d92acf39462337c33ad14e"}, + {file = "ruff-0.0.275-py3-none-win32.whl", hash = "sha256:6afb1c4422f24f361e877937e2a44b3f8176774a476f5e33845ebfe887dd5ec2"}, + {file = "ruff-0.0.275-py3-none-win_amd64.whl", hash = "sha256:d9b264d78621bf7b698b6755d4913ab52c19bd28bee1a16001f954d64c1a1220"}, + {file = "ruff-0.0.275-py3-none-win_arm64.whl", hash = "sha256:a19ce3bea71023eee5f0f089dde4a4272d088d5ac0b675867e074983238ccc65"}, + {file = "ruff-0.0.275.tar.gz", hash = "sha256:a63a0b645da699ae5c758fce19188e901b3033ec54d862d93fcd042addf7f38d"}, ] [[package]] @@ -3294,4 +3294,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.7.1" -content-hash = "090924370b17fd265407b5a3f9cbc00997308f575b455399b39a48e3ca1a5a8e" +content-hash = "7f31754a1009d7b6c9a1bd7221a0b243ffd510f362c28f0da417aaac16757a87" diff --git a/pyproject.toml b/pyproject.toml index 90812de019..a44ecd65e4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -311,7 +311,7 @@ all = [ # We pin black so that our tests don't start failing on new releases. isort = ">=5.10.1" black = ">=22.3.0" -ruff = "0.0.272" +ruff = "0.0.275" # Typechecking lxml-stubs = ">=0.4.0" From 52d8131e87f90114018cac6daf01b1dd64eac28b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Jun 2023 09:26:01 +0100 Subject: [PATCH 162/562] Bump types-opentracing from 2.4.10.4 to 2.4.10.5 (#15830) Bumps [types-opentracing](https://github.com/python/typeshed) from 2.4.10.4 to 2.4.10.5. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-opentracing dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 poetry.lock | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index 7dbea889ef..b1b6087d09 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2931,13 +2931,13 @@ files = [

 [[package]]
 name = "types-opentracing"
-version = "2.4.10.4"
+version = "2.4.10.5"
 description = "Typing stubs for opentracing"
 optional = false
 python-versions = "*"
 files = [
-    {file = "types-opentracing-2.4.10.4.tar.gz", hash = "sha256:347040c9da4ada7d3c795659912c95d98c5651e242e8eaa0344815fee5bb97e2"},
-    {file = "types_opentracing-2.4.10.4-py3-none-any.whl", hash = "sha256:73c9b958eea3df6c4906ebf3865608a562dd9981c1bbc75a373a583c613bed56"},
+    {file = "types-opentracing-2.4.10.5.tar.gz", hash = "sha256:852d13ab1324832835d50c00cfd58b9267f0e79ec3189e5664c2a90c26880fd4"},
+    {file = "types_opentracing-2.4.10.5-py3-none-any.whl", hash = "sha256:8f12ab4dce3e298a8e6655da9a6d52171e7a275357eae4cec22a1663d94023a7"},
 ]

 [[package]]

From 25c55a9d2267b09da3e18090ee538782d6771a27 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 26 Jun 2023 15:12:20 +0100
Subject: [PATCH 163/562] Add login spam checker API (#15838)

---
 changelog.d/15838.feature                     |   1 +
 docs/modules/spam_checker_callbacks.md        |  36 ++++++
 synapse/http/site.py                          |  11 ++
 synapse/module_api/__init__.py                |   3 +
 .../callbacks/spamchecker_callbacks.py        |  80 +++++++++++++
 synapse/rest/client/login.py                  |  52 ++++++++-
 tests/rest/client/test_login.py               | 108 +++++++++++++++++-
 7 files changed, 285 insertions(+), 6 deletions(-)
 create mode 100644 changelog.d/15838.feature

diff --git a/changelog.d/15838.feature b/changelog.d/15838.feature
new file mode 100644
index 0000000000..04c77bd723
--- /dev/null
+++ b/changelog.d/15838.feature
@@ -0,0 +1 @@
+Add spam checker module API for logins.

diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md
index 1a0c6ec954..ffdfe6082e 100644
--- a/docs/modules/spam_checker_callbacks.md
+++ b/docs/modules/spam_checker_callbacks.md
@@ -348,6 +348,42 @@ callback returns `False`, Synapse falls through to the next one. The value of th
 callback that does not return `False` will be used. If this happens, Synapse
 will not call any of the subsequent implementations of this callback.

+
+### `check_login_for_spam`
+
+_First introduced in Synapse v1.87.0_
+
+```python
+async def check_login_for_spam(
+    user_id: str,
+    device_id: Optional[str],
+    initial_display_name: Optional[str],
+    request_info: Collection[Tuple[Optional[str], str]],
+    auth_provider_id: Optional[str] = None,
+) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes"]
+```
+
+Called when a user logs in.
+
+The arguments passed to this callback are:
+
+* `user_id`: The user ID the user is logging in with
+* `device_id`: The device ID the user is re-logging into.
+* `initial_display_name`: The device display name, if any.
+* `request_info`: A collection of tuples, whose first item is a user agent and whose
+  second item is an IP address. These user agents and IP addresses are the ones that were
+  used during the login process.
+* `auth_provider_id`: The identifier of the SSO authentication provider, if any.
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one.
+The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will
+be used. If this happens, Synapse will not call any of the subsequent implementations of
+this callback.
+
+*Note:* This will not be called when a user registers.
+
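+A minimal sketch of a module using this callback is shown below; it denies logins
+from a blocked IP address. The class name, the blocklist and the `parse_config`
+stub are illustrative assumptions, not part of the API:
+
+```python
+from typing import Collection, Optional, Tuple, Union
+
+from typing_extensions import Literal
+
+from synapse.module_api import NOT_SPAM, ModuleApi, errors
+
+
+class ExampleLoginSpamChecker:
+    def __init__(self, config: None, api: ModuleApi):
+        # The blocked IP is an illustrative assumption for this sketch.
+        self._blocked_ips = {"192.0.2.1"}
+        api.register_spam_checker_callbacks(
+            check_login_for_spam=self.check_login_for_spam,
+        )
+
+    @staticmethod
+    def parse_config(config: dict) -> None:
+        return None
+
+    async def check_login_for_spam(
+        self,
+        user_id: str,
+        device_id: Optional[str],
+        initial_display_name: Optional[str],
+        request_info: Collection[Tuple[Optional[str], str]],
+        auth_provider_id: Optional[str] = None,
+    ) -> Union[Literal["NOT_SPAM"], errors.Codes]:
+        # `request_info` holds the (user agent, IP address) pairs seen during
+        # this login attempt.
+        for _user_agent, ip in request_info:
+            if ip in self._blocked_ips:
+                return errors.Codes.FORBIDDEN
+        return NOT_SPAM
+```
+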
 ## Example

 The example below is a module that implements the spam checker callback
diff --git a/synapse/http/site.py b/synapse/http/site.py
index c530966ef3..5b5a7c1e59 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -521,6 +521,11 @@ class SynapseRequest(Request):
         else:
             return self.getClientAddress().host

+    def request_info(self) -> "RequestInfo":
+        h = self.getHeader(b"User-Agent")
+        user_agent = h.decode("ascii", "replace") if h else None
+        return RequestInfo(user_agent=user_agent, ip=self.get_client_ip_if_available())
+

 class XForwardedForRequest(SynapseRequest):
     """Request object which honours proxy headers
@@ -661,3 +666,9 @@ class SynapseSite(Site):

     def log(self, request: SynapseRequest) -> None:
         pass
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class RequestInfo:
+    user_agent: Optional[str]
+    ip: str
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 84b2aef620..95f7800111 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -80,6 +80,7 @@ from synapse.module_api.callbacks.account_validity_callbacks import (
 )
 from synapse.module_api.callbacks.spamchecker_callbacks import (
     CHECK_EVENT_FOR_SPAM_CALLBACK,
+    CHECK_LOGIN_FOR_SPAM_CALLBACK,
     CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK,
     CHECK_REGISTRATION_FOR_SPAM_CALLBACK,
     CHECK_USERNAME_FOR_SPAM_CALLBACK,
@@ -302,6 +303,7 @@ class ModuleApi:
             CHECK_REGISTRATION_FOR_SPAM_CALLBACK
         ] = None,
         check_media_file_for_spam: Optional[CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK] = None,
+        check_login_for_spam: Optional[CHECK_LOGIN_FOR_SPAM_CALLBACK] = None,
     ) -> None:
         """Registers callbacks for spam checking capabilities.

@@ -319,6 +321,7 @@ class ModuleApi:
             check_username_for_spam=check_username_for_spam,
             check_registration_for_spam=check_registration_for_spam,
             check_media_file_for_spam=check_media_file_for_spam,
+            check_login_for_spam=check_login_for_spam,
         )

     def register_account_validity_callbacks(
diff --git a/synapse/module_api/callbacks/spamchecker_callbacks.py b/synapse/module_api/callbacks/spamchecker_callbacks.py
index 4456d1b81e..7cee442145 100644
--- a/synapse/module_api/callbacks/spamchecker_callbacks.py
+++ b/synapse/module_api/callbacks/spamchecker_callbacks.py
@@ -196,6 +196,26 @@ CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK = Callable[
         ]
     ],
 ]
+CHECK_LOGIN_FOR_SPAM_CALLBACK = Callable[
+    [
+        str,
+        Optional[str],
+        Optional[str],
+        Collection[Tuple[Optional[str], str]],
+        Optional[str],
+    ],
+    Awaitable[
+        Union[
+            Literal["NOT_SPAM"],
+            Codes,
+            # Highly experimental, not officially part of the spamchecker API, may
+            # disappear without warning depending on the results of ongoing
+            # experiments.
+            # Use this to return additional information as part of an error.
+            Tuple[Codes, JsonDict],
+        ]
+    ],
+]


 def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None:
@@ -315,6 +335,7 @@ class SpamCheckerModuleApiCallbacks:
         self._check_media_file_for_spam_callbacks: List[
             CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK
         ] = []
+        self._check_login_for_spam_callbacks: List[CHECK_LOGIN_FOR_SPAM_CALLBACK] = []

     def register_callbacks(
         self,
@@ -335,6 +356,7 @@ class SpamCheckerModuleApiCallbacks:
             CHECK_REGISTRATION_FOR_SPAM_CALLBACK
         ] = None,
         check_media_file_for_spam: Optional[CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK] = None,
+        check_login_for_spam: Optional[CHECK_LOGIN_FOR_SPAM_CALLBACK] = None,
     ) -> None:
         """Register callbacks from module for each hook."""
         if check_event_for_spam is not None:
@@ -378,6 +400,9 @@ class SpamCheckerModuleApiCallbacks:
         if check_media_file_for_spam is not None:
             self._check_media_file_for_spam_callbacks.append(check_media_file_for_spam)

+        if check_login_for_spam is not None:
+            self._check_login_for_spam_callbacks.append(check_login_for_spam)
+
     @trace
     async def check_event_for_spam(
         self, event: "synapse.events.EventBase"
@@ -819,3 +844,58 @@ class SpamCheckerModuleApiCallbacks:
                     return synapse.api.errors.Codes.FORBIDDEN, {}

         return self.NOT_SPAM
+
+    async def check_login_for_spam(
+        self,
+        user_id: str,
+        device_id: Optional[str],
+        initial_display_name: Optional[str],
+        request_info: Collection[Tuple[Optional[str], str]],
+        auth_provider_id: Optional[str] = None,
+    ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
+        """Checks if we should allow the given login attempt.
+
+        Args:
+            user_id: The user ID of the user logging in.
+            device_id: The device ID the user is logging in with, if any.
+            initial_display_name: The device display name, if any.
+            request_info: List of tuples of user agent and IP that
+                were used during the login process.
+            auth_provider_id: The SSO IdP the user used, e.g. "oidc", "saml",
+                "cas", if any. Note that this is not set for logins that go
+                via a password provider.
+
+        Returns:
+            Enum for how the login should be handled
+        """
+
+        for callback in self._check_login_for_spam_callbacks:
+            with Measure(
+                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
+            ):
+                res = await delay_cancellation(
+                    callback(
+                        user_id,
+                        device_id,
+                        initial_display_name,
+                        request_info,
+                        auth_provider_id,
+                    )
+                )
+                # Normalize return values to `Codes` or `"NOT_SPAM"`.
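+                # Callbacks may return the literal "NOT_SPAM", a `Codes` value,
+                # or a `(Codes, dict)` pair whose dict adds extra fields to the
+                # error response; anything else is logged below and treated as
+                # a rejection.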
+ if res is self.NOT_SPAM: + continue + elif isinstance(res, synapse.api.errors.Codes): + return res, {} + elif ( + isinstance(res, tuple) + and len(res) == 2 + and isinstance(res[0], synapse.api.errors.Codes) + and isinstance(res[1], dict) + ): + return res + else: + logger.warning( + "Module returned invalid value, rejecting login as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN, {} + + return self.NOT_SPAM diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index 6493b00bb8..d724c68920 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -50,7 +50,7 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, ) -from synapse.http.site import SynapseRequest +from synapse.http.site import RequestInfo, SynapseRequest from synapse.rest.client._base import client_patterns from synapse.rest.well_known import WellKnownBuilder from synapse.types import JsonDict, UserID @@ -114,6 +114,7 @@ class LoginRestServlet(RestServlet): self.auth_handler = self.hs.get_auth_handler() self.registration_handler = hs.get_registration_handler() self._sso_handler = hs.get_sso_handler() + self._spam_checker = hs.get_module_api_callbacks().spam_checker self._well_known_builder = WellKnownBuilder(hs) self._address_ratelimiter = Ratelimiter( @@ -197,6 +198,8 @@ class LoginRestServlet(RestServlet): self._refresh_tokens_enabled and client_requested_refresh_token ) + request_info = request.request_info() + try: if login_submission["type"] == LoginRestServlet.APPSERVICE_TYPE: requester = await self.auth.get_user_by_req(request) @@ -216,6 +219,7 @@ class LoginRestServlet(RestServlet): login_submission, appservice, should_issue_refresh_token=should_issue_refresh_token, + request_info=request_info, ) elif ( self.jwt_enabled @@ -227,6 +231,7 @@ class LoginRestServlet(RestServlet): result = await self._do_jwt_login( login_submission, should_issue_refresh_token=should_issue_refresh_token, + request_info=request_info, ) elif login_submission["type"] == LoginRestServlet.TOKEN_TYPE: await self._address_ratelimiter.ratelimit( @@ -235,6 +240,7 @@ class LoginRestServlet(RestServlet): result = await self._do_token_login( login_submission, should_issue_refresh_token=should_issue_refresh_token, + request_info=request_info, ) else: await self._address_ratelimiter.ratelimit( @@ -243,6 +249,7 @@ class LoginRestServlet(RestServlet): result = await self._do_other_login( login_submission, should_issue_refresh_token=should_issue_refresh_token, + request_info=request_info, ) except KeyError: raise SynapseError(400, "Missing JSON keys.") @@ -265,6 +272,8 @@ class LoginRestServlet(RestServlet): login_submission: JsonDict, appservice: ApplicationService, should_issue_refresh_token: bool = False, + *, + request_info: RequestInfo, ) -> LoginResponse: identifier = login_submission.get("identifier") logger.info("Got appservice login request with identifier: %r", identifier) @@ -300,10 +309,15 @@ class LoginRestServlet(RestServlet): # The user represented by an appservice's configured sender_localpart # is not actually created in Synapse. 
should_check_deactivated=qualified_user_id != appservice.sender, + request_info=request_info, ) async def _do_other_login( - self, login_submission: JsonDict, should_issue_refresh_token: bool = False + self, + login_submission: JsonDict, + should_issue_refresh_token: bool = False, + *, + request_info: RequestInfo, ) -> LoginResponse: """Handle non-token/saml/jwt logins @@ -333,6 +347,7 @@ class LoginRestServlet(RestServlet): login_submission, callback, should_issue_refresh_token=should_issue_refresh_token, + request_info=request_info, ) return result @@ -347,6 +362,8 @@ class LoginRestServlet(RestServlet): should_issue_refresh_token: bool = False, auth_provider_session_id: Optional[str] = None, should_check_deactivated: bool = True, + *, + request_info: RequestInfo, ) -> LoginResponse: """Called when we've successfully authed the user and now need to actually login them in (e.g. create devices). This gets called on @@ -371,6 +388,7 @@ class LoginRestServlet(RestServlet): This exists purely for appservice's configured sender_localpart which doesn't have an associated user in the database. + request_info: The user agent/IP address of the user. Returns: Dictionary of account information after successful login. @@ -417,6 +435,22 @@ class LoginRestServlet(RestServlet): ) initial_display_name = login_submission.get("initial_device_display_name") + spam_check = await self._spam_checker.check_login_for_spam( + user_id, + device_id=device_id, + initial_display_name=initial_display_name, + request_info=[(request_info.user_agent, request_info.ip)], + auth_provider_id=auth_provider_id, + ) + if spam_check != self._spam_checker.NOT_SPAM: + logger.info("Blocking login due to spam checker") + raise SynapseError( + 403, + msg="Login was blocked by the server", + errcode=spam_check[0], + additional_fields=spam_check[1], + ) + ( device_id, access_token, @@ -451,7 +485,11 @@ class LoginRestServlet(RestServlet): return result async def _do_token_login( - self, login_submission: JsonDict, should_issue_refresh_token: bool = False + self, + login_submission: JsonDict, + should_issue_refresh_token: bool = False, + *, + request_info: RequestInfo, ) -> LoginResponse: """ Handle token login. @@ -474,10 +512,15 @@ class LoginRestServlet(RestServlet): auth_provider_id=res.auth_provider_id, should_issue_refresh_token=should_issue_refresh_token, auth_provider_session_id=res.auth_provider_session_id, + request_info=request_info, ) async def _do_jwt_login( - self, login_submission: JsonDict, should_issue_refresh_token: bool = False + self, + login_submission: JsonDict, + should_issue_refresh_token: bool = False, + *, + request_info: RequestInfo, ) -> LoginResponse: """ Handle the custom JWT login. @@ -496,6 +539,7 @@ class LoginRestServlet(RestServlet): login_submission, create_non_existent_users=True, should_issue_refresh_token=should_issue_refresh_token, + request_info=request_info, ) diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index f3c3bc69a9..ffbc13bb8d 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -13,11 +13,12 @@ # limitations under the License. 
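+# The two module stubs below (`TestSpamChecker` and `DenyAllSpamChecker`)
+# exercise the new `check_login_for_spam` callback: the first allows every
+# login, the second denies logins and returns extra response fields.
+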
 import time
 import urllib.parse
-from typing import Any, Dict, List, Optional
+from typing import Any, Collection, Dict, List, Optional, Tuple, Union
 from unittest.mock import Mock
 from urllib.parse import urlencode

 import pymacaroons
+from typing_extensions import Literal

 from twisted.test.proto_helpers import MemoryReactor
 from twisted.web.resource import Resource

@@ -26,11 +27,12 @@ import synapse.rest.admin
 from synapse.api.constants import ApprovalNoticeMedium, LoginType
 from synapse.api.errors import Codes
 from synapse.appservice import ApplicationService
+from synapse.module_api import ModuleApi
 from synapse.rest.client import devices, login, logout, register
 from synapse.rest.client.account import WhoamiRestServlet
 from synapse.rest.synapse.client import build_synapse_client_resource_tree
 from synapse.server import HomeServer
-from synapse.types import create_requester
+from synapse.types import JsonDict, create_requester
 from synapse.util import Clock

 from tests import unittest
@@ -88,6 +90,56 @@ ADDITIONAL_LOGIN_FLOWS = [
 ]


+class TestSpamChecker:
+    def __init__(self, config: None, api: ModuleApi):
+        api.register_spam_checker_callbacks(
+            check_login_for_spam=self.check_login_for_spam,
+        )
+
+    @staticmethod
+    def parse_config(config: JsonDict) -> None:
+        return None
+
+    async def check_login_for_spam(
+        self,
+        user_id: str,
+        device_id: Optional[str],
+        initial_display_name: Optional[str],
+        request_info: Collection[Tuple[Optional[str], str]],
+        auth_provider_id: Optional[str] = None,
+    ) -> Union[
+        Literal["NOT_SPAM"],
+        Tuple["synapse.module_api.errors.Codes", JsonDict],
+    ]:
+        return "NOT_SPAM"
+
+
+class DenyAllSpamChecker:
+    def __init__(self, config: None, api: ModuleApi):
+        api.register_spam_checker_callbacks(
+            check_login_for_spam=self.check_login_for_spam,
+        )
+
+    @staticmethod
+    def parse_config(config: JsonDict) -> None:
+        return None
+
+    async def check_login_for_spam(
+        self,
+        user_id: str,
+        device_id: Optional[str],
+        initial_display_name: Optional[str],
+        request_info: Collection[Tuple[Optional[str], str]],
+        auth_provider_id: Optional[str] = None,
+    ) -> Union[
+        Literal["NOT_SPAM"],
+        Tuple["synapse.module_api.errors.Codes", JsonDict],
+    ]:
+        # Return an odd set of values to ensure that they get correctly passed
+        # to the client.
+        return Codes.LIMIT_EXCEEDED, {"extra": "value"}
+
+
 class LoginRestServletTestCase(unittest.HomeserverTestCase):
     servlets = [
         synapse.rest.admin.register_servlets_for_client_rest_resource,
@@ -469,6 +521,58 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
             ],
         )

+    @override_config(
+        {
+            "modules": [
+                {
+                    "module": TestSpamChecker.__module__
+                    + "."
+                    + TestSpamChecker.__qualname__
+                }
+            ]
+        }
+    )
+    def test_spam_checker_allow(self) -> None:
+        """Check that adding a spam checker doesn't break login."""
+        self.register_user("kermit", "monkey")
+
+        body = {"type": "m.login.password", "user": "kermit", "password": "monkey"}
+
+        channel = self.make_request(
+            "POST",
+            "/_matrix/client/r0/login",
+            body,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+
+    @override_config(
+        {
+            "modules": [
+                {
+                    "module": DenyAllSpamChecker.__module__
+                    + "."
+                    + DenyAllSpamChecker.__qualname__
+                }
+            ]
+        }
+    )
+    def test_spam_checker_deny(self) -> None:
+        """Check that a spam checker can deny logins."""
+
+        self.register_user("kermit", "monkey")
+
+        body = {"type": "m.login.password", "user": "kermit", "password": "monkey"}
+
+        channel = self.make_request(
+            "POST",
+            "/_matrix/client/r0/login",
+            body,
+        )
+        self.assertEqual(channel.code, 403, channel.result)
+        self.assertDictContainsSubset(
+            {"errcode": Codes.LIMIT_EXCEEDED, "extra": "value"}, channel.json_body
+        )
+

 @skip_unless(has_saml2 and HAS_OIDC, "Requires SAML2 and OIDC")
 class MultiSSOTestCase(unittest.HomeserverTestCase):

From 70dc44f6673781a3e40cce9f82b30feabc174367 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 26 Jun 2023 15:36:07 +0100
Subject: [PATCH 164/562] Bump towncrier from 22.12.0 to 23.6.0 (#15831)

Bumps [towncrier](https://github.com/twisted/towncrier) from 22.12.0 to 23.6.0.
- [Release notes](https://github.com/twisted/towncrier/releases)
- [Changelog](https://github.com/twisted/towncrier/blob/trunk/NEWS.rst)
- [Commits](https://github.com/twisted/towncrier/compare/22.12.0...23.6.0)

---
updated-dependencies:
- dependency-name: towncrier
  dependency-type: direct:development
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 poetry.lock | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index b1b6087d09..ee19c246f3 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2711,21 +2711,21 @@ files = [

 [[package]]
 name = "towncrier"
-version = "22.12.0"
+version = "23.6.0"
 description = "Building newsfiles for your project."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "towncrier-22.12.0-py3-none-any.whl", hash = "sha256:9767a899a4d6856950f3598acd9e8f08da2663c49fdcda5ea0f9e6ba2afc8eea"},
-    {file = "towncrier-22.12.0.tar.gz", hash = "sha256:9c49d7e75f646a9aea02ae904c0bc1639c8fd14a01292d2b123b8d307564034d"},
+    {file = "towncrier-23.6.0-py3-none-any.whl", hash = "sha256:da552f29192b3c2b04d630133f194c98e9f14f0558669d427708e203fea4d0a5"},
+    {file = "towncrier-23.6.0.tar.gz", hash = "sha256:fc29bd5ab4727c8dacfbe636f7fb5dc53b99805b62da1c96b214836159ff70c1"},
 ]

 [package.dependencies]
 click = "*"
 click-default-group = "*"
+importlib-resources = {version = ">=5", markers = "python_version < \"3.10\""}
 incremental = "*"
 jinja2 = "*"
-setuptools = "*"
 tomli = {version = "*", markers = "python_version < \"3.11\""}

 [package.extras]

From 14c1bfd53482860e176b0f336107cb1096ba7755 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 26 Jun 2023 15:36:33 +0100
Subject: [PATCH 165/562] Bump serde_json from 1.0.97 to 1.0.99 (#15832)

Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.97 to 1.0.99.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.97...v1.0.99)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 51ff26ec1b..52f911277e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -340,9 +340,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.97" +version = "1.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" +checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3" dependencies = [ "itoa", "ryu", From 78cfa55dad911e667b5a9b613e232eb72410382f Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 27 Jun 2023 01:41:42 -0700 Subject: [PATCH 166/562] Fix sqlite `user_filters` upgrade (#15817) --- changelog.d/15817.bugfix | 1 + .../78/02_validate_and_update_user_filters.py | 4 +- .../03_remove_unused_indexes_user_filters.py | 65 +++++++++++++++++++ .../04_add_full_user_id_index_user_filters.py | 25 +++++++ 4 files changed, 92 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15817.bugfix create mode 100644 synapse/storage/schema/main/delta/78/03_remove_unused_indexes_user_filters.py create mode 100644 synapse/storage/schema/main/delta/78/04_add_full_user_id_index_user_filters.py diff --git a/changelog.d/15817.bugfix b/changelog.d/15817.bugfix new file mode 100644 index 0000000000..2b025730ad --- /dev/null +++ b/changelog.d/15817.bugfix @@ -0,0 +1 @@ +Fix sqlite `user_filters` upgrade introduced in v1.86.0. diff --git a/synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py b/synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py index 8ef63335e7..e148ed26f2 100644 --- a/synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py +++ b/synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py @@ -61,9 +61,7 @@ def run_upgrade( full_user_id text NOT NULL, user_id text NOT NULL, filter_id bigint NOT NULL, - filter_json bytea NOT NULL, - UNIQUE (full_user_id), - UNIQUE (user_id) + filter_json bytea NOT NULL ) """ cur.execute(create_sql) diff --git a/synapse/storage/schema/main/delta/78/03_remove_unused_indexes_user_filters.py b/synapse/storage/schema/main/delta/78/03_remove_unused_indexes_user_filters.py new file mode 100644 index 0000000000..f5ba1c3fd4 --- /dev/null +++ b/synapse/storage/schema/main/delta/78/03_remove_unused_indexes_user_filters.py @@ -0,0 +1,65 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
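+
+# Note: SQLite cannot drop a UNIQUE constraint (or the implicit index backing
+# it) in place, so `run_update` below rebuilds `user_filters` through a
+# temporary table and recreates only the unique index that was intended.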
+from synapse.config.homeserver import HomeServerConfig
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, Sqlite3Engine
+
+
+def run_update(
+    cur: LoggingTransaction,
+    database_engine: BaseDatabaseEngine,
+    config: HomeServerConfig,
+) -> None:
+    """
+    Drop the unused indexes created by incorrectly adding UNIQUE constraints to
+    columns `user_id` and `full_user_id` of table `user_filters` in the previous migration.
+    """
+
+    if isinstance(database_engine, Sqlite3Engine):
+        cur.execute("DROP TABLE IF EXISTS temp_user_filters")
+        create_sql = """
+        CREATE TABLE temp_user_filters (
+            full_user_id text NOT NULL,
+            user_id text NOT NULL,
+            filter_id bigint NOT NULL,
+            filter_json bytea NOT NULL
+        )
+        """
+        cur.execute(create_sql)
+
+        copy_sql = """
+        INSERT INTO temp_user_filters (
+            user_id,
+            filter_id,
+            filter_json,
+            full_user_id)
+            SELECT user_id, filter_id, filter_json, full_user_id FROM user_filters
+        """
+        cur.execute(copy_sql)
+
+        drop_sql = """
+        DROP TABLE user_filters
+        """
+        cur.execute(drop_sql)
+
+        rename_sql = """
+        ALTER TABLE temp_user_filters RENAME to user_filters
+        """
+        cur.execute(rename_sql)
+
+        index_sql = """
+        CREATE UNIQUE INDEX IF NOT EXISTS user_filters_unique ON
+        user_filters (user_id, filter_id)
+        """
+        cur.execute(index_sql)
diff --git a/synapse/storage/schema/main/delta/78/04_add_full_user_id_index_user_filters.py b/synapse/storage/schema/main/delta/78/04_add_full_user_id_index_user_filters.py
new file mode 100644
index 0000000000..97fecc2bd9
--- /dev/null
+++ b/synapse/storage/schema/main/delta/78/04_add_full_user_id_index_user_filters.py
@@ -0,0 +1,25 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
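+
+# Companion to the previous delta: 78/03 rebuilt `user_filters` with a unique
+# index on (user_id, filter_id); this delta adds the matching unique index on
+# (full_user_id, filter_id).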
+ +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, Sqlite3Engine + + +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: + if isinstance(database_engine, Sqlite3Engine): + idx_sql = """ + CREATE UNIQUE INDEX IF NOT EXISTS user_filters_full_user_id_unique ON + user_filters (full_user_id, filter_id) + """ + cur.execute(idx_sql) From 472c2c72f6af21504291691e89ac8296bbbef7ea Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 27 Jun 2023 10:29:20 -0500 Subject: [PATCH 167/562] Prepare changelog for v1.87.0rc1 --- CHANGES.md | 52 +++++++++++++++++++++++++++++++++++++++ changelog.d/15233.misc | 1 - changelog.d/15680.bugfix | 1 - changelog.d/15737.feature | 1 - changelog.d/15743.misc | 1 - changelog.d/15748.removal | 1 - changelog.d/15755.misc | 1 - changelog.d/15758.bugfix | 1 - changelog.d/15770.bugfix | 1 - changelog.d/15772.doc | 1 - changelog.d/15776.bugfix | 1 - changelog.d/15781.bugfix | 1 - changelog.d/15783.misc | 1 - changelog.d/15788.bugfix | 1 - changelog.d/15804.bugfix | 1 - changelog.d/15805.doc | 1 - changelog.d/15806.misc | 1 - changelog.d/15812.doc | 1 - changelog.d/15814.misc | 1 - changelog.d/15815.bugfix | 1 - changelog.d/15817.bugfix | 1 - changelog.d/15838.feature | 1 - debian/changelog | 6 +++++ pyproject.toml | 2 +- 24 files changed, 59 insertions(+), 22 deletions(-) delete mode 100644 changelog.d/15233.misc delete mode 100644 changelog.d/15680.bugfix delete mode 100644 changelog.d/15737.feature delete mode 100644 changelog.d/15743.misc delete mode 100644 changelog.d/15748.removal delete mode 100644 changelog.d/15755.misc delete mode 100644 changelog.d/15758.bugfix delete mode 100644 changelog.d/15770.bugfix delete mode 100644 changelog.d/15772.doc delete mode 100644 changelog.d/15776.bugfix delete mode 100644 changelog.d/15781.bugfix delete mode 100644 changelog.d/15783.misc delete mode 100644 changelog.d/15788.bugfix delete mode 100644 changelog.d/15804.bugfix delete mode 100644 changelog.d/15805.doc delete mode 100644 changelog.d/15806.misc delete mode 100644 changelog.d/15812.doc delete mode 100644 changelog.d/15814.misc delete mode 100644 changelog.d/15815.bugfix delete mode 100644 changelog.d/15817.bugfix delete mode 100644 changelog.d/15838.feature diff --git a/CHANGES.md b/CHANGES.md index ff4126044b..3def0a44cd 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,55 @@ +# Synapse 1.87.0rc1 (2023-06-27) + +### Features + +- Improve `/messages` response time by avoiding backfill when we already have messages to return. ([\#15737](https://github.com/matrix-org/synapse/issues/15737)) +- Add spam checker module API for logins. ([\#15838](https://github.com/matrix-org/synapse/issues/15838)) + +### Bugfixes + +- Fix a long-standing bug where media files were served in an unsafe manner. Contributed by @joshqou. ([\#15680](https://github.com/matrix-org/synapse/issues/15680)) +- Avoid invalidating a cache that was just prefilled. ([\#15758](https://github.com/matrix-org/synapse/issues/15758)) +- Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983). ([\#15770](https://github.com/matrix-org/synapse/issues/15770)) +- Fix joining rooms through aliases where the alias server isn't a real homeserver. Contributed by @tulir @ Beeper. 
([\#15776](https://github.com/matrix-org/synapse/issues/15776)) +- Fix a bug in push rules handling leading to an invalid (per spec) `is_user_mention` rule sent to clients. Also fix wrong rule names for `is_user_mention` and `is_room_mention`. ([\#15781](https://github.com/matrix-org/synapse/issues/15781)) +- Fix a bug introduced in 1.57.0 where the wrong table would be locked on updating database rows when using SQLite as the database backend. ([\#15788](https://github.com/matrix-org/synapse/issues/15788)) +- Fix Sytest environmental variable evaluation in CI. ([\#15804](https://github.com/matrix-org/synapse/issues/15804)) +- Fix forgotten rooms missing from initial sync after rejoining them. Contributed by Nico from Famedly. ([\#15815](https://github.com/matrix-org/synapse/issues/15815)) +- Fix sqlite `user_filters` upgrade introduced in v1.86.0. ([\#15817](https://github.com/matrix-org/synapse/issues/15817)) + +### Improved Documentation + +- Document `looping_call()` functionality that will wait for the given function to finish before scheduling another. ([\#15772](https://github.com/matrix-org/synapse/issues/15772)) +- Fix a typo in the [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html). ([\#15805](https://github.com/matrix-org/synapse/issues/15805)) +- Fix typo in MSC number in faster remote room join architecture doc. ([\#15812](https://github.com/matrix-org/synapse/issues/15812)) + +### Deprecations and Removals + +- Remove experimental [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) implementation to incrementally import history into existing rooms. ([\#15748](https://github.com/matrix-org/synapse/issues/15748)) + +### Internal Changes + +- Replace `EventContext` fields `prev_group` and `delta_ids` with field `state_group_deltas`. ([\#15233](https://github.com/matrix-org/synapse/issues/15233)) +- Regularly try to send transactions to other servers after they failed instead of waiting for a new event to be available before trying. ([\#15743](https://github.com/matrix-org/synapse/issues/15743)) +- Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983). ([\#15755](https://github.com/matrix-org/synapse/issues/15755)) +- Allow for the configuration of max request retries and min/max retry delays in the matrix federation client. ([\#15783](https://github.com/matrix-org/synapse/issues/15783)) +- Switch from `matrix://` to `matrix-federation://` scheme for internal Synapse routing of outbound federation traffic. ([\#15806](https://github.com/matrix-org/synapse/issues/15806)) +- Fix harmless exceptions being printed when running the port DB script. ([\#15814](https://github.com/matrix-org/synapse/issues/15814)) + +### Updates to locked dependencies + +* Bump attrs from 22.2.0 to 23.1.0. ([\#15801](https://github.com/matrix-org/synapse/issues/15801)) +* Bump cryptography from 40.0.2 to 41.0.1. ([\#15800](https://github.com/matrix-org/synapse/issues/15800)) +* Bump ijson from 3.2.0.post0 to 3.2.1. ([\#15802](https://github.com/matrix-org/synapse/issues/15802)) +* Bump phonenumbers from 8.13.13 to 8.13.14. ([\#15798](https://github.com/matrix-org/synapse/issues/15798)) +* Bump ruff from 0.0.265 to 0.0.272. ([\#15799](https://github.com/matrix-org/synapse/issues/15799)) +* Bump ruff from 0.0.272 to 0.0.275. ([\#15833](https://github.com/matrix-org/synapse/issues/15833)) +* Bump serde_json from 1.0.96 to 1.0.97. 
([\#15797](https://github.com/matrix-org/synapse/issues/15797)) +* Bump serde_json from 1.0.97 to 1.0.99. ([\#15832](https://github.com/matrix-org/synapse/issues/15832)) +* Bump towncrier from 22.12.0 to 23.6.0. ([\#15831](https://github.com/matrix-org/synapse/issues/15831)) +* Bump types-opentracing from 2.4.10.4 to 2.4.10.5. ([\#15830](https://github.com/matrix-org/synapse/issues/15830)) +* Bump types-setuptools from 67.8.0.0 to 68.0.0.0. ([\#15835](https://github.com/matrix-org/synapse/issues/15835)) + Synapse 1.86.0 (2023-06-20) =========================== diff --git a/changelog.d/15233.misc b/changelog.d/15233.misc deleted file mode 100644 index 1dff00bf3c..0000000000 --- a/changelog.d/15233.misc +++ /dev/null @@ -1 +0,0 @@ -Replace `EventContext` fields `prev_group` and `delta_ids` with field `state_group_deltas`. diff --git a/changelog.d/15680.bugfix b/changelog.d/15680.bugfix deleted file mode 100644 index 04ac19b4ec..0000000000 --- a/changelog.d/15680.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where media files were served in an unsafe manner. Contributed by @joshqou. diff --git a/changelog.d/15737.feature b/changelog.d/15737.feature deleted file mode 100644 index 9a547b5ebd..0000000000 --- a/changelog.d/15737.feature +++ /dev/null @@ -1 +0,0 @@ -Improve `/messages` response time by avoiding backfill when we already have messages to return. diff --git a/changelog.d/15743.misc b/changelog.d/15743.misc deleted file mode 100644 index b95eed929e..0000000000 --- a/changelog.d/15743.misc +++ /dev/null @@ -1 +0,0 @@ -Regularly try to send transactions to other servers after they failed instead of waiting for a new event to be available before trying. diff --git a/changelog.d/15748.removal b/changelog.d/15748.removal deleted file mode 100644 index dcb9780178..0000000000 --- a/changelog.d/15748.removal +++ /dev/null @@ -1 +0,0 @@ -Remove experimental [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) implementation to incrementally import history into existing rooms. diff --git a/changelog.d/15755.misc b/changelog.d/15755.misc deleted file mode 100644 index a65340d380..0000000000 --- a/changelog.d/15755.misc +++ /dev/null @@ -1 +0,0 @@ -Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983). diff --git a/changelog.d/15758.bugfix b/changelog.d/15758.bugfix deleted file mode 100644 index cabe25ca24..0000000000 --- a/changelog.d/15758.bugfix +++ /dev/null @@ -1 +0,0 @@ -Avoid invalidating a cache that was just prefilled. diff --git a/changelog.d/15770.bugfix b/changelog.d/15770.bugfix deleted file mode 100644 index a65340d380..0000000000 --- a/changelog.d/15770.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983). diff --git a/changelog.d/15772.doc b/changelog.d/15772.doc deleted file mode 100644 index 4d6c933c71..0000000000 --- a/changelog.d/15772.doc +++ /dev/null @@ -1 +0,0 @@ -Document `looping_call()` functionality that will wait for the given function to finish before scheduling another. diff --git a/changelog.d/15776.bugfix b/changelog.d/15776.bugfix deleted file mode 100644 index f146a85f1a..0000000000 --- a/changelog.d/15776.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix joining rooms through aliases where the alias server isn't a real homeserver. Contributed by @tulir @ Beeper. 
diff --git a/changelog.d/15781.bugfix b/changelog.d/15781.bugfix deleted file mode 100644 index 5faf59afee..0000000000 --- a/changelog.d/15781.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug in push rules handling leading to an invalid (per spec) `is_user_mention` rule sent to clients. Also fix wrong rule names for `is_user_mention` and `is_room_mention`. \ No newline at end of file diff --git a/changelog.d/15783.misc b/changelog.d/15783.misc deleted file mode 100644 index 0bebaa213d..0000000000 --- a/changelog.d/15783.misc +++ /dev/null @@ -1 +0,0 @@ -Allow for the configuration of max request retries and min/max retry delays in the matrix federation client. diff --git a/changelog.d/15788.bugfix b/changelog.d/15788.bugfix deleted file mode 100644 index d22aae7baf..0000000000 --- a/changelog.d/15788.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.57.0 where the wrong table would be locked on updating database rows when using SQLite as the database backend. \ No newline at end of file diff --git a/changelog.d/15804.bugfix b/changelog.d/15804.bugfix deleted file mode 100644 index 7c8b954397..0000000000 --- a/changelog.d/15804.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix Sytest environmental variable evaluation in CI. diff --git a/changelog.d/15805.doc b/changelog.d/15805.doc deleted file mode 100644 index 446f627cfc..0000000000 --- a/changelog.d/15805.doc +++ /dev/null @@ -1 +0,0 @@ -Fix a typo in the [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html). diff --git a/changelog.d/15806.misc b/changelog.d/15806.misc deleted file mode 100644 index 80d0eb2f8f..0000000000 --- a/changelog.d/15806.misc +++ /dev/null @@ -1 +0,0 @@ -Switch from `matrix://` to `matrix-federation://` scheme for internal Synapse routing of outbound federation traffic. diff --git a/changelog.d/15812.doc b/changelog.d/15812.doc deleted file mode 100644 index de7b64e4a2..0000000000 --- a/changelog.d/15812.doc +++ /dev/null @@ -1 +0,0 @@ -Fix typo in MSC number in faster remote room join architecture doc. diff --git a/changelog.d/15814.misc b/changelog.d/15814.misc deleted file mode 100644 index 8e1107212f..0000000000 --- a/changelog.d/15814.misc +++ /dev/null @@ -1 +0,0 @@ -Fix harmless exceptions being printed when running the port DB script. diff --git a/changelog.d/15815.bugfix b/changelog.d/15815.bugfix deleted file mode 100644 index e20b5acac1..0000000000 --- a/changelog.d/15815.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix forgotten rooms missing from initial sync after rejoining them. Contributed by Nico from Famedly. diff --git a/changelog.d/15817.bugfix b/changelog.d/15817.bugfix deleted file mode 100644 index 2b025730ad..0000000000 --- a/changelog.d/15817.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix sqlite `user_filters` upgrade introduced in v1.86.0. diff --git a/changelog.d/15838.feature b/changelog.d/15838.feature deleted file mode 100644 index 04c77bd723..0000000000 --- a/changelog.d/15838.feature +++ /dev/null @@ -1 +0,0 @@ -Add spam checker module API for logins. diff --git a/debian/changelog b/debian/changelog index 9d057c65ef..2fa8d30fe1 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.87.0~rc1) stable; urgency=medium + + * New synapse release 1.87.0rc1. + + -- Synapse Packaging team Tue, 27 Jun 2023 15:27:04 +0000 + matrix-synapse-py3 (1.86.0) stable; urgency=medium * New Synapse release 1.86.0. 
diff --git a/pyproject.toml b/pyproject.toml index a44ecd65e4..fc47b1ef71 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.86.0" +version = "1.87.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 10ed3e233ee02d6a1faa744cf226c67042b60471 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 27 Jun 2023 10:34:11 -0500 Subject: [PATCH 168/562] Note last release with Python 3.7 support See https://github.com/matrix-org/synapse/issues/15836 --- CHANGES.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 3def0a44cd..dab35075b0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,8 @@ # Synapse 1.87.0rc1 (2023-06-27) +Please note that this will be the last release of Synapse that is compatible with +Python 3.7 and earlier. + ### Features - Improve `/messages` response time by avoiding backfill when we already have messages to return. ([\#15737](https://github.com/matrix-org/synapse/issues/15737)) From 13fc89148c37f6c653dd237377ff165249fa73cc Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 28 Jun 2023 15:10:33 -0500 Subject: [PATCH 169/562] Split out 2022 changes from the changelog (#15846) Split out 2022 changes from the changelog so the rendered version in GitHub doesn't timeout as much. --- CHANGES.md | 2764 ------------------------------ changelog.d/15846.misc | 1 + docs/changelogs/CHANGES-2022.md | 2766 +++++++++++++++++++++++++++++++ 3 files changed, 2767 insertions(+), 2764 deletions(-) create mode 100644 changelog.d/15846.misc create mode 100644 docs/changelogs/CHANGES-2022.md diff --git a/CHANGES.md b/CHANGES.md index dab35075b0..2765045a13 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1253,2768 +1253,4 @@ Internal Changes - Bump ruff from 0.0.206 to 0.0.215. ([\#14796](https://github.com/matrix-org/synapse/issues/14796)) -Synapse 1.74.0 (2022-12-20) -=========================== - -Improved Documentation ----------------------- - -- Add release note and update documentation regarding optional ICU support in user search. ([\#14712](https://github.com/matrix-org/synapse/issues/14712)) - - -Synapse 1.74.0rc1 (2022-12-13) -============================== - -Features --------- - -- Improve user search for international display names. ([\#14464](https://github.com/matrix-org/synapse/issues/14464)) -- Stop using deprecated `keyIds` parameter when calling `/_matrix/key/v2/server`. ([\#14490](https://github.com/matrix-org/synapse/issues/14490), [\#14525](https://github.com/matrix-org/synapse/issues/14525)) -- Add new `push.enabled` config option to allow opting out of push notification calculation. ([\#14551](https://github.com/matrix-org/synapse/issues/14551), [\#14619](https://github.com/matrix-org/synapse/issues/14619)) -- Advertise support for Matrix 1.5 on `/_matrix/client/versions`. ([\#14576](https://github.com/matrix-org/synapse/issues/14576)) -- Improve opentracing and logging for to-device message handling. ([\#14598](https://github.com/matrix-org/synapse/issues/14598)) -- Allow selecting "prejoin" events by state keys in addition to event types. ([\#14642](https://github.com/matrix-org/synapse/issues/14642)) - - -Bugfixes --------- - -- Fix a long-standing bug where a device list update might not be sent to clients in certain circumstances. 
([\#14435](https://github.com/matrix-org/synapse/issues/14435), [\#14592](https://github.com/matrix-org/synapse/issues/14592), [\#14604](https://github.com/matrix-org/synapse/issues/14604)) -- Suppress a spurious warning when `POST /rooms///`, `POST /join//` receive an empty HTTP request body. ([\#14600](https://github.com/matrix-org/synapse/issues/14600)) -- Return spec-compliant JSON errors when unknown endpoints are requested. ([\#14620](https://github.com/matrix-org/synapse/issues/14620), [\#14621](https://github.com/matrix-org/synapse/issues/14621)) -- Update html templates to load images over HTTPS. Contributed by @ashfame. ([\#14625](https://github.com/matrix-org/synapse/issues/14625)) -- Fix a long-standing bug where the user directory would return 1 more row than requested. ([\#14631](https://github.com/matrix-org/synapse/issues/14631)) -- Reject invalid read receipt requests with empty room or event IDs. Contributed by Nick @ Beeper (@fizzadar). ([\#14632](https://github.com/matrix-org/synapse/issues/14632)) -- Fix a bug introduced in Synapse 1.67.0 where not specifying a config file or a server URL would lead to the `register_new_matrix_user` script failing. ([\#14637](https://github.com/matrix-org/synapse/issues/14637)) -- Fix a long-standing bug where the user directory and room/user stats might be out of sync. ([\#14639](https://github.com/matrix-org/synapse/issues/14639), [\#14643](https://github.com/matrix-org/synapse/issues/14643)) -- Fix a bug introduced in Synapse 1.72.0 where the background updates to add non-thread unique indexes on receipts would fail if they were previously interrupted. ([\#14650](https://github.com/matrix-org/synapse/issues/14650)) -- Improve validation of field size limits in events. ([\#14664](https://github.com/matrix-org/synapse/issues/14664)) -- Fix bugs introduced in Synapse 1.55.0 and 1.69.0 where application services would not be notified of events in the correct rooms, due to stale caches. ([\#14670](https://github.com/matrix-org/synapse/issues/14670)) - - -Improved Documentation ----------------------- - -- Update worker settings for `pusher` and `federation_sender` functionality. ([\#14493](https://github.com/matrix-org/synapse/issues/14493)) -- Add links to third party package repositories, and point to the bug which highlights Ubuntu's out-of-date packages. ([\#14517](https://github.com/matrix-org/synapse/issues/14517)) -- Remove old, incorrect minimum postgres version note and replace with a link to the [Dependency Deprecation Policy](https://matrix-org.github.io/synapse/v1.73/deprecation_policy.html). ([\#14590](https://github.com/matrix-org/synapse/issues/14590)) -- Add Single-Sign On setup instructions for Mastodon-based instances. ([\#14594](https://github.com/matrix-org/synapse/issues/14594)) -- Change `turn_allow_guests` example value to lowercase `true`. ([\#14634](https://github.com/matrix-org/synapse/issues/14634)) - - -Internal Changes ----------------- - -- Optimise push badge count calculations. Contributed by Nick @ Beeper (@fizzadar). ([\#14255](https://github.com/matrix-org/synapse/issues/14255)) -- Faster remote room joins: stream the un-partial-stating of rooms over replication. ([\#14473](https://github.com/matrix-org/synapse/issues/14473), [\#14474](https://github.com/matrix-org/synapse/issues/14474)) -- Share the `ClientRestResource` for both workers and the main process. 
([\#14528](https://github.com/matrix-org/synapse/issues/14528)) -- Add `--editable` flag to `complement.sh` which uses an editable install of Synapse for faster turn-around times whilst developing iteratively. ([\#14548](https://github.com/matrix-org/synapse/issues/14548)) -- Faster joins: use servers list approximation to send read receipts when in partial state instead of waiting for the full state of the room. ([\#14549](https://github.com/matrix-org/synapse/issues/14549)) -- Modernize unit tests configuration related to workers. ([\#14568](https://github.com/matrix-org/synapse/issues/14568)) -- Bump jsonschema from 4.17.0 to 4.17.3. ([\#14591](https://github.com/matrix-org/synapse/issues/14591)) -- Fix Rust lint CI. ([\#14602](https://github.com/matrix-org/synapse/issues/14602)) -- Bump JasonEtco/create-an-issue from 2.5.0 to 2.8.1. ([\#14607](https://github.com/matrix-org/synapse/issues/14607)) -- Alter some unit test environment parameters to decrease time spent running tests. ([\#14610](https://github.com/matrix-org/synapse/issues/14610)) -- Switch to Go recommended installation method for `gotestfmt` template in CI. ([\#14611](https://github.com/matrix-org/synapse/issues/14611)) -- Bump phonenumbers from 8.13.0 to 8.13.1. ([\#14612](https://github.com/matrix-org/synapse/issues/14612)) -- Bump types-setuptools from 65.5.0.3 to 65.6.0.1. ([\#14613](https://github.com/matrix-org/synapse/issues/14613)) -- Bump twine from 4.0.1 to 4.0.2. ([\#14614](https://github.com/matrix-org/synapse/issues/14614)) -- Bump types-requests from 2.28.11.2 to 2.28.11.5. ([\#14615](https://github.com/matrix-org/synapse/issues/14615)) -- Bump cryptography from 38.0.3 to 38.0.4. ([\#14616](https://github.com/matrix-org/synapse/issues/14616)) -- Remove useless cargo install with apt from Dockerfile. ([\#14636](https://github.com/matrix-org/synapse/issues/14636)) -- Bump certifi from 2021.10.8 to 2022.12.7. ([\#14645](https://github.com/matrix-org/synapse/issues/14645)) -- Bump flake8-bugbear from 22.10.27 to 22.12.6. ([\#14656](https://github.com/matrix-org/synapse/issues/14656)) -- Bump packaging from 21.3 to 22.0. ([\#14657](https://github.com/matrix-org/synapse/issues/14657)) -- Bump types-pillow from 9.3.0.1 to 9.3.0.4. ([\#14658](https://github.com/matrix-org/synapse/issues/14658)) -- Bump serde from 1.0.148 to 1.0.150. ([\#14659](https://github.com/matrix-org/synapse/issues/14659)) -- Bump phonenumbers from 8.13.1 to 8.13.2. ([\#14660](https://github.com/matrix-org/synapse/issues/14660)) -- Bump authlib from 1.1.0 to 1.2.0. ([\#14661](https://github.com/matrix-org/synapse/issues/14661)) -- Move `StateFilter` to `synapse.types`. ([\#14668](https://github.com/matrix-org/synapse/issues/14668)) -- Improve type hints. ([\#14597](https://github.com/matrix-org/synapse/issues/14597), [\#14646](https://github.com/matrix-org/synapse/issues/14646), [\#14671](https://github.com/matrix-org/synapse/issues/14671)) - - -Synapse 1.73.0 (2022-12-06) -=========================== - -Please note that legacy Prometheus metric names have been removed in this release; see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.73/docs/upgrade.md#legacy-prometheus-metric-names-have-now-been-removed) for more details. - -No significant changes since 1.73.0rc2. - - -Synapse 1.73.0rc2 (2022-12-01) -============================== - -Bugfixes --------- - -- Fix a regression in Synapse 1.73.0rc1 where Synapse's main process would stop responding to HTTP requests when a user with a large number of devices logs in. 
([\#14582](https://github.com/matrix-org/synapse/issues/14582)) - - -Synapse 1.73.0rc1 (2022-11-29) -============================== - -Features --------- - -- Speed-up `/messages` with `filter_events_for_client` optimizations. ([\#14527](https://github.com/matrix-org/synapse/issues/14527)) -- Improve DB performance by reducing amount of data that gets read in `device_lists_changes_in_room`. ([\#14534](https://github.com/matrix-org/synapse/issues/14534)) -- Add support for handling avatar in SSO OIDC login. Contributed by @ashfame. ([\#13917](https://github.com/matrix-org/synapse/issues/13917)) -- Move MSC3030 `/timestamp_to_event` endpoints to stable `v1` location (`/_matrix/client/v1/rooms//timestamp_to_event?ts=&dir=`, `/_matrix/federation/v1/timestamp_to_event/?ts=&dir=`). ([\#14471](https://github.com/matrix-org/synapse/issues/14471)) -- Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.5/client-server-api/#aggregations) which return bundled aggregations. ([\#14491](https://github.com/matrix-org/synapse/issues/14491), [\#14508](https://github.com/matrix-org/synapse/issues/14508), [\#14510](https://github.com/matrix-org/synapse/issues/14510)) -- Add unstable support for an Extensible Events room version (`org.matrix.msc1767.10`) via [MSC1767](https://github.com/matrix-org/matrix-spec-proposals/pull/1767), [MSC3931](https://github.com/matrix-org/matrix-spec-proposals/pull/3931), [MSC3932](https://github.com/matrix-org/matrix-spec-proposals/pull/3932), and [MSC3933](https://github.com/matrix-org/matrix-spec-proposals/pull/3933). ([\#14520](https://github.com/matrix-org/synapse/issues/14520), [\#14521](https://github.com/matrix-org/synapse/issues/14521), [\#14524](https://github.com/matrix-org/synapse/issues/14524)) -- Prune user's old devices on login if they have too many. ([\#14038](https://github.com/matrix-org/synapse/issues/14038), [\#14580](https://github.com/matrix-org/synapse/issues/14580)) - - -Bugfixes --------- - -- Fix a long-standing bug where paginating from the start of a room did not work. Contributed by @gnunicorn. ([\#14149](https://github.com/matrix-org/synapse/issues/14149)) -- Fix a bug introduced in Synapse 1.58.0 where a user with presence state `org.matrix.msc3026.busy` would mistakenly be set to `online` when calling `/sync` or `/events` on a worker process. ([\#14393](https://github.com/matrix-org/synapse/issues/14393)) -- Fix a bug introduced in Synapse 1.70.0 where a receipt's thread ID was not sent over federation. ([\#14466](https://github.com/matrix-org/synapse/issues/14466)) -- Fix a long-standing bug where the [List media admin API](https://matrix-org.github.io/synapse/latest/admin_api/media_admin_api.html#list-all-media-in-a-room) would fail when processing an image with broken thumbnail information. ([\#14537](https://github.com/matrix-org/synapse/issues/14537)) -- Fix a bug introduced in Synapse 1.67.0 where two logging context warnings would be logged on startup. ([\#14574](https://github.com/matrix-org/synapse/issues/14574)) -- In application service transactions that include the experimental `org.matrix.msc3202.device_one_time_key_counts` key, include a duplicate key of `org.matrix.msc3202.device_one_time_keys_count` to match the name proposed by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202). ([\#14565](https://github.com/matrix-org/synapse/issues/14565)) -- Fix a bug introduced in Synapse 0.9 where Synapse would fail to fetch server keys whose IDs contain a forward slash. 
Bugfixes
--------

- Fix a long-standing bug where paginating from the start of a room did not work. Contributed by @gnunicorn. ([\#14149](https://github.com/matrix-org/synapse/issues/14149))
- Fix a bug introduced in Synapse 1.58.0 where a user with presence state `org.matrix.msc3026.busy` would mistakenly be set to `online` when calling `/sync` or `/events` on a worker process. ([\#14393](https://github.com/matrix-org/synapse/issues/14393))
- Fix a bug introduced in Synapse 1.70.0 where a receipt's thread ID was not sent over federation. ([\#14466](https://github.com/matrix-org/synapse/issues/14466))
- Fix a long-standing bug where the [List media admin API](https://matrix-org.github.io/synapse/latest/admin_api/media_admin_api.html#list-all-media-in-a-room) would fail when processing an image with broken thumbnail information. ([\#14537](https://github.com/matrix-org/synapse/issues/14537))
- Fix a bug introduced in Synapse 1.67.0 where two logging context warnings would be logged on startup. ([\#14574](https://github.com/matrix-org/synapse/issues/14574))
- In application service transactions that include the experimental `org.matrix.msc3202.device_one_time_key_counts` key, include a duplicate key of `org.matrix.msc3202.device_one_time_keys_count` to match the name proposed by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202). ([\#14565](https://github.com/matrix-org/synapse/issues/14565))
- Fix a bug introduced in Synapse 0.9 where Synapse would fail to fetch server keys whose IDs contain a forward slash. ([\#14490](https://github.com/matrix-org/synapse/issues/14490))


Improved Documentation
----------------------

- Fixed link to 'Synapse administration endpoints'. ([\#14499](https://github.com/matrix-org/synapse/issues/14499))


Deprecations and Removals
-------------------------

- Remove legacy Prometheus metrics names. They were deprecated in Synapse v1.69.0 and disabled by default in Synapse v1.71.0. ([\#14538](https://github.com/matrix-org/synapse/issues/14538))


Internal Changes
----------------

- Improve type hinting throughout Synapse. ([\#14055](https://github.com/matrix-org/synapse/issues/14055), [\#14412](https://github.com/matrix-org/synapse/issues/14412), [\#14529](https://github.com/matrix-org/synapse/issues/14529), [\#14452](https://github.com/matrix-org/synapse/issues/14452))
- Remove old stream ID tracking code. Contributed by Nick @ Beeper (@fizzadar). ([\#14376](https://github.com/matrix-org/synapse/issues/14376), [\#14468](https://github.com/matrix-org/synapse/issues/14468))
- Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication. ([\#14400](https://github.com/matrix-org/synapse/issues/14400), [\#14476](https://github.com/matrix-org/synapse/issues/14476))
- Refactor `federation_sender` and `pusher` configuration loading. ([\#14496](https://github.com/matrix-org/synapse/issues/14496), [\#14509](https://github.com/matrix-org/synapse/issues/14509), [\#14573](https://github.com/matrix-org/synapse/issues/14573))
- Faster joins: do not wait for full state when creating events to send. ([\#14403](https://github.com/matrix-org/synapse/issues/14403))
- Faster joins: filter out non-local events when a room doesn't have its full state. ([\#14404](https://github.com/matrix-org/synapse/issues/14404))
- Faster joins: send events to initial list of servers if we don't have the full state yet. ([\#14408](https://github.com/matrix-org/synapse/issues/14408))
- Faster joins: use servers list approximation received during `send_join` (potentially updated with received membership events) in `assert_host_in_room`. ([\#14515](https://github.com/matrix-org/synapse/issues/14515))
- Fix type logic in TCP replication code that prevented correctly ignoring blank commands. ([\#14449](https://github.com/matrix-org/synapse/issues/14449))
- Remove option to skip locking of tables when performing emulated upserts, to avoid a class of bugs in future. ([\#14469](https://github.com/matrix-org/synapse/issues/14469))
- `scripts-dev/federation_client`: Fix routing on servers with `.well-known` files. ([\#14479](https://github.com/matrix-org/synapse/issues/14479))
- Reduce default third party invite rate limit to 216 invites per day. ([\#14487](https://github.com/matrix-org/synapse/issues/14487))
- Refactor conversion of device list changes in room to outbound pokes to track unconverted rows using a `(stream ID, room ID)` position instead of updating the `converted_to_destinations` flag on every row. ([\#14516](https://github.com/matrix-org/synapse/issues/14516))
- Add more prompts to the bug report form. ([\#14522](https://github.com/matrix-org/synapse/issues/14522))
- Extend editorconfig rules on indent and line length to `.pyi` files. ([\#14526](https://github.com/matrix-org/synapse/issues/14526))
- Run Rust CI when `Cargo.lock` changes. This is particularly useful for dependabot updates. ([\#14571](https://github.com/matrix-org/synapse/issues/14571))
- Fix a possible variable shadow in `create_new_client_event`.
([\#14575](https://github.com/matrix-org/synapse/issues/14575)) -- Bump various dependencies in the `poetry.lock` file and in CI scripts. ([\#14557](https://github.com/matrix-org/synapse/issues/14557), [\#14559](https://github.com/matrix-org/synapse/issues/14559), [\#14560](https://github.com/matrix-org/synapse/issues/14560), [\#14500](https://github.com/matrix-org/synapse/issues/14500), [\#14501](https://github.com/matrix-org/synapse/issues/14501), [\#14502](https://github.com/matrix-org/synapse/issues/14502), [\#14503](https://github.com/matrix-org/synapse/issues/14503), [\#14504](https://github.com/matrix-org/synapse/issues/14504), [\#14505](https://github.com/matrix-org/synapse/issues/14505)). - - -Synapse 1.72.0 (2022-11-22) -=========================== - -Please note that Synapse now only supports PostgreSQL 11+, because PostgreSQL 10 has reached end-of-life, c.f. our [Deprecation Policy](https://github.com/matrix-org/synapse/blob/develop/docs/deprecation_policy.md). - -Bugfixes --------- - -- Update forgotten references to legacy metrics in the included Grafana dashboard. ([\#14477](https://github.com/matrix-org/synapse/issues/14477)) - - -Synapse 1.72.0rc1 (2022-11-16) -============================== - -Features --------- - -- Add experimental support for [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912): Relation-based redactions. ([\#14260](https://github.com/matrix-org/synapse/issues/14260)) -- Build Debian packages for Ubuntu 22.10 (Kinetic Kudu). ([\#14396](https://github.com/matrix-org/synapse/issues/14396)) -- Add an [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) endpoint for user lookup based on third-party ID (3PID). Contributed by @ashfame. ([\#14405](https://github.com/matrix-org/synapse/issues/14405)) -- Faster joins: include heroes' membership events in the partial join response, for rooms without a name or canonical alias. ([\#14442](https://github.com/matrix-org/synapse/issues/14442)) - - -Bugfixes --------- - -- Faster joins: do not block creation of or queries for room aliases during the resync. ([\#14292](https://github.com/matrix-org/synapse/issues/14292)) -- Fix a bug introduced in Synapse 1.64.0rc1 which could cause log spam when fetching events from other homeservers. ([\#14347](https://github.com/matrix-org/synapse/issues/14347)) -- Fix a bug introduced in 1.66 which would not send certain pushrules to clients. Contributed by Nico. ([\#14356](https://github.com/matrix-org/synapse/issues/14356)) -- Fix a bug introduced in v1.71.0rc1 where the power level event was incorrectly created during initial room creation. ([\#14361](https://github.com/matrix-org/synapse/issues/14361)) -- Fix the refresh token endpoint to be under /r0 and /v3 instead of /v1. Contributed by Tulir @ Beeper. ([\#14364](https://github.com/matrix-org/synapse/issues/14364)) -- Fix a long-standing bug where Synapse would raise an error when encountering an unrecognised field in a `/sync` filter, instead of ignoring it for forward compatibility. ([\#14369](https://github.com/matrix-org/synapse/issues/14369)) -- Fix a background database update, introduced in Synapse 1.64.0, which could cause poor database performance. ([\#14374](https://github.com/matrix-org/synapse/issues/14374)) -- Fix PostgreSQL sometimes using table scans for queries against the `event_search` table, taking a long time and a large amount of IO. 
([\#14409](https://github.com/matrix-org/synapse/issues/14409))
- Fix rendering of some HTML templates (including emails). Introduced in v1.71.0. ([\#14448](https://github.com/matrix-org/synapse/issues/14448))
- Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14453](https://github.com/matrix-org/synapse/issues/14453))


Updates to the Docker image
---------------------------

- Add all Stream Writer worker types to `configure_workers_and_start.py`. ([\#14197](https://github.com/matrix-org/synapse/issues/14197))
- Remove references to legacy worker types in the multi-worker Dockerfile. ([\#14294](https://github.com/matrix-org/synapse/issues/14294))


Improved Documentation
----------------------

- Upload documentation PRs to Netlify. ([\#12947](https://github.com/matrix-org/synapse/issues/12947), [\#14370](https://github.com/matrix-org/synapse/issues/14370))
- Add additional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust general TURN server doc structure. ([\#14293](https://github.com/matrix-org/synapse/issues/14293))
- Add an example of how to load-balance `/sync` requests. Contributed by [aceArt](https://aceart.de). ([\#14297](https://github.com/matrix-org/synapse/issues/14297))
- Edit sample Nginx reverse proxy configuration to use HTTP/1.1. Contributed by Brad Jones. ([\#14414](https://github.com/matrix-org/synapse/issues/14414))


Deprecations and Removals
-------------------------

- Remove support for PostgreSQL 10. ([\#14392](https://github.com/matrix-org/synapse/issues/14392), [\#14397](https://github.com/matrix-org/synapse/issues/14397))


Internal Changes
----------------

- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
- Add TLS support for generic worker endpoints. ([\#14128](https://github.com/matrix-org/synapse/issues/14128), [\#14455](https://github.com/matrix-org/synapse/issues/14455))
- Switch to a maintained action for installing Rust in CI. ([\#14313](https://github.com/matrix-org/synapse/issues/14313))
- Add override ability to `complement.sh` command line script to request certain types of workers. ([\#14324](https://github.com/matrix-org/synapse/issues/14324))
- Enable testing of [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874) (filtering of `/messages` by relation type) in Complement. ([\#14339](https://github.com/matrix-org/synapse/issues/14339))
- Concisely log a failure to resolve state due to missing `prev_events`. ([\#14346](https://github.com/matrix-org/synapse/issues/14346))
- Use a maintained GitHub action to install Rust. ([\#14351](https://github.com/matrix-org/synapse/issues/14351))
- Cleanup old worker datastore classes. Contributed by Nick @ Beeper (@fizzadar). ([\#14375](https://github.com/matrix-org/synapse/issues/14375))
- Test against PostgreSQL 15 in CI. ([\#14394](https://github.com/matrix-org/synapse/issues/14394))
- Remove unreachable code. ([\#14410](https://github.com/matrix-org/synapse/issues/14410))
- Clean-up event persistence code. ([\#14411](https://github.com/matrix-org/synapse/issues/14411))
- Update docstring to clarify that `get_partial_state_events_batch` does not just give you completely arbitrary partial-state events.
([\#14417](https://github.com/matrix-org/synapse/issues/14417))
- Fix mypy errors introduced by bumping the locked version of `attrs` and `gitpython`. ([\#14433](https://github.com/matrix-org/synapse/issues/14433))
- Make Dependabot only bump Rust deps in the lock file. ([\#14434](https://github.com/matrix-org/synapse/issues/14434))
- Fix an incorrect stub return type for `PushRuleEvaluator.run`. ([\#14451](https://github.com/matrix-org/synapse/issues/14451))
- Improve performance of `/context` in large rooms. ([\#14461](https://github.com/matrix-org/synapse/issues/14461))


Synapse 1.71.0 (2022-11-08)
===========================

Please note that, as announced in the release notes for Synapse 1.69.0, legacy Prometheus metric names are now disabled by default.
They will be removed altogether in Synapse 1.73.0.
If not already done, server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
See the [upgrade notes](https://matrix-org.github.io/synapse/v1.71/upgrade.html#upgrading-to-v1710) for more details.

**Note:** in line with our [deprecation policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html) for platform dependencies, this will be the last release to support PostgreSQL 10, which reaches upstream end-of-life on November 10th, 2022. Future releases of Synapse will require PostgreSQL 11+.

No significant changes since 1.71.0rc2.


Synapse 1.71.0rc2 (2022-11-04)
==============================

Improved Documentation
----------------------

- Document the changes to monthly active user metrics due to deprecation of legacy Prometheus metric names. ([\#14358](https://github.com/matrix-org/synapse/issues/14358), [\#14360](https://github.com/matrix-org/synapse/issues/14360))


Deprecations and Removals
-------------------------

- Disable legacy Prometheus metric names by default. They can still be re-enabled for now, but they will be removed altogether in Synapse 1.73.0. ([\#14353](https://github.com/matrix-org/synapse/issues/14353))


Internal Changes
----------------

- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))


Synapse 1.71.0rc1 (2022-11-01)
==============================

Features
--------

- Support back-channel logouts from OpenID Connect providers. ([\#11414](https://github.com/matrix-org/synapse/issues/11414))
- Allow use of Postgres and SQLite full-text search operators in search queries. ([\#11635](https://github.com/matrix-org/synapse/issues/11635), [\#14310](https://github.com/matrix-org/synapse/issues/14310), [\#14311](https://github.com/matrix-org/synapse/issues/14311))
- Implement [MSC3664](https://github.com/matrix-org/matrix-doc/pull/3664), push rules for relations. Contributed by Nico. ([\#11804](https://github.com/matrix-org/synapse/issues/11804))
- Improve aesthetics of HTML templates. Note that these changes do not retroactively apply to templates which have been [customised](https://matrix-org.github.io/synapse/latest/templates.html#templates) by server admins. ([\#13652](https://github.com/matrix-org/synapse/issues/13652))
- Enable write-ahead logging for SQLite installations. Contributed by [@asymmetric](https://github.com/asymmetric). ([\#13897](https://github.com/matrix-org/synapse/issues/13897))
- Show erasure status when [listing users](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#query-user-account) in the Admin API.
([\#14205](https://github.com/matrix-org/synapse/issues/14205))
- Provide a specific error code when a `/sync` request provides a filter which doesn't represent a JSON object. ([\#14262](https://github.com/matrix-org/synapse/issues/14262))


Bugfixes
--------

- Fix a long-standing bug where the `update_synapse_database` script could not be run with multiple databases. Contributed by @thefinn93 @ Beeper. ([\#13422](https://github.com/matrix-org/synapse/issues/13422))
- Fix a bug which prevented setting an avatar on homeservers which have an explicit port in their `server_name` and have `max_avatar_size` and/or `allowed_avatar_mimetypes` configuration. Contributed by @ashfame. ([\#13927](https://github.com/matrix-org/synapse/issues/13927))
- Check appservice user interest against the local users instead of all users in the room to align with [MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905). ([\#13958](https://github.com/matrix-org/synapse/issues/13958))
- Fix a long-standing bug where Synapse would accidentally include extra information in the response to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14064](https://github.com/matrix-org/synapse/issues/14064))
- Fix a bug introduced in Synapse 1.64.0 where presence updates could be missing from `/sync` responses. ([\#14243](https://github.com/matrix-org/synapse/issues/14243))
- Fix a bug introduced in Synapse 1.60.0 which caused an error to be logged when Synapse received a SIGHUP signal if debug logging was enabled. ([\#14258](https://github.com/matrix-org/synapse/issues/14258))
- Prevent history insertion ([MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716)) during a partial join ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#14291](https://github.com/matrix-org/synapse/issues/14291))
- Fix a bug introduced in Synapse 1.34.0 where device names would be returned via a federation user key query request when `allow_device_name_lookup_over_federation` was set to `false`. ([\#14304](https://github.com/matrix-org/synapse/issues/14304))
- Fix a bug introduced in Synapse 0.34.0 where logs could include error spam when background processes are measured as taking a negative amount of time. ([\#14323](https://github.com/matrix-org/synapse/issues/14323))
- Fix a bug introduced in Synapse 1.70.0 where clients were unable to PUT new [dehydrated devices](https://github.com/matrix-org/matrix-spec-proposals/pull/2697). ([\#14336](https://github.com/matrix-org/synapse/issues/14336))


Improved Documentation
----------------------

- Explain how to disable the use of [`trusted_key_servers`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#trusted_key_servers). ([\#13999](https://github.com/matrix-org/synapse/issues/13999))
- Add workers settings to [configuration manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#individual-worker-configuration). ([\#14086](https://github.com/matrix-org/synapse/issues/14086))
- Correct the name of the config option [`encryption_enabled_by_default_for_room_type`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#encryption_enabled_by_default_for_room_type).
([\#14110](https://github.com/matrix-org/synapse/issues/14110))
- Update docstrings of `SynapseError` and `FederationError` to better describe what they are used for and what the effects of using them are. ([\#14191](https://github.com/matrix-org/synapse/issues/14191))


Internal Changes
----------------

- Remove unused `@lru_cache` decorator. ([\#13595](https://github.com/matrix-org/synapse/issues/13595))
- Save login tokens in database and prevent login token reuse. ([\#13844](https://github.com/matrix-org/synapse/issues/13844))
- Refactor OIDC tests to better mimic an actual OIDC provider. ([\#13910](https://github.com/matrix-org/synapse/issues/13910))
- Fix type annotation causing import time error in the Complement forking launcher. ([\#14084](https://github.com/matrix-org/synapse/issues/14084))
- Refactor [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to loop over federation destinations with standard pattern and error handling. ([\#14096](https://github.com/matrix-org/synapse/issues/14096))
- Add initial power level event to batch of bulk persisted events when creating a new room. ([\#14228](https://github.com/matrix-org/synapse/issues/14228))
- Refactor `/key/` endpoints to use `RestServlet` classes. ([\#14229](https://github.com/matrix-org/synapse/issues/14229))
- Switch to using the `matrix-org/backend-meta` version of `triage-incoming` for new issues in CI. ([\#14230](https://github.com/matrix-org/synapse/issues/14230))
- Build wheels on macOS 11, not 10.15. ([\#14249](https://github.com/matrix-org/synapse/issues/14249))
- Add debugging to help diagnose lost device list updates. ([\#14268](https://github.com/matrix-org/synapse/issues/14268))
- Add Rust cache to CI for `trial` runs. ([\#14287](https://github.com/matrix-org/synapse/issues/14287))
- Improve type hinting of `RawHeaders`. ([\#14303](https://github.com/matrix-org/synapse/issues/14303))
- Use Poetry 1.2.0 in the Twisted Trunk CI job. ([\#14305](https://github.com/matrix-org/synapse/issues/14305))
-Dependency updates - -Runtime: - -- Bump anyhow from 1.0.65 to 1.0.66. ([\#14278](https://github.com/matrix-org/synapse/issues/14278)) -- Bump jinja2 from 3.0.3 to 3.1.2. ([\#14271](https://github.com/matrix-org/synapse/issues/14271)) -- Bump prometheus-client from 0.14.0 to 0.15.0. ([\#14274](https://github.com/matrix-org/synapse/issues/14274)) -- Bump psycopg2 from 2.9.4 to 2.9.5. ([\#14331](https://github.com/matrix-org/synapse/issues/14331)) -- Bump pysaml2 from 7.1.2 to 7.2.1. ([\#14270](https://github.com/matrix-org/synapse/issues/14270)) -- Bump sentry-sdk from 1.5.11 to 1.10.1. ([\#14330](https://github.com/matrix-org/synapse/issues/14330)) -- Bump serde from 1.0.145 to 1.0.147. ([\#14277](https://github.com/matrix-org/synapse/issues/14277)) -- Bump serde_json from 1.0.86 to 1.0.87. ([\#14279](https://github.com/matrix-org/synapse/issues/14279)) - -Tooling and CI: - -- Bump black from 22.3.0 to 22.10.0. ([\#14328](https://github.com/matrix-org/synapse/issues/14328)) -- Bump flake8-bugbear from 21.3.2 to 22.9.23. ([\#14042](https://github.com/matrix-org/synapse/issues/14042)) -- Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0. ([\#14276](https://github.com/matrix-org/synapse/issues/14276)) -- Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. ([\#14275](https://github.com/matrix-org/synapse/issues/14275)) -- Bump setuptools-rust from 1.5.1 to 1.5.2. ([\#14273](https://github.com/matrix-org/synapse/issues/14273)) -- Bump twine from 3.8.0 to 4.0.1. ([\#14332](https://github.com/matrix-org/synapse/issues/14332)) -- Bump types-opentracing from 2.4.7 to 2.4.10. ([\#14133](https://github.com/matrix-org/synapse/issues/14133)) -- Bump types-requests from 2.28.11 to 2.28.11.2. ([\#14272](https://github.com/matrix-org/synapse/issues/14272)) -
- -Synapse 1.70.1 (2022-10-28) -=========================== - -This release fixes some regressions that were discovered in 1.70.0. - -[#14300](https://github.com/matrix-org/synapse/issues/14300) -was previously reported to be a regression in 1.70.0 as well. However, we have -since concluded that it was limited to the reporter and thus have not needed -to include any fix for it in 1.70.1. - - -Bugfixes --------- - -- Fix a bug introduced in Synapse 1.70.0rc1 where the access tokens sent to application services as headers were malformed. Application services which were obtaining access tokens from query parameters were not affected. ([\#14301](https://github.com/matrix-org/synapse/issues/14301)) -- Fix room creation being rate limited too aggressively since Synapse v1.69.0. ([\#14314](https://github.com/matrix-org/synapse/issues/14314)) - - -Synapse 1.70.0 (2022-10-26) -=========================== - -No significant changes since 1.70.0rc2. - - -Synapse 1.70.0rc2 (2022-10-25) -============================== - -Bugfixes --------- - -- Fix a bug introduced in Synapse 1.70.0rc1 where the information returned from the `/threads` API could be stale when threaded events are redacted. ([\#14248](https://github.com/matrix-org/synapse/issues/14248)) -- Fix a bug introduced in Synapse 1.70.0rc1 leading to broken outbound federation when using Python 3.7. ([\#14280](https://github.com/matrix-org/synapse/issues/14280)) -- Fix a bug introduced in Synapse 1.70.0rc1 where edits to non-message events were aggregated by the homeserver. ([\#14283](https://github.com/matrix-org/synapse/issues/14283)) - - -Internal Changes ----------------- - -- Build ABI3 wheels for CPython. ([\#14253](https://github.com/matrix-org/synapse/issues/14253)) -- For the aarch64 architecture, only build wheels for CPython manylinux. ([\#14259](https://github.com/matrix-org/synapse/issues/14259)) - - -Synapse 1.70.0rc1 (2022-10-19) -============================== - -Features --------- - -- Support for [MSC3856](https://github.com/matrix-org/matrix-spec-proposals/pull/3856): threads list API. ([\#13394](https://github.com/matrix-org/synapse/issues/13394), [\#14171](https://github.com/matrix-org/synapse/issues/14171), [\#14175](https://github.com/matrix-org/synapse/issues/14175)) -- Support for thread-specific notifications & receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771) and [MSC3773](https://github.com/matrix-org/matrix-spec-proposals/pull/3773)). ([\#13776](https://github.com/matrix-org/synapse/issues/13776), [\#13824](https://github.com/matrix-org/synapse/issues/13824), [\#13877](https://github.com/matrix-org/synapse/issues/13877), [\#13878](https://github.com/matrix-org/synapse/issues/13878), [\#14050](https://github.com/matrix-org/synapse/issues/14050), [\#14140](https://github.com/matrix-org/synapse/issues/14140), [\#14159](https://github.com/matrix-org/synapse/issues/14159), [\#14163](https://github.com/matrix-org/synapse/issues/14163), [\#14174](https://github.com/matrix-org/synapse/issues/14174), [\#14222](https://github.com/matrix-org/synapse/issues/14222)) -- Stop fetching missing `prev_events` after we already know their signature is invalid. ([\#13816](https://github.com/matrix-org/synapse/issues/13816)) -- Send application service access tokens as a header (and query parameter). Implements [MSC2832](https://github.com/matrix-org/matrix-spec-proposals/pull/2832). ([\#13996](https://github.com/matrix-org/synapse/issues/13996)) -- Ignore server ACL changes when generating pushes. 
Implements [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786). ([\#13997](https://github.com/matrix-org/synapse/issues/13997))
- Experimental support for redirecting to an implementation of a [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) HTTP rendezvous service. ([\#14018](https://github.com/matrix-org/synapse/issues/14018))
- The `/relations` endpoint can now be used on workers. ([\#14028](https://github.com/matrix-org/synapse/issues/14028))
- Advertise support for Matrix 1.3 and 1.4 on `/_matrix/client/versions`, as sketched after this list. ([\#14032](https://github.com/matrix-org/synapse/issues/14032), [\#14184](https://github.com/matrix-org/synapse/issues/14184))
- Improve validation of request bodies for the [Device Management](https://spec.matrix.org/v1.4/client-server-api/#device-management) and [MSC2697 Device Dehydration](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) client-server API endpoints. ([\#14054](https://github.com/matrix-org/synapse/issues/14054))
- Experimental support for [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874): Filtering threads from the `/messages` endpoint. ([\#14148](https://github.com/matrix-org/synapse/issues/14148))
- Improve the validation of the following PUT endpoints: [`/directory/room/{roomAlias}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directoryroomroomalias), [`/directory/list/room/{roomId}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directorylistroomroomid) and [`/directory/list/appservice/{networkId}/{roomId}`](https://spec.matrix.org/v1.4/application-service-api/#put_matrixclientv3directorylistappservicenetworkidroomid). ([\#14179](https://github.com/matrix-org/synapse/issues/14179))
- Build and publish binary wheels for `aarch64` platforms. ([\#14212](https://github.com/matrix-org/synapse/issues/14212))
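*Illustrative only:* a minimal sketch of checking the `/versions` advertisement mentioned above. The homeserver URL is a hypothetical placeholder; the `versions` key is the standard field of that endpoint's response.

```python
# Sketch: query the spec versions a homeserver advertises.
import requests

resp = requests.get("https://matrix.example.org/_matrix/client/versions")
resp.raise_for_status()
versions = resp.json().get("versions", [])
print("supports 1.3:", "v1.3" in versions)
print("supports 1.4:", "v1.4" in versions)
```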
Bugfixes
--------

- Prevent device names from appearing in device list updates in some situations when `allow_device_name_lookup_over_federation` is `false`. (This is not comprehensive: see [\#13114](https://github.com/matrix-org/synapse/issues/13114).) ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
- Fix a long-standing bug where redactions were not being sent over federation if we did not have the original event. ([\#13813](https://github.com/matrix-org/synapse/issues/13813))
- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled or have their new content applied. ([\#14034](https://github.com/matrix-org/synapse/issues/14034))
- Fix a bug introduced in Synapse 1.53.0 when querying `/publicRooms` with both a `room_type` filter and a `third_party_instance_id`. ([\#14053](https://github.com/matrix-org/synapse/issues/14053))
- Fix a bug introduced in Synapse 1.35.0 where errors parsing a `/send_join` or `/state` response would produce excessive, low-quality Sentry events. ([\#14065](https://github.com/matrix-org/synapse/issues/14065))
- Fix a long-standing bug where Synapse would error on the optional 'invite_room_state' field not being provided to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14083](https://github.com/matrix-org/synapse/issues/14083))
- Fix a bug where invalid oEmbed fields would cause the entire response to be discarded. Introduced in Synapse 1.18.0. ([\#14089](https://github.com/matrix-org/synapse/issues/14089))
- Fix a bug introduced in Synapse 1.37.0 in which an incorrect key name was used for sending and receiving room metadata when knocking on a room. ([\#14102](https://github.com/matrix-org/synapse/issues/14102))
- Fix a bug introduced in v1.69.0rc1 where the joined hosts for a given event were not being properly cached. ([\#14125](https://github.com/matrix-org/synapse/issues/14125))
- Fix a bug introduced in Synapse 1.30.0 where purging and rejoining a room without restarting in-between would result in a broken room. ([\#14161](https://github.com/matrix-org/synapse/issues/14161), [\#14164](https://github.com/matrix-org/synapse/issues/14164))
- Fix [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint returning potentially inaccurate closest events with `outliers` present. ([\#14215](https://github.com/matrix-org/synapse/issues/14215))


Updates to the Docker image
---------------------------

- Update the version of frozendict in Docker images and Debian packages from 2.3.3 to 2.3.4, which may fix memory leak problems. ([\#13955](https://github.com/matrix-org/synapse/issues/13955))
- Use the `minimal` Rust profile when building Synapse. ([\#14141](https://github.com/matrix-org/synapse/issues/14141))
- Prevent a class of database sharding errors when using `Dockerfile-workers` to spawn multiple instances of the same worker. Contributed by Jason Little. ([\#14165](https://github.com/matrix-org/synapse/issues/14165))
- Set `LD_PRELOAD` to use jemalloc memory allocator in Dockerfile-workers. ([\#14182](https://github.com/matrix-org/synapse/issues/14182))
- Fix pre-startup logging being lost when using the `Dockerfile-workers` image. ([\#14195](https://github.com/matrix-org/synapse/issues/14195))


Improved Documentation
----------------------

- Add sample worker files for `pusher` and `federation_sender`. ([\#14077](https://github.com/matrix-org/synapse/issues/14077))
- Improve the listener example on the metrics documentation. ([\#14078](https://github.com/matrix-org/synapse/issues/14078))
- Expand Google OpenID Connect example config to map email attribute. Contributed by @ptman. ([\#14081](https://github.com/matrix-org/synapse/issues/14081))
- The changelog entry ending in a full stop or exclamation mark is not optional. ([\#14087](https://github.com/matrix-org/synapse/issues/14087))
- Fix links to jemalloc documentation, which were broken in [#13491](https://github.com/matrix-org/synapse/pull/13491). ([\#14093](https://github.com/matrix-org/synapse/issues/14093))
- Remove unneeded `replication` listener from the Docker Compose example. ([\#14107](https://github.com/matrix-org/synapse/issues/14107))
- Fix name of `alias_creation_rules` option in the config manual documentation. ([\#14124](https://github.com/matrix-org/synapse/issues/14124))
- Clarify comment on event contexts. ([\#14145](https://github.com/matrix-org/synapse/issues/14145))
- Fix dead link to the [Admin Registration API](https://matrix-org.github.io/synapse/latest/admin_api/register_api.html). ([\#14189](https://github.com/matrix-org/synapse/issues/14189))


Deprecations and Removals
-------------------------

- Remove the experimental implementation of [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772).
([\#14094](https://github.com/matrix-org/synapse/issues/14094))
- Remove the unstable identifier for [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#14106](https://github.com/matrix-org/synapse/issues/14106), [\#14146](https://github.com/matrix-org/synapse/issues/14146))


Internal Changes
----------------

- Optimise queries used to get a user's rooms during sync. Contributed by Nick @ Beeper (@fizzadar). ([\#13991](https://github.com/matrix-org/synapse/issues/13991))
- Update authlib from 0.15.5 to 1.1.0. ([\#14006](https://github.com/matrix-org/synapse/issues/14006))
- Make `parse_server_name` consistent in handling invalid server names. ([\#14007](https://github.com/matrix-org/synapse/issues/14007))
- Don't repeatedly wake up the same users for batched events. ([\#14033](https://github.com/matrix-org/synapse/issues/14033))
- Complement test image: capture logs from nginx. ([\#14063](https://github.com/matrix-org/synapse/issues/14063))
- Don't create noisy Sentry events when a requester drops connection to the metrics server mid-request. ([\#14072](https://github.com/matrix-org/synapse/issues/14072))
- Run the integration test suites with the asyncio reactor enabled in CI. ([\#14092](https://github.com/matrix-org/synapse/issues/14092))
- Add debug logs to figure out why an event was filtered out of the client response. ([\#14095](https://github.com/matrix-org/synapse/issues/14095))
- Indicate what endpoint came back with a JSON response we were unable to parse. ([\#14097](https://github.com/matrix-org/synapse/issues/14097))
- Break up calls to fetch rooms for many users. Contributed by Nick @ Beeper (@fizzadar). ([\#14109](https://github.com/matrix-org/synapse/issues/14109))
- Faster joins: prioritise the server we joined by when restarting a partial join resync. ([\#14126](https://github.com/matrix-org/synapse/issues/14126))
- Cache Rust build cache when building docker images. ([\#14130](https://github.com/matrix-org/synapse/issues/14130))
- Enable dependabot for Rust dependencies. ([\#14132](https://github.com/matrix-org/synapse/issues/14132))
- Bump typing-extensions from 4.1.1 to 4.4.0. ([\#14134](https://github.com/matrix-org/synapse/issues/14134))
- Use the `minimal` Rust profile when building Synapse. ([\#14141](https://github.com/matrix-org/synapse/issues/14141))
- Remove unused configuration code. ([\#14142](https://github.com/matrix-org/synapse/issues/14142))
- Prepare for the [`gotestfmt` repository move](https://github.com/GoTestTools/gotestfmt/discussions/46). ([\#14144](https://github.com/matrix-org/synapse/issues/14144))
- Invalidate rooms for user caches on replicated event, fix sync cache race in synapse workers. Contributed by Nick @ Beeper (@fizzadar). ([\#14155](https://github.com/matrix-org/synapse/issues/14155))
- Enable url previews when testing with complement. ([\#14198](https://github.com/matrix-org/synapse/issues/14198))
- When authenticating batched events, check for auth events in batch as well as DB. ([\#14214](https://github.com/matrix-org/synapse/issues/14214))
- Update CI config to avoid GitHub Actions deprecation warnings. ([\#14216](https://github.com/matrix-org/synapse/issues/14216), [\#14224](https://github.com/matrix-org/synapse/issues/14224))
- Update dependency requirements to allow building with poetry-core 1.3.2.
([\#14217](https://github.com/matrix-org/synapse/issues/14217))
- Rename the `cache_memory` extra to `cache-memory`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14221](https://github.com/matrix-org/synapse/issues/14221))
- Specify dev-dependencies using lower bounds, to reduce the likelihood of a dependabot merge conflict. The lockfile continues to pin to specific versions. ([\#14227](https://github.com/matrix-org/synapse/issues/14227))


Synapse 1.69.0 (2022-10-17)
===========================

Please note that legacy Prometheus metric names are now deprecated and will be removed in Synapse 1.73.0.
Server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
See the [upgrade notes](https://matrix-org.github.io/synapse/v1.69/upgrade.html#upgrading-to-v1690) for more details.

No significant changes since 1.69.0rc4.


Synapse 1.69.0rc4 (2022-10-14)
==============================

Bugfixes
--------

- Fix poor performance of the `event_push_backfill_thread_id` background update, which was introduced in Synapse 1.68.0rc1. ([\#14172](https://github.com/matrix-org/synapse/issues/14172), [\#14181](https://github.com/matrix-org/synapse/issues/14181))


Updates to the Docker image
---------------------------

- Fix docker build OOMing in CI for arm64 builds. ([\#14173](https://github.com/matrix-org/synapse/issues/14173))


Synapse 1.69.0rc3 (2022-10-12)
==============================

Bugfixes
--------

- Fix an issue with Docker images causing the Rust dependencies to not be pinned correctly. Introduced in v1.68.0. ([\#14129](https://github.com/matrix-org/synapse/issues/14129))
- Fix a bug introduced in Synapse 1.69.0rc1 which would cause registration replication requests to fail if the worker sending the request is not running Synapse 1.69. ([\#14135](https://github.com/matrix-org/synapse/issues/14135))
- Fix error in background update when rotating existing notifications. Introduced in v1.69.0rc2. ([\#14138](https://github.com/matrix-org/synapse/issues/14138))


Internal Changes
----------------

- Rename the `url_preview` extra to `url-preview`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14085](https://github.com/matrix-org/synapse/issues/14085))


Synapse 1.69.0rc2 (2022-10-06)
==============================

Deprecations and Removals
-------------------------

- Deprecate the `generate_short_term_login_token` method in favor of an async `create_login_token` method in the Module API. ([\#13842](https://github.com/matrix-org/synapse/issues/13842))


Internal Changes
----------------

- Ensure Synapse v1.69 works with upcoming database changes in v1.70. ([\#14045](https://github.com/matrix-org/synapse/issues/14045))
- Fix a bug introduced in Synapse v1.68.0 where messages could not be sent in rooms with non-integer `notifications` power level. ([\#14073](https://github.com/matrix-org/synapse/issues/14073))
- Temporarily pin build-system requirements to work around an incompatibility with poetry-core 1.3.0. This will be reverted before the v1.69.0 release proper, see [\#14079](https://github.com/matrix-org/synapse/issues/14079).
([\#14080](https://github.com/matrix-org/synapse/issues/14080))


Synapse 1.69.0rc1 (2022-10-04)
==============================

Features
--------

- Allow application services to set the `origin_server_ts` of a state event by providing the query parameter `ts` in [`PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3roomsroomidstateeventtypestatekey), per [MSC3316](https://github.com/matrix-org/matrix-doc/pull/3316). Contributed by @lukasdenk. A usage sketch follows this list. ([\#11866](https://github.com/matrix-org/synapse/issues/11866))
- Allow server admins to require a manual approval process before new accounts can be used (using [MSC3866](https://github.com/matrix-org/matrix-spec-proposals/pull/3866)). ([\#13556](https://github.com/matrix-org/synapse/issues/13556))
- Exponentially back off from backfilling the same event over and over. ([\#13635](https://github.com/matrix-org/synapse/issues/13635), [\#13936](https://github.com/matrix-org/synapse/issues/13936))
- Add cache invalidation across workers to module API. ([\#13667](https://github.com/matrix-org/synapse/issues/13667), [\#13947](https://github.com/matrix-org/synapse/issues/13947))
- Experimental implementation of [MSC3882](https://github.com/matrix-org/matrix-spec-proposals/pull/3882) to allow an existing device/session to generate a login token for use on a new device/session. ([\#13722](https://github.com/matrix-org/synapse/issues/13722), [\#13868](https://github.com/matrix-org/synapse/issues/13868))
- Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)). ([\#13782](https://github.com/matrix-org/synapse/issues/13782), [\#13893](https://github.com/matrix-org/synapse/issues/13893), [\#13932](https://github.com/matrix-org/synapse/issues/13932), [\#13937](https://github.com/matrix-org/synapse/issues/13937), [\#13939](https://github.com/matrix-org/synapse/issues/13939))
- Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881). ([\#13799](https://github.com/matrix-org/synapse/issues/13799), [\#13831](https://github.com/matrix-org/synapse/issues/13831), [\#13860](https://github.com/matrix-org/synapse/issues/13860))
- Keep track of when an event pulled over federation fails its signature check so we can intelligently back off in the future. ([\#13815](https://github.com/matrix-org/synapse/issues/13815))
- Improve validation for the unspecced, internal-only `_matrix/client/unstable/add_threepid/msisdn/submit_token` endpoint. ([\#13832](https://github.com/matrix-org/synapse/issues/13832))
- Faster remote room joins: record _when_ we first partial-join to a room. ([\#13892](https://github.com/matrix-org/synapse/issues/13892))
- Support a `dir` parameter on the `/relations` endpoint per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#13920](https://github.com/matrix-org/synapse/issues/13920))
- Ask mail servers receiving emails from Synapse to not send automatic replies (e.g. out-of-office responses). ([\#13957](https://github.com/matrix-org/synapse/issues/13957))
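*Illustrative only:* a minimal sketch of the MSC3316 entry above, showing an application service PUTting a state event with an explicit `origin_server_ts` via the `ts` query parameter. The homeserver URL, room ID, and AS token are hypothetical placeholders.

```python
# Sketch: set origin_server_ts on a state event via the `ts` query parameter.
# All concrete values below are placeholders, not real identifiers or secrets.
import requests

AS_TOKEN = "as_token_placeholder"
url = (
    "https://matrix.example.org/_matrix/client/r0/rooms/"
    "!room:example.org/state/m.room.topic"
)
resp = requests.put(
    url,
    params={"ts": "1664841600000"},  # desired origin_server_ts, ms since epoch
    headers={"Authorization": f"Bearer {AS_TOKEN}"},
    json={"topic": "Topic with a backdated timestamp"},
)
resp.raise_for_status()
```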
Bugfixes
--------

- Send push notifications for invites received over federation. ([\#13719](https://github.com/matrix-org/synapse/issues/13719), [\#14014](https://github.com/matrix-org/synapse/issues/14014))
- Fix a long-standing bug where typing events would be accepted from remote servers not present in a room. Also fix a bug where incoming typing events would cause other incoming events to get stuck during a fast join. ([\#13830](https://github.com/matrix-org/synapse/issues/13830))
- Fix a bug introduced in Synapse v1.53.0 where the experimental implementation of [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) would give incorrect results when paginating forward. ([\#13840](https://github.com/matrix-org/synapse/issues/13840))
- Fix access token leak to logs from proxy agent. ([\#13855](https://github.com/matrix-org/synapse/issues/13855))
- Fix `have_seen_event` cache not being invalidated after we persist an event, which caused inefficiencies such as extra `/state` federation calls. ([\#13863](https://github.com/matrix-org/synapse/issues/13863))
- Faster room joins: Fix a bug introduced in 1.66.0 where an error would be logged when syncing after joining a room. ([\#13872](https://github.com/matrix-org/synapse/issues/13872))
- Fix a bug introduced in 1.66.0 where some required fields in the pushrules sent to clients were not present anymore. Contributed by Nico. ([\#13904](https://github.com/matrix-org/synapse/issues/13904))
- Fix packaging to include `Cargo.lock` in `sdist`. ([\#13909](https://github.com/matrix-org/synapse/issues/13909))
- Fix a long-standing bug where device updates could cause delays sending out to-device messages over federation. ([\#13922](https://github.com/matrix-org/synapse/issues/13922))
- Fix a bug introduced in v1.68.0 where Synapse would require `setuptools_rust` at runtime, even though the package is only required at build time. ([\#13952](https://github.com/matrix-org/synapse/issues/13952))
- Fix a long-standing bug where `POST /_matrix/client/v3/keys/query` requests could result in excessively large SQL queries. ([\#13956](https://github.com/matrix-org/synapse/issues/13956))
- Fix a performance regression in the `get_users_in_room` database query. Introduced in v1.67.0. ([\#13972](https://github.com/matrix-org/synapse/issues/13972))
- Fix a bug introduced in v1.68.0 where the Rust extension wasn't built in `release` mode when using `poetry install`. ([\#14009](https://github.com/matrix-org/synapse/issues/14009))
- Do not return an unspecified `original_event` field when using the stable `/relations` endpoint. Introduced in Synapse v1.57.0. ([\#14025](https://github.com/matrix-org/synapse/issues/14025))
- Correctly handle a race with device lists when a remote user leaves during a partial join. ([\#13885](https://github.com/matrix-org/synapse/issues/13885))
- Correctly handle sending local device list updates to remote servers during a partial join. ([\#13934](https://github.com/matrix-org/synapse/issues/13934))


Improved Documentation
----------------------

- Add `worker_main_http_uri` for the worker generator bash script. ([\#13772](https://github.com/matrix-org/synapse/issues/13772))
- Update URL for the NixOS module for Synapse. ([\#13818](https://github.com/matrix-org/synapse/issues/13818))
- Fix a mistake in sso_mapping_providers.md: `map_user_attributes` is expected to return `display_name`, not `displayname`.
([\#13836](https://github.com/matrix-org/synapse/issues/13836))
- Fix a cross-link from the registration admin API to the `registration_shared_secret` configuration documentation. ([\#13870](https://github.com/matrix-org/synapse/issues/13870))
- Update the man page for the `hash_password` script to correct the default number of bcrypt rounds performed. ([\#13911](https://github.com/matrix-org/synapse/issues/13911), [\#13930](https://github.com/matrix-org/synapse/issues/13930))
- Emphasize the right reasons to use `(room_id, event_id)` in a database schema. ([\#13915](https://github.com/matrix-org/synapse/issues/13915))
- Add instruction to contributing guide for running unit tests in parallel. Contributed by @ashfame. ([\#13928](https://github.com/matrix-org/synapse/issues/13928))
- Clarify that the `auto_join_rooms` config option can also be used with Space aliases. ([\#13931](https://github.com/matrix-org/synapse/issues/13931))
- Add some cross references to worker documentation. ([\#13974](https://github.com/matrix-org/synapse/issues/13974))
- Linkify URLs in config documentation. ([\#14003](https://github.com/matrix-org/synapse/issues/14003))


Deprecations and Removals
-------------------------

- Remove the `complete_sso_login` method from the Module API which was deprecated in Synapse 1.13.0. ([\#13843](https://github.com/matrix-org/synapse/issues/13843))
- Announce that legacy metric names are deprecated, will be turned off by default in Synapse v1.71.0 and removed altogether in Synapse v1.73.0. See the upgrade notes for more information. ([\#14024](https://github.com/matrix-org/synapse/issues/14024))


Internal Changes
----------------

- Speed up creation of DM rooms. ([\#13487](https://github.com/matrix-org/synapse/issues/13487), [\#13800](https://github.com/matrix-org/synapse/issues/13800))
- Port push rules to using Rust. ([\#13768](https://github.com/matrix-org/synapse/issues/13768), [\#13838](https://github.com/matrix-org/synapse/issues/13838), [\#13889](https://github.com/matrix-org/synapse/issues/13889))
- Optimise get rooms for user calls. Contributed by Nick @ Beeper (@fizzadar). ([\#13787](https://github.com/matrix-org/synapse/issues/13787))
- Update the script which makes full schema dumps. ([\#13792](https://github.com/matrix-org/synapse/issues/13792))
- Use shared methods for cache invalidation when persisting events, remove duplicate codepaths. Contributed by Nick @ Beeper (@fizzadar). ([\#13796](https://github.com/matrix-org/synapse/issues/13796))
- Improve the `synapse.api.auth.Auth` mock used in unit tests. ([\#13809](https://github.com/matrix-org/synapse/issues/13809))
- Faster Remote Room Joins: tell remote homeservers that we are unable to authorise them if they query a room which has partial state on our server. ([\#13823](https://github.com/matrix-org/synapse/issues/13823))
- Carry IdP Session IDs through user-mapping sessions. ([\#13839](https://github.com/matrix-org/synapse/issues/13839))
- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
- Raise issue if complement fails with latest deps. ([\#13859](https://github.com/matrix-org/synapse/issues/13859))
- Correct the comments in the complement dockerfile. ([\#13867](https://github.com/matrix-org/synapse/issues/13867))
- Create a new snapshot of the database schema.
([\#13873](https://github.com/matrix-org/synapse/issues/13873))
- Faster room joins: Send device list updates to most servers in rooms with partial state. ([\#13874](https://github.com/matrix-org/synapse/issues/13874), [\#14013](https://github.com/matrix-org/synapse/issues/14013))
- Add comments to the Prometheus recording rules to make it clear which set of rules you need for Grafana or Prometheus Console. ([\#13876](https://github.com/matrix-org/synapse/issues/13876))
- Only pull relevant backfill points from the database based on the current depth and limit (instead of all) every time we want to `/backfill`. ([\#13879](https://github.com/matrix-org/synapse/issues/13879))
- Faster room joins: Avoid waiting for full state when processing `/keys/changes` requests. ([\#13888](https://github.com/matrix-org/synapse/issues/13888))
- Improve backfill robustness by trying more servers when we get a `4xx` error back. ([\#13890](https://github.com/matrix-org/synapse/issues/13890))
- Fix mypy errors with canonicaljson 1.6.3. ([\#13905](https://github.com/matrix-org/synapse/issues/13905))
- Faster remote room joins: correctly handle remote device list updates during a partial join. ([\#13913](https://github.com/matrix-org/synapse/issues/13913))
- Complement image: propagate SIGTERM to all workers. ([\#13914](https://github.com/matrix-org/synapse/issues/13914))
- Update an inaccurate comment in Synapse's upsert database helper. ([\#13924](https://github.com/matrix-org/synapse/issues/13924))
- Update mypy (0.950 -> 0.981) and mypy-zope (0.3.7 -> 0.3.11). ([\#13925](https://github.com/matrix-org/synapse/issues/13925), [\#13993](https://github.com/matrix-org/synapse/issues/13993))
- Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating users to copy over during a room upgrade. ([\#13960](https://github.com/matrix-org/synapse/issues/13960))
- Refactor language in user directory `_track_user_joined_room` code to make it more clear that we use both local and remote users. ([\#13966](https://github.com/matrix-org/synapse/issues/13966))
- Revert catch-all exceptions being recorded as event pull attempt failures (only handle what we know about). ([\#13969](https://github.com/matrix-org/synapse/issues/13969))
- Speed up calculating push actions in large rooms. ([\#13973](https://github.com/matrix-org/synapse/issues/13973), [\#13992](https://github.com/matrix-org/synapse/issues/13992))
- Enable update notifications from GitHub's dependabot. ([\#13976](https://github.com/matrix-org/synapse/issues/13976))
- Prototype a workflow to automatically add changelogs to dependabot PRs. ([\#13998](https://github.com/matrix-org/synapse/issues/13998), [\#14011](https://github.com/matrix-org/synapse/issues/14011), [\#14017](https://github.com/matrix-org/synapse/issues/14017), [\#14021](https://github.com/matrix-org/synapse/issues/14021), [\#14027](https://github.com/matrix-org/synapse/issues/14027))
- Fix type annotations to be compatible with new annotations in development versions of twisted. ([\#14012](https://github.com/matrix-org/synapse/issues/14012))
- Clear out stale entries in `event_push_actions_staging` table. ([\#14020](https://github.com/matrix-org/synapse/issues/14020))
- Bump versions of GitHub actions.
([\#13978](https://github.com/matrix-org/synapse/issues/13978), [\#13979](https://github.com/matrix-org/synapse/issues/13979), [\#13980](https://github.com/matrix-org/synapse/issues/13980), [\#13982](https://github.com/matrix-org/synapse/issues/13982), [\#14015](https://github.com/matrix-org/synapse/issues/14015), [\#14019](https://github.com/matrix-org/synapse/issues/14019), [\#14022](https://github.com/matrix-org/synapse/issues/14022), [\#14023](https://github.com/matrix-org/synapse/issues/14023))


Synapse 1.68.0 (2022-09-27)
===========================

Please note that Synapse will now refuse to start if configured to use a version of SQLite older than 3.27.

In addition, please note that installing Synapse from a source checkout now requires a recent Rust compiler.
Those using packages will not be affected. On most platforms, installing with `pip install matrix-synapse` will not be affected.
See the [upgrade notes](https://matrix-org.github.io/synapse/v1.68/upgrade.html#upgrading-to-v1680).

Bugfixes
--------

- Fix packaging to include `Cargo.lock` in `sdist`. ([\#13909](https://github.com/matrix-org/synapse/issues/13909))


Synapse 1.68.0rc2 (2022-09-23)
==============================

Bugfixes
--------

- Fix building from packaged sdist. Broken in v1.68.0rc1. ([\#13866](https://github.com/matrix-org/synapse/issues/13866))


Internal Changes
----------------

- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
- Lower minimum supported rustc version to 1.58.1. ([\#13857](https://github.com/matrix-org/synapse/issues/13857))
- Lock Rust dependencies' versions. ([\#13858](https://github.com/matrix-org/synapse/issues/13858))


Synapse 1.68.0rc1 (2022-09-20)
==============================

Features
--------

- Keep track of when we fail to process a pulled event over federation so we can intelligently back off in the future. ([\#13589](https://github.com/matrix-org/synapse/issues/13589), [\#13814](https://github.com/matrix-org/synapse/issues/13814))
- Add an [admin API endpoint to fetch messages within a particular window of time](https://matrix-org.github.io/synapse/v1.68/admin_api/rooms.html#room-messages-api); a usage sketch follows this list. ([\#13672](https://github.com/matrix-org/synapse/issues/13672))
- Add an [admin API endpoint to find a user based on their external ID in an auth provider](https://matrix-org.github.io/synapse/v1.68/admin_api/user_admin_api.html#find-a-user-based-on-their-id-in-an-auth-provider). ([\#13810](https://github.com/matrix-org/synapse/issues/13810))
- Cancel the processing of key query requests when they time out. ([\#13680](https://github.com/matrix-org/synapse/issues/13680))
- Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/msisdn/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidmsisdnrequesttoken), [`/org.matrix.msc3720/account_status`](https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/user_status/proposals/3720-account-status.md#post-_matrixclientv1account_status), [`/account/3pid/add`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidadd), [`/account/3pid/bind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidbind), [`/account/3pid/delete`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3piddelete) and [`/account/3pid/unbind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidunbind). ([\#13687](https://github.com/matrix-org/synapse/issues/13687), [\#13736](https://github.com/matrix-org/synapse/issues/13736))
- Document the timestamp when a user accepts the consent, if [consent tracking](https://matrix-org.github.io/synapse/latest/consent_tracking.html) is used. ([\#13741](https://github.com/matrix-org/synapse/issues/13741))
- Add a `listeners[x].request_id_header` configuration option to specify which request header to extract and use as the request ID in order to correlate requests from a reverse proxy. ([\#13801](https://github.com/matrix-org/synapse/issues/13801))
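*Illustrative only:* a minimal sketch of the room-messages admin endpoint mentioned above. It assumes the endpoint shape documented at the linked admin API page; the homeserver URL, room ID, and admin access token below are hypothetical placeholders.

```python
# Sketch of the admin room-messages endpoint; the exact path and parameters
# are per the linked admin API docs, and all values here are placeholders.
import requests

ADMIN_TOKEN = "syt_admin_placeholder"
ROOM_ID = "!room:example.org"

resp = requests.get(
    f"https://matrix.example.org/_synapse/admin/v1/rooms/{ROOM_ID}/messages",
    params={"dir": "b", "limit": "10"},  # paginate backwards, up to 10 events
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
)
resp.raise_for_status()
for event in resp.json().get("chunk", []):
    print(event.get("type"), event.get("origin_server_ts"))
```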
Bugfixes
--------

- Fix a bug introduced in Synapse 1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). ([\#13506](https://github.com/matrix-org/synapse/issues/13506))
- Fix a long-standing bug where previously rejected events could end up in room state because they pass auth checks given the current state of the room. ([\#13723](https://github.com/matrix-org/synapse/issues/13723))
- Fix a long-standing bug where Synapse fails to start if a signing key file contains an empty line. ([\#13738](https://github.com/matrix-org/synapse/issues/13738))
- Fix a long-standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases. ([\#13746](https://github.com/matrix-org/synapse/issues/13746))
- Fix a long-standing bug where device lists would remain cached when remote users left and rejoined the last room shared with the local homeserver. ([\#13749](https://github.com/matrix-org/synapse/issues/13749), [\#13826](https://github.com/matrix-org/synapse/issues/13826))
- Fix a long-standing bug that could cause stale caches in some rare cases on the first startup of Synapse with replication. ([\#13766](https://github.com/matrix-org/synapse/issues/13766))
- Fix a long-standing spec compliance bug where Synapse would accept a trailing slash on the end of `/get_missing_events` federation requests. ([\#13789](https://github.com/matrix-org/synapse/issues/13789))
- Delete associated data from `event_failed_pull_attempts`, `insertion_events` and `insertion_event_extremities` when purging the room. ([\#13825](https://github.com/matrix-org/synapse/issues/13825))


Improved Documentation
----------------------

- Note that `libpq` is required on ARM-based Macs.
-
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse 1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). ([\#13506](https://github.com/matrix-org/synapse/issues/13506))
-- Fix a long-standing bug where previously rejected events could end up in room state because they pass auth checks given the current state of the room. ([\#13723](https://github.com/matrix-org/synapse/issues/13723))
-- Fix a long-standing bug where Synapse fails to start if a signing key file contains an empty line. ([\#13738](https://github.com/matrix-org/synapse/issues/13738))
-- Fix a long-standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases. ([\#13746](https://github.com/matrix-org/synapse/issues/13746))
-- Fix a long-standing bug where device lists would remain cached when remote users left and rejoined the last room shared with the local homeserver. ([\#13749](https://github.com/matrix-org/synapse/issues/13749), [\#13826](https://github.com/matrix-org/synapse/issues/13826))
-- Fix a long-standing bug that could cause stale caches in some rare cases on the first startup of Synapse with replication. ([\#13766](https://github.com/matrix-org/synapse/issues/13766))
-- Fix a long-standing spec compliance bug where Synapse would accept a trailing slash on the end of `/get_missing_events` federation requests. ([\#13789](https://github.com/matrix-org/synapse/issues/13789))
-- Delete associated data from `event_failed_pull_attempts`, `insertion_events` and `insertion_event_extremities` when purging the room. ([\#13825](https://github.com/matrix-org/synapse/issues/13825))
-
-
-Improved Documentation
-----------------------
-
-- Note that `libpq` is required on ARM-based Macs. ([\#13480](https://github.com/matrix-org/synapse/issues/13480))
-- Fix a mistake in the config manual introduced in Synapse 1.22.0: the `event_cache_size` _is_ scaled by `caches.global_factor`. ([\#13726](https://github.com/matrix-org/synapse/issues/13726))
-- Fix a typo in the documentation for the login ratelimiting configuration. ([\#13727](https://github.com/matrix-org/synapse/issues/13727))
-- Define Synapse's compatibility policy for SQLite versions. ([\#13728](https://github.com/matrix-org/synapse/issues/13728))
-- Add docs for the common fix of deleting the `matrix_synapse.egg-info/` directory to resolve Python dependency problems. ([\#13785](https://github.com/matrix-org/synapse/issues/13785))
-- Update request log format documentation to mention the format used when the authenticated user is controlling another user. ([\#13794](https://github.com/matrix-org/synapse/issues/13794))
-
-
-Deprecations and Removals
--------------------------
-
-- Synapse will now refuse to start if configured to use SQLite < 3.27. ([\#13760](https://github.com/matrix-org/synapse/issues/13760))
-- Don't include redundant `prev_state` in new events. Contributed by Denis Kariakin (@dakariakin). ([\#13791](https://github.com/matrix-org/synapse/issues/13791))
-
-
-Internal Changes
-----------------
-
-- Add a stub Rust crate. ([\#12595](https://github.com/matrix-org/synapse/issues/12595), [\#13734](https://github.com/matrix-org/synapse/issues/13734), [\#13735](https://github.com/matrix-org/synapse/issues/13735), [\#13743](https://github.com/matrix-org/synapse/issues/13743), [\#13763](https://github.com/matrix-org/synapse/issues/13763), [\#13769](https://github.com/matrix-org/synapse/issues/13769), [\#13778](https://github.com/matrix-org/synapse/issues/13778))
-- Bump the minimum dependency of `matrix_common` to 1.3.0 to make use of the `MXCUri` class. Use `MXCUri` to simplify media retention test code. ([\#13162](https://github.com/matrix-org/synapse/issues/13162))
-- Add and populate the `event_stream_ordering` column on the `receipts` table for future optimisation of push action processing. Contributed by Nick @ Beeper (@fizzadar). ([\#13703](https://github.com/matrix-org/synapse/issues/13703))
-- Rename the `EventFormatVersions` enum values so that they line up with room version numbers. ([\#13706](https://github.com/matrix-org/synapse/issues/13706))
-- Update trial old deps CI to use Poetry 1.2.0. ([\#13707](https://github.com/matrix-org/synapse/issues/13707), [\#13725](https://github.com/matrix-org/synapse/issues/13725))
-- Add experimental configuration option to allow disabling legacy Prometheus metric names. ([\#13714](https://github.com/matrix-org/synapse/issues/13714), [\#13717](https://github.com/matrix-org/synapse/issues/13717), [\#13718](https://github.com/matrix-org/synapse/issues/13718))
-- Fix typechecking with latest types-jsonschema. ([\#13724](https://github.com/matrix-org/synapse/issues/13724))
-- Strip number suffix from instance name to consolidate services that traces are spread over. ([\#13729](https://github.com/matrix-org/synapse/issues/13729))
-- Instrument `get_metadata_for_events` for understandable traces in Jaeger. ([\#13730](https://github.com/matrix-org/synapse/issues/13730))
-- Remove old queries to join room memberships to current state events. Contributed by Nick @ Beeper (@fizzadar). ([\#13745](https://github.com/matrix-org/synapse/issues/13745))
-- Avoid raising an error due to malformed user IDs in `get_current_hosts_in_room`.
-  Malformed user IDs cannot currently join a room, so this error would not be hit. ([\#13748](https://github.com/matrix-org/synapse/issues/13748))
-- Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state. ([\#13750](https://github.com/matrix-org/synapse/issues/13750))
-- Use an additional database query when persisting receipts. ([\#13752](https://github.com/matrix-org/synapse/issues/13752))
-- Preparatory work for storing thread IDs for notifications and receipts. ([\#13753](https://github.com/matrix-org/synapse/issues/13753))
-- Re-type hint some collections as read-only. ([\#13754](https://github.com/matrix-org/synapse/issues/13754))
-- Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used. ([\#13756](https://github.com/matrix-org/synapse/issues/13756))
-- Add a check for editable installs if the Rust library needs rebuilding. ([\#13759](https://github.com/matrix-org/synapse/issues/13759))
-- Tag traces with the instance name to be able to easily jump into the right logs and filter traces by instance. ([\#13761](https://github.com/matrix-org/synapse/issues/13761))
-- Concurrently fetch room push actions when calculating badge counts. Contributed by Nick @ Beeper (@fizzadar). ([\#13765](https://github.com/matrix-org/synapse/issues/13765))
-- Update the script which makes full schema dumps. ([\#13770](https://github.com/matrix-org/synapse/issues/13770))
-- Deduplicate `is_server_notices_room`. ([\#13780](https://github.com/matrix-org/synapse/issues/13780))
-- Simplify the dependency DAG in the tests workflow. ([\#13784](https://github.com/matrix-org/synapse/issues/13784))
-- Remove an old, incorrect migration file. ([\#13788](https://github.com/matrix-org/synapse/issues/13788))
-- Remove unused method in `synapse.api.auth.Auth`. ([\#13795](https://github.com/matrix-org/synapse/issues/13795))
-- Fix a memory leak when running the unit tests. ([\#13798](https://github.com/matrix-org/synapse/issues/13798))
-- Use partial indices on SQLite. ([\#13802](https://github.com/matrix-org/synapse/issues/13802))
-- Check that portdb generates the same postgres schema as that in the source tree. ([\#13808](https://github.com/matrix-org/synapse/issues/13808))
-- Fix Docker build when Rust .so has been built locally first. ([\#13811](https://github.com/matrix-org/synapse/issues/13811))
-- Complement: Initialise the Postgres database directly inside the target image instead of the base Postgres image to fix building using Buildah. ([\#13819](https://github.com/matrix-org/synapse/issues/13819))
-- Support providing an index predicate clause when doing upserts. ([\#13822](https://github.com/matrix-org/synapse/issues/13822))
-- Minor speedups to linting in CI. ([\#13827](https://github.com/matrix-org/synapse/issues/13827))
-
-
-Synapse 1.67.0 (2022-09-13)
-===========================
-
-This release removes support for the deprecated direct TCP replication configuration for workers. Server admins should use Redis instead. See the [upgrade notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670).
-
-The minimum version of `poetry` supported for managing source checkouts is now 1.2.0.
-
-**Notice:** from the next major release (1.68.0) installing Synapse from a source checkout will require a recent Rust compiler. Those using packages or `pip install matrix-synapse` will not be affected.
-See the [upgrade notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670).
-
-**Notice:** from the next major release (1.68.0), running Synapse with a SQLite database will require SQLite version 3.27.0 or higher. (The [current minimum version is SQLite 3.22.0](https://github.com/matrix-org/synapse/blob/release-v1.67/synapse/storage/engines/sqlite.py#L69-L78).)
-See [#12983](https://github.com/matrix-org/synapse/issues/12983) and the [upgrade notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670) for more details.
-
-
-No significant changes since 1.67.0rc1.
-
-
-Synapse 1.67.0rc1 (2022-09-06)
-==============================
-
-Features
---------
-
-- Support setting the registration shared secret in a file, via a new `registration_shared_secret_path` configuration option. ([\#13614](https://github.com/matrix-org/synapse/issues/13614))
-- Change the default startup behaviour so that any missing "additional" configuration files (signing key, etc) are generated automatically. ([\#13615](https://github.com/matrix-org/synapse/issues/13615))
-- Improve performance of sending messages in rooms with thousands of local users. ([\#13634](https://github.com/matrix-org/synapse/issues/13634))
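-
-A minimal sketch of the new shared-secret option in `homeserver.yaml` (the path below is an arbitrary example; the file should be readable only by Synapse):
-
-```yaml
-# Instead of embedding the secret in the config via
-# `registration_shared_secret`, read it from a file at startup:
-registration_shared_secret_path: /etc/synapse/registration_shared_secret
-```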
-
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse 1.13 where the [List Rooms admin API](https://matrix-org.github.io/synapse/develop/admin_api/rooms.html#list-room-api) would return integers instead of booleans for the `federatable` and `public` fields when using a Sqlite database. ([\#13509](https://github.com/matrix-org/synapse/issues/13509))
-- Fix a bug where a user could not `/forget` a room after its last member had left. ([\#13546](https://github.com/matrix-org/synapse/issues/13546))
-- Faster Room Joins: fix `/make_knock` blocking indefinitely when the room in question is a partial-stated room. ([\#13583](https://github.com/matrix-org/synapse/issues/13583))
-- Fix a bug where the current stream position could be loaded behind the actual position. ([\#13585](https://github.com/matrix-org/synapse/issues/13585))
-- Fix a long-standing bug in `register_new_matrix_user` which meant it was always necessary to explicitly give a server URL. ([\#13616](https://github.com/matrix-org/synapse/issues/13616))
-- Fix the running of [MSC1763](https://github.com/matrix-org/matrix-spec-proposals/pull/1763) retention purge_jobs in deployments with background jobs running on a worker by forcing them back onto the main worker. Contributed by Brad @ Beeper. ([\#13632](https://github.com/matrix-org/synapse/issues/13632))
-- Fix a long-standing bug where downloaded media for URL previews was not deleted while database background updates were running. ([\#13657](https://github.com/matrix-org/synapse/issues/13657))
-- Fix [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to return the correct next event when the events have the same timestamp. ([\#13658](https://github.com/matrix-org/synapse/issues/13658))
-- Fix a bug introduced in v1.22.0 where media plugins could be wedged if a client disconnected early. ([\#13660](https://github.com/matrix-org/synapse/issues/13660))
-- Fix a long-standing bug which meant that keys for unwhitelisted servers were not returned by `/_matrix/key/v2/query`. ([\#13683](https://github.com/matrix-org/synapse/issues/13683))
-- Fix a bug introduced in Synapse 1.20.0 that would cause the unstable unread counts from [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) to be calculated even if the feature is disabled. ([\#13694](https://github.com/matrix-org/synapse/issues/13694))
-
-
-Updates to the Docker image
----------------------------
-
-- Update docker image to use a stable version of poetry. ([\#13688](https://github.com/matrix-org/synapse/issues/13688))
-
-
-Improved Documentation
-----------------------
-
-- Improve the description of the ["chain cover index"](https://matrix-org.github.io/synapse/latest/auth_chain_difference_algorithm.html) used internally by Synapse. ([\#13602](https://github.com/matrix-org/synapse/issues/13602))
-- Document how ["monthly active users"](https://matrix-org.github.io/synapse/latest/usage/administration/monthly_active_users.html) is calculated and used. ([\#13617](https://github.com/matrix-org/synapse/issues/13617))
-- Improve documentation around user registration. ([\#13640](https://github.com/matrix-org/synapse/issues/13640))
-- Remove documentation of legacy `frontend_proxy` worker app. ([\#13645](https://github.com/matrix-org/synapse/issues/13645))
-- Clarify documentation that HTTP replication traffic can be protected with a shared secret. ([\#13656](https://github.com/matrix-org/synapse/issues/13656))
-- Remove unintentional colons from [config manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html) headers. ([\#13665](https://github.com/matrix-org/synapse/issues/13665))
-- Update docs to make enabling metrics more clear. ([\#13678](https://github.com/matrix-org/synapse/issues/13678))
-- Clarify `(room_id, event_id)` global uniqueness and how we should scope our database schemas. ([\#13701](https://github.com/matrix-org/synapse/issues/13701))
-
-
-Deprecations and Removals
--------------------------
-
-- Drop support for calling `/_matrix/client/v3/rooms/{roomId}/invite` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13241](https://github.com/matrix-org/synapse/issues/13241))
-- Remove redundant `_get_joined_users_from_context` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13569](https://github.com/matrix-org/synapse/issues/13569))
-- Remove the ability to use direct TCP replication with workers. Direct TCP replication was deprecated in Synapse 1.18.0. Workers now require using Redis. ([\#13647](https://github.com/matrix-org/synapse/issues/13647))
-- Remove support for unstable [private read receipts](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13653](https://github.com/matrix-org/synapse/issues/13653), [\#13692](https://github.com/matrix-org/synapse/issues/13692))
-
-
-Internal Changes
-----------------
-
-- Extend the release script to wait for GitHub Actions to finish and to be usable as a guide for the whole process. ([\#13483](https://github.com/matrix-org/synapse/issues/13483))
-- Add experimental configuration option to allow disabling legacy Prometheus metric names. ([\#13540](https://github.com/matrix-org/synapse/issues/13540))
-- Cache user IDs instead of profiles to reduce cache memory usage. Contributed by Nick @ Beeper (@fizzadar). ([\#13573](https://github.com/matrix-org/synapse/issues/13573), [\#13600](https://github.com/matrix-org/synapse/issues/13600))
-- Optimize how Synapse calculates domains to fetch from during backfill. ([\#13575](https://github.com/matrix-org/synapse/issues/13575))
-- Comment about a better future where we can get the state diff between two events. ([\#13586](https://github.com/matrix-org/synapse/issues/13586))
-- Instrument `_check_sigs_and_hash_and_fetch` to trace time spent in child concurrent calls for understandable traces in Jaeger. ([\#13588](https://github.com/matrix-org/synapse/issues/13588))
-- Improve performance of `@cachedList`. ([\#13591](https://github.com/matrix-org/synapse/issues/13591))
-- Minor speed up of fetching large numbers of push rules. ([\#13592](https://github.com/matrix-org/synapse/issues/13592))
-- Optimise push action fetching queries. Contributed by Nick @ Beeper (@fizzadar). ([\#13597](https://github.com/matrix-org/synapse/issues/13597))
-- Rename `event_map` to `unpersisted_events` when computing the auth differences. ([\#13603](https://github.com/matrix-org/synapse/issues/13603))
-- Refactor `get_users_in_room(room_id)` mis-use with dedicated `get_current_hosts_in_room(room_id)` function. ([\#13605](https://github.com/matrix-org/synapse/issues/13605))
-- Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating `join_authorised_via_users_server` of a `/make_join` request. ([\#13606](https://github.com/matrix-org/synapse/issues/13606))
-- Refactor `get_users_in_room(room_id)` mis-use to lookup single local user with dedicated `check_local_user_in_room(...)` function. ([\#13608](https://github.com/matrix-org/synapse/issues/13608))
-- Drop unused column `application_services_state.last_txn`. ([\#13627](https://github.com/matrix-org/synapse/issues/13627))
-- Improve readability of Complement CI logs by printing failure results last. ([\#13639](https://github.com/matrix-org/synapse/issues/13639))
-- Generalise the `@cancellable` annotation so it can be used on functions other than just servlet methods. ([\#13662](https://github.com/matrix-org/synapse/issues/13662))
-- Introduce a `CommonUsageMetrics` class to share some usage metrics between the Prometheus exporter and the phone home stats. ([\#13671](https://github.com/matrix-org/synapse/issues/13671))
-- Add some logging to help track down #13444. ([\#13679](https://github.com/matrix-org/synapse/issues/13679))
-- Update poetry lock file for v1.2.0. ([\#13689](https://github.com/matrix-org/synapse/issues/13689))
-- Add cache to `is_partial_state_room`. ([\#13693](https://github.com/matrix-org/synapse/issues/13693))
-- Update the Grafana dashboard that is included with Synapse in the `contrib` directory. ([\#13697](https://github.com/matrix-org/synapse/issues/13697))
-- Only run trial CI on all python versions on non-PRs. ([\#13698](https://github.com/matrix-org/synapse/issues/13698))
-- Fix typechecking with latest types-jsonschema. ([\#13712](https://github.com/matrix-org/synapse/issues/13712))
-- Reduce number of CI checks we run for PRs. ([\#13713](https://github.com/matrix-org/synapse/issues/13713))
-
-
-Synapse 1.66.0 (2022-08-31)
-===========================
-
-No significant changes since 1.66.0rc2.
-
-This release removes the ability for homeservers to delegate email ownership verification and password reset confirmation to identity servers. This removal was originally planned for Synapse 1.64, but was later deferred until now. See the [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details.
-
-Deployments with multiple workers should note that the direct TCP replication configuration was deprecated in Synapse 1.18.0 and will be removed in Synapse v1.67.0. In particular, the TCP `replication` [listener](https://matrix-org.github.io/synapse/v1.66/usage/configuration/config_documentation.html#listeners) type (not to be confused with the `replication` resource on the `http` listener type) and the `worker_replication_port` config option will be removed.
-
-To migrate to Redis, add the [`redis` config](https://matrix-org.github.io/synapse/v1.66/workers.html#shared-configuration), then remove the TCP `replication` listener from the main process's config and `worker_replication_port` from the worker config. Note that an HTTP listener with a `replication` resource is still required, as sketched below. See the [worker documentation](https://matrix-org.github.io/synapse/v1.66/workers.html) for more details.
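-
-A rough sketch of the migration in YAML (hostname, Redis port and listener port are placeholder examples):
-
-```yaml
-# Shared configuration: enable Redis for worker communication.
-redis:
-  enabled: true
-  host: localhost
-  port: 6379
-
-# Main process: keep an HTTP listener with the `replication` resource;
-# this replaces the removed TCP `replication` listener type.
-listeners:
-  - port: 9093
-    type: http
-    resources:
-      - names: [replication]
-```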
-
-
-Synapse 1.66.0rc2 (2022-08-30)
-==============================
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse 1.66.0rc1 where the new rate limit metrics were misreported (`synapse_rate_limit_sleep_affected_hosts`, `synapse_rate_limit_reject_affected_hosts`). ([\#13649](https://github.com/matrix-org/synapse/issues/13649))
-
-
-Synapse 1.66.0rc1 (2022-08-23)
-==============================
-
-Features
---------
-
-- Improve validation of request bodies for the following client-server API endpoints: [`/account/password`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpassword), [`/account/password/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpasswordemailrequesttoken), [`/account/deactivate`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountdeactivate) and [`/account/3pid/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidemailrequesttoken). ([\#13188](https://github.com/matrix-org/synapse/issues/13188), [\#13563](https://github.com/matrix-org/synapse/issues/13563))
-- Add forgotten status to [Room Details Admin API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#room-details-api). ([\#13503](https://github.com/matrix-org/synapse/issues/13503))
-- Add an experimental implementation for [MSC3852 (Expose user agents on `Device`)](https://github.com/matrix-org/matrix-spec-proposals/pull/3852). ([\#13549](https://github.com/matrix-org/synapse/issues/13549))
-- Add `org.matrix.msc2716v4` experimental room version with updated content fields. Part of [MSC2716 (Importing history)](https://github.com/matrix-org/matrix-spec-proposals/pull/2716). ([\#13551](https://github.com/matrix-org/synapse/issues/13551))
-- Add support for compression to federation responses. ([\#13537](https://github.com/matrix-org/synapse/issues/13537))
-- Improve performance of sending messages in rooms with thousands of local users. ([\#13522](https://github.com/matrix-org/synapse/issues/13522), [\#13547](https://github.com/matrix-org/synapse/issues/13547))
-
-
-Bugfixes
---------
-
-- Faster room joins: make `/joined_members` block whilst the room is partial-stated. ([\#13514](https://github.com/matrix-org/synapse/issues/13514))
-- Fix a bug introduced in Synapse 1.21.0 where the [`/event_reports` Admin API](https://matrix-org.github.io/synapse/develop/admin_api/event_reports.html) could return a total count which was larger than the number of results you can actually query for. ([\#13525](https://github.com/matrix-org/synapse/issues/13525))
-- Fix a bug introduced in Synapse 1.52.0 where sending server notices would fail if `max_avatar_size` or `allowed_avatar_mimetypes` was set but `system_mxid_avatar_url` was not. ([\#13566](https://github.com/matrix-org/synapse/issues/13566))
-- Fix a bug where the `opentracing.force_tracing_for_users` config option would not apply to [`/sendToDevice`](https://spec.matrix.org/v1.3/client-server-api/#put_matrixclientv3sendtodeviceeventtypetxnid) and [`/keys/upload`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3keysupload) requests. ([\#13574](https://github.com/matrix-org/synapse/issues/13574))
-
-
-Improved Documentation
-----------------------
-
-- Add `openssl` example for generating registration HMAC digest. ([\#13472](https://github.com/matrix-org/synapse/issues/13472))
-- Tidy up Synapse's README. ([\#13491](https://github.com/matrix-org/synapse/issues/13491))
-- Document that event purging related to the `redaction_retention_period` config option is executed only every 5 minutes. ([\#13492](https://github.com/matrix-org/synapse/issues/13492))
-- Add a warning to retention documentation regarding the possibility of database corruption. ([\#13497](https://github.com/matrix-org/synapse/issues/13497))
-- Document that the `DOCKER_BUILDKIT=1` flag is needed to build the docker image. ([\#13515](https://github.com/matrix-org/synapse/issues/13515))
-- Add missing links in `user_consent` section of configuration manual. ([\#13536](https://github.com/matrix-org/synapse/issues/13536))
-- Fix the doc and some warnings that were referring to the nonexistent `custom_templates_directory` setting (instead of `custom_template_directory`). ([\#13538](https://github.com/matrix-org/synapse/issues/13538))
-
-
-Deprecations and Removals
--------------------------
-
-- Remove the ability for homeservers to delegate email ownership verification and password reset confirmation to identity servers. See [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details.
-
-
-Internal Changes
-----------------
-
-### Faster room joins
-
-- Update the rejected state of events during de-partial-stating. ([\#13459](https://github.com/matrix-org/synapse/issues/13459))
-- Avoid blocking lazy-loading `/sync`s during partial joins due to remote memberships. Pull remote memberships from auth events instead of the room state. ([\#13477](https://github.com/matrix-org/synapse/issues/13477))
-- Refuse to start when faster joins is enabled on a deployment with workers, since worker configurations are not currently supported. ([\#13531](https://github.com/matrix-org/synapse/issues/13531))
-
-### Metrics and tracing
-
-- Allow use of both `@trace` and `@tag_args` stacked on the same function. ([\#13453](https://github.com/matrix-org/synapse/issues/13453))
-- Instrument the federation/backfill part of `/messages` for understandable traces in Jaeger. ([\#13489](https://github.com/matrix-org/synapse/issues/13489))
-- Instrument `FederationStateIdsServlet` (`/state_ids`) for understandable traces in Jaeger. ([\#13499](https://github.com/matrix-org/synapse/issues/13499), [\#13554](https://github.com/matrix-org/synapse/issues/13554))
-- Track HTTP response times over 10 seconds from `/messages` (`synapse_room_message_list_rest_servlet_response_time_seconds`). ([\#13533](https://github.com/matrix-org/synapse/issues/13533))
-- Add metrics to track how the rate limiter is affecting requests (sleep/reject).
([\#13534](https://github.com/matrix-org/synapse/issues/13534), [\#13541](https://github.com/matrix-org/synapse/issues/13541)) -- Add metrics to time how long it takes us to do backfill processing (`synapse_federation_backfill_processing_before_time_seconds`, `synapse_federation_backfill_processing_after_time_seconds`). ([\#13535](https://github.com/matrix-org/synapse/issues/13535), [\#13584](https://github.com/matrix-org/synapse/issues/13584)) -- Add metrics to track rate limiter queue timing (`synapse_rate_limit_queue_wait_time_seconds`). ([\#13544](https://github.com/matrix-org/synapse/issues/13544)) -- Update metrics to track `/messages` response time by room size. ([\#13545](https://github.com/matrix-org/synapse/issues/13545)) - -### Everything else - -- Refactor methods in `synapse.api.auth.Auth` to use `Requester` objects everywhere instead of user IDs. ([\#13024](https://github.com/matrix-org/synapse/issues/13024)) -- Clean-up tests for notifications. ([\#13471](https://github.com/matrix-org/synapse/issues/13471)) -- Add some miscellaneous comments to document sync, especially around `compute_state_delta`. ([\#13474](https://github.com/matrix-org/synapse/issues/13474)) -- Use literals in place of `HTTPStatus` constants in tests. ([\#13479](https://github.com/matrix-org/synapse/issues/13479), [\#13488](https://github.com/matrix-org/synapse/issues/13488)) -- Add comments about how event push actions are rotated. ([\#13485](https://github.com/matrix-org/synapse/issues/13485)) -- Modify HTML template content to better support mobile devices' screen sizes. ([\#13493](https://github.com/matrix-org/synapse/issues/13493)) -- Add a linter script which will reject non-strict types in Pydantic models. ([\#13502](https://github.com/matrix-org/synapse/issues/13502)) -- Reduce the number of tests using legacy TCP replication. ([\#13543](https://github.com/matrix-org/synapse/issues/13543)) -- Allow specifying additional request fields when using the `HomeServerTestCase.login` helper method. ([\#13549](https://github.com/matrix-org/synapse/issues/13549)) -- Make `HomeServerTestCase` load any configured homeserver modules automatically. ([\#13558](https://github.com/matrix-org/synapse/issues/13558)) - - -Synapse 1.65.0 (2022-08-16) -=========================== - -No significant changes since 1.65.0rc2. - - -Synapse 1.65.0rc2 (2022-08-11) -============================== - -Internal Changes ----------------- - -- Revert 'Remove the unspecced `room_id` field in the `/hierarchy` response. ([\#13365](https://github.com/matrix-org/synapse/issues/13365))' to give more time for clients to update. ([\#13501](https://github.com/matrix-org/synapse/issues/13501)) - - -Synapse 1.65.0rc1 (2022-08-09) -============================== - -Features --------- - -- Add support for stable prefixes for [MSC2285 (private read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13273](https://github.com/matrix-org/synapse/issues/13273)) -- Add new unstable error codes `ORG.MATRIX.MSC3848.ALREADY_JOINED`, `ORG.MATRIX.MSC3848.NOT_JOINED`, and `ORG.MATRIX.MSC3848.INSUFFICIENT_POWER` described in [MSC3848](https://github.com/matrix-org/matrix-spec-proposals/pull/3848). ([\#13343](https://github.com/matrix-org/synapse/issues/13343)) -- Use stable prefixes for [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827). ([\#13370](https://github.com/matrix-org/synapse/issues/13370)) -- Add a new module API method to translate a room alias into a room ID. 
([\#13428](https://github.com/matrix-org/synapse/issues/13428))
-- Add a new module API method to create a room. ([\#13429](https://github.com/matrix-org/synapse/issues/13429))
-- Add remote join capability to the module API's `update_room_membership` method (in a backwards compatible manner). ([\#13441](https://github.com/matrix-org/synapse/issues/13441))
-
-
-Bugfixes
---------
-
-- Update the version of the LDAP3 auth provider module included in the `matrixdotorg/synapse` DockerHub images and the Debian packages hosted on packages.matrix.org to 0.2.2. This version fixes a regression in the module. ([\#13470](https://github.com/matrix-org/synapse/issues/13470))
-- Fix a bug introduced in Synapse 1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`) (this was reverted in v1.65.0rc2, see changelog notes above). ([\#13365](https://github.com/matrix-org/synapse/issues/13365))
-- Fix a bug introduced in Synapse 0.24.0 that would respond with the wrong error status code to `/joined_members` requests when the requester is not a current member of the room. Contributed by @andrewdoh. ([\#13374](https://github.com/matrix-org/synapse/issues/13374))
-- Fix a bug in the handling of typing events for appservices. Contributed by Nick @ Beeper (@fizzadar). ([\#13392](https://github.com/matrix-org/synapse/issues/13392))
-- Fix a bug introduced in Synapse 1.57.0 where rooms listed in `exclude_rooms_from_sync` in the configuration file would not be properly excluded from incremental syncs. ([\#13408](https://github.com/matrix-org/synapse/issues/13408))
-- Fix a bug in the experimental faster-room-joins support which could cause it to get stuck in an infinite loop. ([\#13353](https://github.com/matrix-org/synapse/issues/13353))
-- Faster room joins: fix a bug which caused rejected events to become un-rejected during state syncing. ([\#13413](https://github.com/matrix-org/synapse/issues/13413))
-- Faster room joins: fix error when running out of servers to sync partial state with, so that Synapse raises the intended error instead. ([\#13432](https://github.com/matrix-org/synapse/issues/13432))
-
-
-Updates to the Docker image
----------------------------
-
-- Make Docker images build on armv7 by installing cryptography dependencies in the 'requirements' stage. Contributed by Jasper Spaans. ([\#13372](https://github.com/matrix-org/synapse/issues/13372))
-
-
-Improved Documentation
-----------------------
-
-- Update the 'registration tokens' page to acknowledge that the relevant MSC was merged into version 1.2 of the Matrix specification. Contributed by @moan0s. ([\#11897](https://github.com/matrix-org/synapse/issues/11897))
-- Document which HTTP resources support gzip compression. ([\#13221](https://github.com/matrix-org/synapse/issues/13221))
-- Add steps describing how to elevate an existing user to administrator by manipulating the database. ([\#13230](https://github.com/matrix-org/synapse/issues/13230))
-- Fix the wrong headline for `url_preview_accept_language` in the documentation. ([\#13437](https://github.com/matrix-org/synapse/issues/13437))
-- Remove redundant 'Contents' section from the Configuration Manual. Contributed by @dklimpel. ([\#13438](https://github.com/matrix-org/synapse/issues/13438))
-- Update documentation for config setting `macaroon_secret_key`. ([\#13443](https://github.com/matrix-org/synapse/issues/13443))
-- Update outdated information in the `sso_mapping_providers` documentation. ([\#13449](https://github.com/matrix-org/synapse/issues/13449))
-- Fix example code in module documentation of `password_auth_provider_callbacks`. ([\#13450](https://github.com/matrix-org/synapse/issues/13450))
-- Make the configuration for the cache clearer. ([\#13481](https://github.com/matrix-org/synapse/issues/13481))
-
-
-Internal Changes
-----------------
-
-- Extend the release script to automatically push a new SyTest branch, rather than having that be a manual process. ([\#12978](https://github.com/matrix-org/synapse/issues/12978))
-- Make minor clarifications to the error messages given when we fail to join a room via any server. ([\#13160](https://github.com/matrix-org/synapse/issues/13160))
-- Enable Complement CI tests in the 'latest deps' test run. ([\#13213](https://github.com/matrix-org/synapse/issues/13213))
-- Fix long-standing bugged logic, never hit in practice, where `get_pdu` would keep asking every remote destination even after it had found the event. ([\#13346](https://github.com/matrix-org/synapse/issues/13346))
-- Faster room joins: avoid blocking when pulling events with partially missing prev events. ([\#13355](https://github.com/matrix-org/synapse/issues/13355))
-- Instrument `/messages` for understandable traces in Jaeger. ([\#13368](https://github.com/matrix-org/synapse/issues/13368))
-- Remove an unused argument to `get_relations_for_event`. ([\#13383](https://github.com/matrix-org/synapse/issues/13383))
-- Add a `merge-back` command to the release script, which automates merging the correct branches after a release. ([\#13393](https://github.com/matrix-org/synapse/issues/13393))
-- Add missing type hints to tests. ([\#13397](https://github.com/matrix-org/synapse/issues/13397))
-- Faster Room Joins: don't leave a stuck room partial state flag if the join fails. ([\#13403](https://github.com/matrix-org/synapse/issues/13403))
-- Refactor `_resolve_state_at_missing_prevs` to compute an `EventContext` instead. ([\#13404](https://github.com/matrix-org/synapse/issues/13404), [\#13431](https://github.com/matrix-org/synapse/issues/13431))
-- Faster Room Joins: prevent Synapse from answering federated join requests for a room which it has not fully joined yet. ([\#13416](https://github.com/matrix-org/synapse/issues/13416))
-- Re-enable running Complement tests against Synapse with workers. ([\#13420](https://github.com/matrix-org/synapse/issues/13420))
-- Prevent unnecessary lookups to any external `get_event` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13435](https://github.com/matrix-org/synapse/issues/13435))
-- Add some tracing to give more insight into local room joins. ([\#13439](https://github.com/matrix-org/synapse/issues/13439))
-- Rename class `RateLimitConfig` to `RatelimitSettings` and `FederationRateLimitConfig` to `FederationRatelimitSettings`. ([\#13442](https://github.com/matrix-org/synapse/issues/13442))
-- Add some comments about how event push actions are stored. ([\#13445](https://github.com/matrix-org/synapse/issues/13445), [\#13455](https://github.com/matrix-org/synapse/issues/13455))
-- Improve rebuild speed for the "synapse-workers" docker image. ([\#13447](https://github.com/matrix-org/synapse/issues/13447))
-- Fix `@tag_args` being off-by-one with the arguments when tagging a span (tracing). ([\#13452](https://github.com/matrix-org/synapse/issues/13452))
-- Update type of `EventContext.rejected`. ([\#13460](https://github.com/matrix-org/synapse/issues/13460))
-- Use literals in place of `HTTPStatus` constants in tests. ([\#13463](https://github.com/matrix-org/synapse/issues/13463), [\#13469](https://github.com/matrix-org/synapse/issues/13469))
-- Correct a misnamed argument in state res v2 internals. ([\#13467](https://github.com/matrix-org/synapse/issues/13467))
-
-
-Synapse 1.64.0 (2022-08-02)
-===========================
-
-No significant changes since 1.64.0rc2.
-
-
-Deprecation Warning
--------------------
-
-Synapse 1.66.0 will remove the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server.
-
-If you require your homeserver to verify e-mail addresses or to support password resets via e-mail, please configure your homeserver with SMTP access so that it can send e-mails on its own behalf.
-[Consult the configuration documentation for more information.](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#email)
-
-
-Synapse 1.64.0rc2 (2022-07-29)
-==============================
-
-This RC reintroduces support for `account_threepid_delegates.email`, which was removed in 1.64.0rc1. It remains deprecated and will be removed altogether in Synapse 1.66.0. ([\#13406](https://github.com/matrix-org/synapse/issues/13406))
-
-
-Synapse 1.64.0rc1 (2022-07-26)
-==============================
-
-This RC removed the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server.
-
-We have also stopped building `.deb` packages for Ubuntu 21.10 as it is no longer an active version of Ubuntu.
-
-
-Features
---------
-
-- Improve error messages when media thumbnails cannot be served. ([\#13038](https://github.com/matrix-org/synapse/issues/13038))
-- Allow pagination from remote event after discovering it from [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event`. ([\#13205](https://github.com/matrix-org/synapse/issues/13205))
-- Add a `room_type` field in the responses for the list room and room details admin APIs. Contributed by @andrewdoh. ([\#13208](https://github.com/matrix-org/synapse/issues/13208))
-- Add support for room version 10. ([\#13220](https://github.com/matrix-org/synapse/issues/13220))
-- Add per-room rate limiting for room joins. For each room, Synapse now monitors the rate of join events in that room, and throttles additional joins if that rate grows too large. ([\#13253](https://github.com/matrix-org/synapse/issues/13253), [\#13254](https://github.com/matrix-org/synapse/issues/13254), [\#13255](https://github.com/matrix-org/synapse/issues/13255), [\#13276](https://github.com/matrix-org/synapse/issues/13276))
-- Support Implicit TLS (TLS without using a STARTTLS upgrade, typically on port 465) for sending emails, enabled by the new option `force_tls`. Contributed by Jan Schär. ([\#13317](https://github.com/matrix-org/synapse/issues/13317))
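-
-A minimal sketch of an `email` block using the new option (hostname and credentials are placeholders):
-
-```yaml
-email:
-  smtp_host: mail.example.com
-  smtp_port: 465
-  smtp_user: synapse
-  smtp_pass: secret
-  # Implicit TLS: encrypt from the first byte instead of upgrading via STARTTLS.
-  force_tls: true
-  notif_from: "Your %(app)s homeserver <noreply@example.com>"
-```
-
-This also covers the Deprecation Warning above: with working SMTP settings, the homeserver can send verification and password-reset e-mails itself rather than delegating to an identity server.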
-
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse 1.15.0 where adding a user through the Synapse Admin API with a phone number would fail if the `enable_email_notifs` and `email_notifs_for_new_users` options were enabled. Contributed by @thomasweston12. ([\#13263](https://github.com/matrix-org/synapse/issues/13263))
-- Fix a bug introduced in Synapse 1.40.0 where a user invited to a restricted room would be briefly unable to join. ([\#13270](https://github.com/matrix-org/synapse/issues/13270))
-- Fix a long-standing bug where, in rare instances, Synapse could store the incorrect state for a room after a state resolution. ([\#13278](https://github.com/matrix-org/synapse/issues/13278))
-- Fix a bug introduced in v1.18.0 where the `synapse_pushers` metric would overcount pushers when they are replaced. ([\#13296](https://github.com/matrix-org/synapse/issues/13296))
-- Disable autocorrection and autocapitalisation on the username text field shown during registration when using SSO. ([\#13350](https://github.com/matrix-org/synapse/issues/13350))
-- Update locked version of `frozendict` to 2.3.3, which has fixes for memory leaks affecting `/sync`. ([\#13284](https://github.com/matrix-org/synapse/issues/13284), [\#13352](https://github.com/matrix-org/synapse/issues/13352))
-
-
-Improved Documentation
-----------------------
-
-- Provide an example of using the Admin API. Contributed by @jejo86. ([\#13231](https://github.com/matrix-org/synapse/issues/13231))
-- Move the documentation for how URL previews work to the URL preview module. ([\#13233](https://github.com/matrix-org/synapse/issues/13233), [\#13261](https://github.com/matrix-org/synapse/issues/13261))
-- Add another `contrib` script to help set up worker processes. Contributed by @villepeh. ([\#13271](https://github.com/matrix-org/synapse/issues/13271))
-- Document that certain config options were added or changed in Synapse 1.62. Contributed by @behrmann. ([\#13314](https://github.com/matrix-org/synapse/issues/13314))
-- Document the new `rc_invites.per_issuer` throttling option added in Synapse 1.63. ([\#13333](https://github.com/matrix-org/synapse/issues/13333))
-- Mention that BuildKit is needed when building Docker images for tests. ([\#13338](https://github.com/matrix-org/synapse/issues/13338))
-- Improve Caddy reverse proxy documentation. ([\#13344](https://github.com/matrix-org/synapse/issues/13344))
-
-
-Deprecations and Removals
--------------------------
-
-- Drop tables that were formerly used for groups/communities. ([\#12967](https://github.com/matrix-org/synapse/issues/12967))
-- Drop support for delegating email verification to an external server. ([\#13192](https://github.com/matrix-org/synapse/issues/13192))
-- Drop support for calling `/_matrix/client/v3/account/3pid/bind` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13239](https://github.com/matrix-org/synapse/issues/13239))
-- Stop building `.deb` packages for Ubuntu 21.10 (Impish Indri), which has reached end of life. ([\#13326](https://github.com/matrix-org/synapse/issues/13326))
-
-
-Internal Changes
-----------------
-
-- Use lower transaction isolation level when purging rooms to avoid serialization errors. Contributed by Nick @ Beeper. ([\#12942](https://github.com/matrix-org/synapse/issues/12942))
-- Remove code which incorrectly attempted to reconcile state with remote servers when processing incoming events. ([\#12943](https://github.com/matrix-org/synapse/issues/12943))
-- Make the AS login method call `Auth.get_user_by_req` for checking the AS token. ([\#13094](https://github.com/matrix-org/synapse/issues/13094))
-- Always use a version of canonicaljson that supports the C implementation of frozendict. ([\#13172](https://github.com/matrix-org/synapse/issues/13172))
-- Add prometheus counters for ephemeral events and to-device messages pushed to app services. Contributed by Brad @ Beeper. ([\#13175](https://github.com/matrix-org/synapse/issues/13175))
-- Refactor receipts servlet logic to avoid duplicated code.
([\#13198](https://github.com/matrix-org/synapse/issues/13198)) -- Preparation for database schema simplifications: populate `state_key` and `rejection_reason` for existing rows in the `events` table. ([\#13215](https://github.com/matrix-org/synapse/issues/13215)) -- Remove unused database table `event_reference_hashes`. ([\#13218](https://github.com/matrix-org/synapse/issues/13218)) -- Further reduce queries used sending events when creating new rooms. Contributed by Nick @ Beeper (@fizzadar). ([\#13224](https://github.com/matrix-org/synapse/issues/13224)) -- Call the v2 identity service `/3pid/unbind` endpoint, rather than v1. Contributed by @Vetchu. ([\#13240](https://github.com/matrix-org/synapse/issues/13240)) -- Use an asynchronous cache wrapper for the get event cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13242](https://github.com/matrix-org/synapse/issues/13242), [\#13308](https://github.com/matrix-org/synapse/issues/13308)) -- Optimise federation sender and appservice pusher event stream processing queries. Contributed by Nick @ Beeper (@fizzadar). ([\#13251](https://github.com/matrix-org/synapse/issues/13251)) -- Log the stack when waiting for an entire room to be un-partial stated. ([\#13257](https://github.com/matrix-org/synapse/issues/13257)) -- Fix spurious warning when fetching state after a missing prev event. ([\#13258](https://github.com/matrix-org/synapse/issues/13258)) -- Clean-up tests for notifications. ([\#13260](https://github.com/matrix-org/synapse/issues/13260)) -- Do not fail build if complement with workers fails. ([\#13266](https://github.com/matrix-org/synapse/issues/13266)) -- Don't pull out state in `compute_event_context` for unconflicted state. ([\#13267](https://github.com/matrix-org/synapse/issues/13267), [\#13274](https://github.com/matrix-org/synapse/issues/13274)) -- Reduce the rebuild time for the complement-synapse docker image. ([\#13279](https://github.com/matrix-org/synapse/issues/13279)) -- Don't pull out the full state when creating an event. ([\#13281](https://github.com/matrix-org/synapse/issues/13281), [\#13307](https://github.com/matrix-org/synapse/issues/13307)) -- Upgrade from Poetry 1.1.12 to 1.1.14, to fix bugs when locking packages. ([\#13285](https://github.com/matrix-org/synapse/issues/13285)) -- Make `DictionaryCache` expire full entries if they haven't been queried in a while, even if specific keys have been queried recently. ([\#13292](https://github.com/matrix-org/synapse/issues/13292)) -- Use `HTTPStatus` constants in place of literals in tests. ([\#13297](https://github.com/matrix-org/synapse/issues/13297)) -- Improve performance of query `_get_subset_users_in_room_with_profiles`. ([\#13299](https://github.com/matrix-org/synapse/issues/13299)) -- Up batch size of `bulk_get_push_rules` and `_get_joined_profiles_from_event_ids`. ([\#13300](https://github.com/matrix-org/synapse/issues/13300)) -- Remove unnecessary `json.dumps` from tests. ([\#13303](https://github.com/matrix-org/synapse/issues/13303)) -- Reduce memory usage of sending dummy events. ([\#13310](https://github.com/matrix-org/synapse/issues/13310)) -- Prevent formatting changes of [#3679](https://github.com/matrix-org/synapse/pull/3679) from appearing in `git blame`. ([\#13311](https://github.com/matrix-org/synapse/issues/13311)) -- Change `get_users_in_room` and `get_rooms_for_user` caches to enable pruning of old entries. 
([\#13313](https://github.com/matrix-org/synapse/issues/13313))
-- Validate federation destinations and log an error if a destination is invalid. ([\#13318](https://github.com/matrix-org/synapse/issues/13318))
-- Fix `FederationClient.get_pdu()` returning events from the cache as `outliers` instead of original events we saw over federation. ([\#13320](https://github.com/matrix-org/synapse/issues/13320))
-- Reduce memory usage of state caches. ([\#13323](https://github.com/matrix-org/synapse/issues/13323))
-- Reduce the amount of state we store in the `state_cache`. ([\#13324](https://github.com/matrix-org/synapse/issues/13324))
-- Add missing type hints to open tracing module. ([\#13328](https://github.com/matrix-org/synapse/issues/13328), [\#13345](https://github.com/matrix-org/synapse/issues/13345), [\#13362](https://github.com/matrix-org/synapse/issues/13362))
-- Remove old base slaved store and de-duplicate cache ID generators. Contributed by Nick @ Beeper (@fizzadar). ([\#13329](https://github.com/matrix-org/synapse/issues/13329), [\#13349](https://github.com/matrix-org/synapse/issues/13349))
-- When reporting metrics is enabled, use ~8x less data to describe DB transaction metrics. ([\#13342](https://github.com/matrix-org/synapse/issues/13342))
-- Faster room joins: skip soft fail checks while Synapse only has partial room state, since the current membership of event senders may not be accurately known. ([\#13354](https://github.com/matrix-org/synapse/issues/13354))
-
-
-Synapse 1.63.1 (2022-07-20)
-===========================
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse 1.63.0 where push actions were incorrectly calculated for appservice users. This caused performance issues on servers with large numbers of appservices. ([\#13332](https://github.com/matrix-org/synapse/issues/13332))
-
-
-Synapse 1.63.0 (2022-07-19)
-===========================
-
-Improved Documentation
-----------------------
-
-- Clarify that homeserver server names are included in the reported data when the `report_stats` config option is enabled. ([\#13321](https://github.com/matrix-org/synapse/issues/13321))
-
-
-Synapse 1.63.0rc1 (2022-07-12)
-==============================
-
-Features
---------
-
-- Add a rate limit for local users sending invites. ([\#13125](https://github.com/matrix-org/synapse/issues/13125))
-- Implement [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827): Filtering of `/publicRooms` by room type. ([\#13031](https://github.com/matrix-org/synapse/issues/13031))
-- Improve validation logic in the account data REST endpoints. ([\#13148](https://github.com/matrix-org/synapse/issues/13148))
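-
-A rough sketch of the new invite rate limit in `homeserver.yaml`, using the `rc_invites.per_issuer` shape documented in the 1.64.0rc1 notes above (the numbers are arbitrary examples, not defaults):
-
-```yaml
-rc_invites:
-  # Throttle invites sent by any single local user.
-  per_issuer:
-    per_second: 0.3
-    burst_count: 10
-```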
-
-
-Bugfixes
---------
-
-- Fix a long-standing bug where application services were not able to join remote federated rooms without a profile. ([\#13131](https://github.com/matrix-org/synapse/issues/13131))
-- Fix a long-standing bug where `_get_state_map_for_room` might raise errors when third party event rules callbacks are present. ([\#13174](https://github.com/matrix-org/synapse/issues/13174))
-- Fix a long-standing bug where the `synapse_port_db` script could fail to copy rows with negative row ids. ([\#13226](https://github.com/matrix-org/synapse/issues/13226))
-- Fix a bug introduced in 1.54.0 where appservices would not receive room-less EDUs, like presence, when both [MSC2409](https://github.com/matrix-org/matrix-spec-proposals/pull/2409) and [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202) are enabled. ([\#13236](https://github.com/matrix-org/synapse/issues/13236))
-- Fix a bug introduced in 1.62.0 where rows were not deleted from `event_push_actions` table on large servers. ([\#13194](https://github.com/matrix-org/synapse/issues/13194))
-- Fix a bug introduced in 1.62.0 where notification counts would get stuck after a highlighted message. ([\#13223](https://github.com/matrix-org/synapse/issues/13223))
-- Fix exception when using experimental [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to look for remote federated imported events before room creation. ([\#13197](https://github.com/matrix-org/synapse/issues/13197))
-- Fix [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202)-enabled appservices not receiving to-device messages, preventing messages from being decrypted. ([\#13235](https://github.com/matrix-org/synapse/issues/13235))
-
-
-Updates to the Docker image
----------------------------
-
-- Bump the version of `lxml` from 4.8.0 to 4.9.1 in the matrix.org Docker images and Debian packages. ([\#13207](https://github.com/matrix-org/synapse/issues/13207))
-
-
-Improved Documentation
-----------------------
-
-- Add an explanation of the `--report-stats` argument to the docs. ([\#13029](https://github.com/matrix-org/synapse/issues/13029))
-- Add a helpful example bash script to the contrib directory for creating multiple worker configuration files of the same type. Contributed by @villepeh. ([\#13032](https://github.com/matrix-org/synapse/issues/13032))
-- Add missing links to config options. ([\#13166](https://github.com/matrix-org/synapse/issues/13166))
-- Add documentation for homeserver usage statistics collection. ([\#13086](https://github.com/matrix-org/synapse/issues/13086))
-- Add documentation for the existing `databases` option in the homeserver configuration manual. ([\#13212](https://github.com/matrix-org/synapse/issues/13212))
-- Clean up references to sample configuration and redirect users to the configuration manual instead. ([\#13077](https://github.com/matrix-org/synapse/issues/13077), [\#13139](https://github.com/matrix-org/synapse/issues/13139))
-- Document how the Synapse team does reviews. ([\#13132](https://github.com/matrix-org/synapse/issues/13132))
-- Fix the wrong section header for `allow_public_rooms_over_federation` in the homeserver config documentation. ([\#13116](https://github.com/matrix-org/synapse/issues/13116))
-
-
-Deprecations and Removals
--------------------------
-
-- Remove the obsolete `RoomEventsStoreTestCase`, which had been unused for 8 years. Contributed by @arkamar. ([\#13200](https://github.com/matrix-org/synapse/issues/13200))
-
-
-Internal Changes
-----------------
-
-- Add type annotations to `synapse.logging`, `tests.server` and `tests.utils`. ([\#13028](https://github.com/matrix-org/synapse/issues/13028), [\#13103](https://github.com/matrix-org/synapse/issues/13103), [\#13159](https://github.com/matrix-org/synapse/issues/13159), [\#13136](https://github.com/matrix-org/synapse/issues/13136))
-- Enforce type annotations for `tests.test_server`. ([\#13135](https://github.com/matrix-org/synapse/issues/13135))
-- Support temporary experimental return values for spam checker module callbacks. ([\#13044](https://github.com/matrix-org/synapse/issues/13044))
-- Add support to `complement.sh` for skipping the docker build.
([\#13143](https://github.com/matrix-org/synapse/issues/13143), [\#13158](https://github.com/matrix-org/synapse/issues/13158)) -- Add support to `complement.sh` for setting the log level using the `SYNAPSE_TEST_LOG_LEVEL` environment variable. ([\#13152](https://github.com/matrix-org/synapse/issues/13152)) -- Enable Complement testing in the 'Twisted Trunk' CI runs. ([\#13079](https://github.com/matrix-org/synapse/issues/13079), [\#13157](https://github.com/matrix-org/synapse/issues/13157)) -- Improve startup times in Complement test runs against workers, particularly in CPU-constrained environments. ([\#13127](https://github.com/matrix-org/synapse/issues/13127)) -- Update config used by Complement to allow device name lookup over federation. ([\#13167](https://github.com/matrix-org/synapse/issues/13167)) -- Faster room joins: handle race between persisting an event and un-partial stating a room. ([\#13100](https://github.com/matrix-org/synapse/issues/13100)) -- Faster room joins: fix race in recalculation of current room state. ([\#13151](https://github.com/matrix-org/synapse/issues/13151)) -- Faster room joins: skip waiting for full state when processing incoming events over federation. ([\#13144](https://github.com/matrix-org/synapse/issues/13144)) -- Raise a `DependencyError` on missing dependencies instead of a `ConfigError`. ([\#13113](https://github.com/matrix-org/synapse/issues/13113)) -- Avoid stripping line breaks from SQL sent to the database. ([\#13129](https://github.com/matrix-org/synapse/issues/13129)) -- Apply ratelimiting earlier in processing of `/send` requests. ([\#13134](https://github.com/matrix-org/synapse/issues/13134)) -- Improve exception handling when processing events received over federation. ([\#13145](https://github.com/matrix-org/synapse/issues/13145)) -- Check that `auto_vacuum` is disabled when porting a SQLite database to Postgres, as `VACUUM`s must not be performed between runs of the script. ([\#13195](https://github.com/matrix-org/synapse/issues/13195)) -- Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. ([\#13119](https://github.com/matrix-org/synapse/issues/13119), [\#13153](https://github.com/matrix-org/synapse/issues/13153)) -- Reduce memory consumption when processing incoming events in large rooms. ([\#13078](https://github.com/matrix-org/synapse/issues/13078), [\#13222](https://github.com/matrix-org/synapse/issues/13222)) -- Reduce number of queries used to get profile information. Contributed by Nick @ Beeper (@fizzadar). ([\#13209](https://github.com/matrix-org/synapse/issues/13209)) -- Reduce number of events queried during room creation. Contributed by Nick @ Beeper (@fizzadar). ([\#13210](https://github.com/matrix-org/synapse/issues/13210)) -- More aggressively rotate push actions. ([\#13211](https://github.com/matrix-org/synapse/issues/13211)) -- Add `max_line_length` setting for Python files to the `.editorconfig`. Contributed by @sumnerevans @ Beeper. ([\#13228](https://github.com/matrix-org/synapse/issues/13228)) - -Synapse 1.62.0 (2022-07-05) -=========================== - -No significant changes since 1.62.0rc3. - -Authors of spam-checker plugins should consult the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.62/docs/upgrade.md#upgrading-to-v1620) to learn about the enriched signatures for spam checker callbacks, which are supported with this release of Synapse. - -## Security advisory - -The following issue is fixed in 1.62.0. 
-
-* [GHSA-jhjh-776m-4765](https://github.com/matrix-org/synapse/security/advisories/GHSA-jhjh-776m-4765) / [CVE-2022-31152](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-31152)
-
-  Synapse instances prior to 1.62.0 did not implement the Matrix [event authorization rules](https://spec.matrix.org/v1.3/rooms/v10/#authorization-rules) correctly. An attacker could craft events which would be accepted by Synapse but not a spec-conformant server, potentially causing divergence in the room state between servers.
-
-  Homeservers with federation disabled via the [`federation_domain_whitelist`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#federation_domain_whitelist) config option are unaffected.
-
-  Administrators of homeservers with federation enabled are advised to upgrade to v1.62.0 or higher.
-
-  Fixed by [#13087](https://github.com/matrix-org/synapse/pull/13087) and [#13088](https://github.com/matrix-org/synapse/pull/13088).
-
-Synapse 1.62.0rc3 (2022-07-04)
-==============================
-
-Bugfixes
---------
-
-- Update the version of the [ldap3 plugin](https://github.com/matrix-org/matrix-synapse-ldap3/) included in the `matrixdotorg/synapse` DockerHub images and the Debian packages hosted on `packages.matrix.org` to 0.2.1. This fixes [a bug](https://github.com/matrix-org/matrix-synapse-ldap3/pull/163) with usernames containing uppercase characters. ([\#13156](https://github.com/matrix-org/synapse/issues/13156))
-- Fix a bug introduced in Synapse 1.62.0rc1 affecting unread counts for users on small servers. ([\#13168](https://github.com/matrix-org/synapse/issues/13168))
-
-
-Synapse 1.62.0rc2 (2022-07-01)
-==============================
-
-Bugfixes
---------
-
-- Fix unread counts for users on large servers. Introduced in v1.62.0rc1. ([\#13140](https://github.com/matrix-org/synapse/issues/13140))
-- Fix DB performance when deleting old push notifications. Introduced in v1.62.0rc1. ([\#13141](https://github.com/matrix-org/synapse/issues/13141))
-
-
-Synapse 1.62.0rc1 (2022-06-28)
-==============================
-
-Features
---------
-
-- Port the spam-checker API callbacks to a new, richer API. This is part of an ongoing change to let spam-checker modules inform users of the reason their event or operation is rejected. ([\#12857](https://github.com/matrix-org/synapse/issues/12857), [\#13047](https://github.com/matrix-org/synapse/issues/13047))
-- Allow server admins to customise the response of the `/.well-known/matrix/client` endpoint. ([\#13035](https://github.com/matrix-org/synapse/issues/13035))
-- Add metrics measuring the CPU and DB time spent in state resolution. ([\#13036](https://github.com/matrix-org/synapse/issues/13036))
-- Speed up fetching of device list changes in `/sync` and `/keys/changes`. ([\#13045](https://github.com/matrix-org/synapse/issues/13045), [\#13098](https://github.com/matrix-org/synapse/issues/13098))
-- Improve URL previews for sites which only provide Twitter Card metadata, e.g. LWN.net. ([\#13056](https://github.com/matrix-org/synapse/issues/13056))
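-
-As a hedged illustration of the well-known customisation (assuming the option name `extra_well_known_client_content` from the configuration manual; the extra property is a made-up example):
-
-```yaml
-# Additional properties merged into the response of
-# GET /.well-known/matrix/client.
-extra_well_known_client_content:
-  org.example.custom_property: "custom_value"
-```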
([\#12973](https://github.com/matrix-org/synapse/issues/12973)) -- Fix a bug introduced in Synapse 1.60 where Synapse would fail to start if the `sqlite3` module was not available. ([\#12979](https://github.com/matrix-org/synapse/issues/12979)) -- Fix a bug where non-standard information was required when requesting the `/hierarchy` API over federation. Introduced in Synapse 1.41.0. ([\#12991](https://github.com/matrix-org/synapse/issues/12991)) -- Fix a long-standing bug which meant that rate limiting was not restrictive enough in some cases. ([\#13018](https://github.com/matrix-org/synapse/issues/13018)) -- Fix a bug introduced in Synapse 1.58 where profile requests for a malformed user ID would cause an internal error. Synapse now returns 400 Bad Request in this situation. ([\#13041](https://github.com/matrix-org/synapse/issues/13041)) -- Fix some inconsistencies in the event authentication code. ([\#13087](https://github.com/matrix-org/synapse/issues/13087), [\#13088](https://github.com/matrix-org/synapse/issues/13088)) -- Fix a long-standing bug where room directory requests would cause an internal server error if given a malformed room alias. ([\#13106](https://github.com/matrix-org/synapse/issues/13106)) - - -Improved Documentation ---------------------- - -- Add documentation for how to configure Synapse with Workers using Docker Compose. Includes example worker config and docker-compose.yaml. Contributed by @Thumbscrew. ([\#12737](https://github.com/matrix-org/synapse/issues/12737)) -- Ensure the [Poetry cheat sheet](https://matrix-org.github.io/synapse/develop/development/dependencies.html) is available in the online documentation. ([\#13022](https://github.com/matrix-org/synapse/issues/13022)) -- Mention removed community/group worker endpoints in upgrade.md. Contributed by @olmari. ([\#13023](https://github.com/matrix-org/synapse/issues/13023)) -- Add instructions for running Complement with `gotestfmt`-formatted output locally. ([\#13073](https://github.com/matrix-org/synapse/issues/13073)) -- Update OpenTracing docs to reference the configuration manual rather than the configuration file. ([\#13076](https://github.com/matrix-org/synapse/issues/13076)) -- Update information on downstream Debian packages. ([\#13095](https://github.com/matrix-org/synapse/issues/13095)) -- Remove documentation for the Delete Group Admin API which no longer exists. ([\#13112](https://github.com/matrix-org/synapse/issues/13112)) - - -Deprecations and Removals ------------------------- - -- Remove the unspecced `DELETE /directory/list/room/{roomId}` endpoint, which hid rooms from the [public room directory](https://spec.matrix.org/v1.3/client-server-api/#listing-rooms). Instead, `PUT` to the same URL with a visibility of `"private"`. ([\#13123](https://github.com/matrix-org/synapse/issues/13123)) - - -Internal Changes ---------------- - -- Add tests for cancellation of `GET /rooms/$room_id/members` and `GET /rooms/$room_id/state` requests. ([\#12674](https://github.com/matrix-org/synapse/issues/12674)) -- Report login failures due to unknown third party identifiers in the same way as failures due to invalid passwords. This prevents an attacker from using the error response to determine if the identifier exists. Contributed by Daniel Aloni. ([\#12738](https://github.com/matrix-org/synapse/issues/12738)) -- Merge the Complement testing Docker images into a single, multi-purpose image.
([\#12881](https://github.com/matrix-org/synapse/issues/12881), [\#13075](https://github.com/matrix-org/synapse/issues/13075)) -- Simplify the database schema for `event_edges`. ([\#12893](https://github.com/matrix-org/synapse/issues/12893)) -- Clean up the test code for client disconnection. ([\#12929](https://github.com/matrix-org/synapse/issues/12929)) -- Remove code generating comments in configuration. ([\#12941](https://github.com/matrix-org/synapse/issues/12941)) -- Add `Cross-Origin-Resource-Policy: cross-origin` header to content repository's thumbnail and download endpoints. ([\#12944](https://github.com/matrix-org/synapse/issues/12944)) -- Replace noop background updates with `DELETE` delta. ([\#12954](https://github.com/matrix-org/synapse/issues/12954), [\#13050](https://github.com/matrix-org/synapse/issues/13050)) -- Use lower isolation level when inserting read receipts to avoid serialization errors. Contributed by Nick @ Beeper. ([\#12957](https://github.com/matrix-org/synapse/issues/12957)) -- Reduce the amount of state we pull from the DB. ([\#12963](https://github.com/matrix-org/synapse/issues/12963)) -- Enable testing against PostgreSQL databases in Complement CI. ([\#12965](https://github.com/matrix-org/synapse/issues/12965), [\#13034](https://github.com/matrix-org/synapse/issues/13034)) -- Fix an inaccurate comment. ([\#12969](https://github.com/matrix-org/synapse/issues/12969)) -- Remove the `delete_device` method and always call `delete_devices`. ([\#12970](https://github.com/matrix-org/synapse/issues/12970)) -- Use a GitHub form for issues rather than a hard-to-read, easy-to-ignore template. ([\#12982](https://github.com/matrix-org/synapse/issues/12982)) -- Move [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) behind an experimental config flag. ([\#12984](https://github.com/matrix-org/synapse/issues/12984)) -- Add type hints to tests. ([\#12985](https://github.com/matrix-org/synapse/issues/12985), [\#13099](https://github.com/matrix-org/synapse/issues/13099)) -- Refactor macaroon token generation and move the unsubscribe link in notification emails to `/_synapse/client/unsubscribe`. ([\#12986](https://github.com/matrix-org/synapse/issues/12986)) -- Fix documentation for running Complement tests. ([\#12990](https://github.com/matrix-org/synapse/issues/12990)) -- Faster joins: add issue links to the TODO comments in the code. ([\#13004](https://github.com/matrix-org/synapse/issues/13004)) -- Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. ([\#13005](https://github.com/matrix-org/synapse/issues/13005), [\#13096](https://github.com/matrix-org/synapse/issues/13096), [\#13118](https://github.com/matrix-org/synapse/issues/13118)) -- Replaced usage of PyJWT with methods from Authlib in `org.matrix.login.jwt`. Contributed by Hannes Lerchl. ([\#13011](https://github.com/matrix-org/synapse/issues/13011)) -- Modernize the `contrib/graph/` scripts. ([\#13013](https://github.com/matrix-org/synapse/issues/13013)) -- Remove redundant `room_version` parameters from event auth functions. ([\#13017](https://github.com/matrix-org/synapse/issues/13017)) -- Decouple `synapse.api.auth_blocking.AuthBlocking` from `synapse.api.auth.Auth`. ([\#13021](https://github.com/matrix-org/synapse/issues/13021)) -- Add type annotations to `synapse.storage.databases.main.devices`. ([\#13025](https://github.com/matrix-org/synapse/issues/13025)) -- Set default `sync_response_cache_duration` to two minutes.
([\#13042](https://github.com/matrix-org/synapse/issues/13042)) -- Rename CI test runs. ([\#13046](https://github.com/matrix-org/synapse/issues/13046)) -- Increase timeout of complement CI test runs. ([\#13048](https://github.com/matrix-org/synapse/issues/13048)) -- Refactor entry points so that they all have a `main` function. ([\#13052](https://github.com/matrix-org/synapse/issues/13052)) -- Refactor the Dockerfile-workers configuration script to use Jinja2 templates in Synapse workers' Supervisord blocks. ([\#13054](https://github.com/matrix-org/synapse/issues/13054)) -- Add headers to individual options in config documentation to allow for linking. ([\#13055](https://github.com/matrix-org/synapse/issues/13055)) -- Make Complement CI logs easier to read. ([\#13057](https://github.com/matrix-org/synapse/issues/13057), [\#13058](https://github.com/matrix-org/synapse/issues/13058), [\#13069](https://github.com/matrix-org/synapse/issues/13069)) -- Don't instantiate modules with keyword arguments. ([\#13060](https://github.com/matrix-org/synapse/issues/13060)) -- Fix type checking errors against Twisted trunk. ([\#13061](https://github.com/matrix-org/synapse/issues/13061)) -- Allow MSC3030 `timestamp_to_event` calls from anyone on world-readable rooms. ([\#13062](https://github.com/matrix-org/synapse/issues/13062)) -- Add a CI job to check that schema deltas are in the correct folder. ([\#13063](https://github.com/matrix-org/synapse/issues/13063)) -- Avoid rechecking event auth rules which are independent of room state. ([\#13065](https://github.com/matrix-org/synapse/issues/13065)) -- Reduce the duplication of code that invokes the rate limiter. ([\#13070](https://github.com/matrix-org/synapse/issues/13070)) -- Add a Subject Alternative Name to the certificate generated for Complement tests. ([\#13071](https://github.com/matrix-org/synapse/issues/13071)) -- Add more tests for room upgrades. ([\#13074](https://github.com/matrix-org/synapse/issues/13074)) -- Pin dependencies maintained by matrix.org to [semantic version](https://semver.org/) bounds. ([\#13082](https://github.com/matrix-org/synapse/issues/13082)) -- Correctly report prometheus DB stats for `get_earliest_token_for_stats`. ([\#13085](https://github.com/matrix-org/synapse/issues/13085)) -- Fix a long-standing bug where a finished logging context would be re-started when Synapse failed to persist an event from federation. ([\#13089](https://github.com/matrix-org/synapse/issues/13089)) -- Simplify the alias deletion logic as an application service. ([\#13093](https://github.com/matrix-org/synapse/issues/13093)) -- Add type annotations to `tests.test_server`. ([\#13124](https://github.com/matrix-org/synapse/issues/13124)) - - -Synapse 1.61.1 (2022-06-28) -=========================== - -This patch release fixes a security issue regarding URL previews, affecting all prior versions of Synapse. Server administrators are encouraged to update Synapse as soon as possible. We are not aware of these vulnerabilities being exploited in the wild. - -Server administrators who are unable to update Synapse may use the workarounds described in the linked GitHub Security Advisory below. - -## Security advisory - -The following issue is fixed in 1.61.1. 
- -* [GHSA-22p3-qrh9-cx32](https://github.com/matrix-org/synapse/security/advisories/GHSA-22p3-qrh9-cx32) / [CVE-2022-31052](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-31052) - - Synapse instances with the [`url_preview_enabled`](https://matrix-org.github.io/synapse/v1.61/usage/configuration/config_documentation.html#media-store) homeserver config option set to `true` are affected. URL previews of some web pages can lead to unbounded recursion, causing the request to either fail, or in some cases crash the running Synapse process. - - Requesting URL previews requires authentication. Nevertheless, it is possible to exploit this maliciously, either by malicious users on the homeserver, or by remote users sending URLs that a local user's client may automatically request a URL preview for. - - Homeservers with the `url_preview_enabled` configuration option set to `false` (the default) are unaffected. Instances with the `enable_media_repo` configuration option set to `false` are also unaffected, as this also disables URL preview functionality. - - Fixed by [fa1308061802ac7b7d20e954ba7372c5ac292333](https://github.com/matrix-org/synapse/commit/fa1308061802ac7b7d20e954ba7372c5ac292333). - -Synapse 1.61.0 (2022-06-14) -=========================== - -This release removes support for the non-standard feature known both as 'groups' and as 'communities', which has been superseded by *Spaces*. - -See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1610) for more details. - -Improved Documentation ---------------------- - -- Mention removed community/group worker endpoints in [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1610). Contributed by @olmari. ([\#13023](https://github.com/matrix-org/synapse/issues/13023)) - - -Synapse 1.61.0rc1 (2022-06-07) -============================== - -Features -------- - -- Add new `media_retention` options to the homeserver config for routinely cleaning up non-recently accessed media. ([\#12732](https://github.com/matrix-org/synapse/issues/12732), [\#12972](https://github.com/matrix-org/synapse/issues/12972), [\#12977](https://github.com/matrix-org/synapse/issues/12977)) -- Experimental support for [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772): Push rule for mutually related events. ([\#12740](https://github.com/matrix-org/synapse/issues/12740), [\#12859](https://github.com/matrix-org/synapse/issues/12859)) -- Update to the `check_event_for_spam` module callback: Deprecate the current callback signature, replace it with a new signature that is both less ambiguous (replacing booleans with explicit allow/block) and more powerful (ability to return explicit error codes). ([\#12808](https://github.com/matrix-org/synapse/issues/12808)) -- Add storage and module API methods to get monthly active users (and their corresponding appservices) within an optionally specified time range. ([\#12838](https://github.com/matrix-org/synapse/issues/12838), [\#12917](https://github.com/matrix-org/synapse/issues/12917)) -- Support the new error code `ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED` from [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823). ([\#12845](https://github.com/matrix-org/synapse/issues/12845), [\#12923](https://github.com/matrix-org/synapse/issues/12923)) -- Add a configurable background job to delete stale devices.
([\#12855](https://github.com/matrix-org/synapse/issues/12855)) -- Improve URL previews for pages with empty elements. ([\#12951](https://github.com/matrix-org/synapse/issues/12951)) -- Allow updating a user's password using the admin API without logging out their devices. Contributed by @jcgruenhage. ([\#12952](https://github.com/matrix-org/synapse/issues/12952)) - - -Bugfixes --------- - -- Always send an `access_token` in `/thirdparty/` requests to appservices, as required by the [Application Service API specification](https://spec.matrix.org/v1.1/application-service-api/#third-party-networks). ([\#12746](https://github.com/matrix-org/synapse/issues/12746)) -- Implement [MSC3816](https://github.com/matrix-org/matrix-spec-proposals/pull/3816): sending the root event in a thread should count as having 'participated' in it. ([\#12766](https://github.com/matrix-org/synapse/issues/12766)) -- Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API. ([\#12784](https://github.com/matrix-org/synapse/issues/12784)) -- Fix a bug where we did not correctly handle invalid device list updates over federation. Contributed by Carl Bordum Hansen. ([\#12829](https://github.com/matrix-org/synapse/issues/12829)) -- Fix a bug which allowed multiple async operations to access database locks concurrently. Contributed by @sumnerevans @ Beeper. ([\#12832](https://github.com/matrix-org/synapse/issues/12832)) -- Fix an issue introduced in Synapse 0.34 where the `/notifications` endpoint would only return notifications if a user registered at least one pusher. Contributed by Famedly. ([\#12840](https://github.com/matrix-org/synapse/issues/12840)) -- Fix a bug where servers using a Postgres database would fail to backfill from an insertion event when MSC2716 is enabled (`experimental_features.msc2716_enabled`). ([\#12843](https://github.com/matrix-org/synapse/issues/12843)) -- Fix [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787) rooms being omitted from room directory, room summary and space hierarchy responses. ([\#12858](https://github.com/matrix-org/synapse/issues/12858)) -- Fix a bug introduced in Synapse 1.54.0 which could sometimes cause exceptions when handling federated traffic. ([\#12877](https://github.com/matrix-org/synapse/issues/12877)) -- Fix a bug introduced in Synapse 1.59.0 which caused room deletion to fail with a foreign key violation error. ([\#12889](https://github.com/matrix-org/synapse/issues/12889)) -- Fix a long-standing bug which caused the `/messages` endpoint to return an incorrect `end` attribute when there were no more events. Contributed by @Vetchu. ([\#12903](https://github.com/matrix-org/synapse/issues/12903)) -- Fix a bug introduced in Synapse 1.58.0 where `/sync` would fail if the most recent event in a room was a redaction of an event that has since been purged. ([\#12905](https://github.com/matrix-org/synapse/issues/12905)) -- Fix a potential memory leak when generating thumbnails. ([\#12932](https://github.com/matrix-org/synapse/issues/12932)) -- Fix a long-standing bug where a URL preview would break if the image failed to download. ([\#12950](https://github.com/matrix-org/synapse/issues/12950)) - - -Improved Documentation ----------------------- - -- Fix typographical errors in documentation. ([\#12863](https://github.com/matrix-org/synapse/issues/12863)) -- Fix documentation incorrectly stating the `sendToDevice` endpoint can be directed at generic workers. Contributed by Nick @ Beeper. 
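([\#12867](https://github.com/matrix-org/synapse/issues/12867))

As an illustration of the password-update feature above ([\#12952](https://github.com/matrix-org/synapse/issues/12952)), here is a minimal sketch of calling the admin API so that the user's existing sessions survive the password change. The endpoint shape and field names follow the Synapse admin API documentation; the homeserver URL, admin token and user ID are hypothetical.

```python
import requests

HOMESERVER = "https://matrix.example.org"  # hypothetical homeserver
ADMIN_TOKEN = "syt_admin_token"  # access token of a server admin (assumed)

resp = requests.put(
    f"{HOMESERVER}/_synapse/admin/v2/users/@alice:example.org",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    # logout_devices=False asks Synapse not to log out the user's devices.
    json={"password": "correct-horse-battery-staple", "logout_devices": False},
)
resp.raise_for_status()
```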
-Deprecations and Removals ------------------------- - -- Remove support for the non-standard groups/communities feature from Synapse. ([\#12553](https://github.com/matrix-org/synapse/issues/12553), [\#12558](https://github.com/matrix-org/synapse/issues/12558), [\#12563](https://github.com/matrix-org/synapse/issues/12563), [\#12895](https://github.com/matrix-org/synapse/issues/12895), [\#12897](https://github.com/matrix-org/synapse/issues/12897), [\#12899](https://github.com/matrix-org/synapse/issues/12899), [\#12900](https://github.com/matrix-org/synapse/issues/12900), [\#12936](https://github.com/matrix-org/synapse/issues/12936), [\#12966](https://github.com/matrix-org/synapse/issues/12966)) -- Remove contributed `kick_users.py` script. This is broken under Python 3, and is not added to the environment when `pip install`ing Synapse. ([\#12908](https://github.com/matrix-org/synapse/issues/12908)) -- Remove `contrib/jitsimeetbridge`. This was an unused experiment that hasn't been meaningfully changed since 2014. ([\#12909](https://github.com/matrix-org/synapse/issues/12909)) -- Remove unused `contrib/experiements/cursesio.py` script, which fails to run under Python 3. ([\#12910](https://github.com/matrix-org/synapse/issues/12910)) -- Remove unused `contrib/experiements/test_messaging.py` script. This fails to run on Python 3. ([\#12911](https://github.com/matrix-org/synapse/issues/12911)) - - -Internal Changes ---------------- - -- Test Synapse against Complement with workers. ([\#12810](https://github.com/matrix-org/synapse/issues/12810), [\#12933](https://github.com/matrix-org/synapse/issues/12933)) -- Reduce the amount of state we pull from the DB. ([\#12811](https://github.com/matrix-org/synapse/issues/12811), [\#12964](https://github.com/matrix-org/synapse/issues/12964)) -- Try other homeservers when re-syncing state for rooms with partial state. ([\#12812](https://github.com/matrix-org/synapse/issues/12812)) -- Resume state re-syncing for rooms with partial state after a Synapse restart. ([\#12813](https://github.com/matrix-org/synapse/issues/12813)) -- Remove Mutual Rooms' ([MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)) endpoint dependency on the User Directory. ([\#12836](https://github.com/matrix-org/synapse/issues/12836)) -- Experimental: expand `check_event_for_spam` with ability to return additional fields. This enables spam-checker implementations to experiment with mechanisms to give users more information about why they are blocked and whether any action is needed from them to be unblocked. ([\#12846](https://github.com/matrix-org/synapse/issues/12846)) -- Remove `dont_notify` from the `.m.rule.room.server_acl` rule. ([\#12849](https://github.com/matrix-org/synapse/issues/12849)) -- Remove the unstable `/hierarchy` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). ([\#12851](https://github.com/matrix-org/synapse/issues/12851)) -- Pull out less state when handling gaps in room DAG. ([\#12852](https://github.com/matrix-org/synapse/issues/12852), [\#12904](https://github.com/matrix-org/synapse/issues/12904)) -- Clean-up the push rules datastore. ([\#12856](https://github.com/matrix-org/synapse/issues/12856)) -- Correct a type annotation in the URL preview source code. ([\#12860](https://github.com/matrix-org/synapse/issues/12860)) -- Update `pyjwt` dependency to [2.4.0](https://github.com/jpadilla/pyjwt/releases/tag/2.4.0).
([\#12865](https://github.com/matrix-org/synapse/issues/12865)) -- Enable the `/account/whoami` endpoint on synapse worker processes. Contributed by Nick @ Beeper. ([\#12866](https://github.com/matrix-org/synapse/issues/12866)) -- Enable the `batch_send` endpoint on synapse worker processes. Contributed by Nick @ Beeper. ([\#12868](https://github.com/matrix-org/synapse/issues/12868)) -- Don't generate empty AS transactions when the AS is flagged as down. Contributed by Nick @ Beeper. ([\#12869](https://github.com/matrix-org/synapse/issues/12869)) -- Fix up the variable `state_store` naming. ([\#12871](https://github.com/matrix-org/synapse/issues/12871)) -- Faster room joins: when querying the current state of the room, wait for state to be populated. ([\#12872](https://github.com/matrix-org/synapse/issues/12872)) -- Avoid running queries which will never result in deletions. ([\#12879](https://github.com/matrix-org/synapse/issues/12879)) -- Use constants for EDU types. ([\#12884](https://github.com/matrix-org/synapse/issues/12884)) -- Reduce database load of `/sync` when presence is enabled. ([\#12885](https://github.com/matrix-org/synapse/issues/12885)) -- Refactor `have_seen_events` to reduce memory consumed when processing federation traffic. ([\#12886](https://github.com/matrix-org/synapse/issues/12886)) -- Refactor receipt linearization code. ([\#12888](https://github.com/matrix-org/synapse/issues/12888)) -- Add type annotations to `synapse.logging.opentracing`. ([\#12894](https://github.com/matrix-org/synapse/issues/12894)) -- Remove PyNaCl occurrences directly used in Synapse code. ([\#12902](https://github.com/matrix-org/synapse/issues/12902)) -- Bump types-jsonschema from 4.4.1 to 4.4.6. ([\#12912](https://github.com/matrix-org/synapse/issues/12912)) -- Rename storage classes. ([\#12913](https://github.com/matrix-org/synapse/issues/12913)) -- Preparation for database schema simplifications: stop reading from `event_edges.room_id`. ([\#12914](https://github.com/matrix-org/synapse/issues/12914)) -- Check if we are in a virtual environment before overriding the `PYTHONPATH` environment variable in the demo script. ([\#12916](https://github.com/matrix-org/synapse/issues/12916)) -- Improve the logging when signature checks on events fail. ([\#12925](https://github.com/matrix-org/synapse/issues/12925)) - - -Synapse 1.60.0 (2022-05-31) -=========================== - -This release of Synapse adds a unique index to the `state_group_edges` table, in -order to prevent accidentally introducing duplicate information (for example, -because a database backup was restored multiple times). If your Synapse database -already has duplicate rows in this table, this could fail with an error and -require manual remediation. - -Additionally, the signature of the `check_event_for_spam` module callback has changed. -The previous signature has been deprecated and remains working for now. Module authors -should update their modules to use the new signature where possible. - -See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1600) -for more details. - -Bugfixes --------- - -- Fix a bug introduced in Synapse 1.60.0rc1 that would break some imports from `synapse.module_api`. 
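([\#12918](https://github.com/matrix-org/synapse/issues/12918))

To make the revised `check_event_for_spam` signature described in the 1.60.0 notes above more concrete, here is a minimal spam-checker module sketch that returns an explicit allow/block value instead of a boolean. The imports follow the module API as documented for this release series; treat the exact names as assumptions if you are running a different version.

```python
from synapse.module_api import NOT_SPAM, ModuleApi
from synapse.module_api.errors import Codes


class ExampleSpamChecker:
    def __init__(self, config: dict, api: ModuleApi):
        # Register the callback under its documented name.
        api.register_spam_checker_callbacks(
            check_event_for_spam=self.check_event_for_spam,
        )

    async def check_event_for_spam(self, event):
        # Block events whose body mentions "buy now"; allow everything else.
        if "buy now" in event.content.get("body", ""):
            return Codes.FORBIDDEN  # explicit error code, replacing `True`
        return NOT_SPAM  # explicit allow, replacing `False`
```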
-Synapse 1.60.0rc2 (2022-05-27) -============================== - -Features -------- - -- Add an option allowing users to use their password to reauthenticate for privileged actions even though password login is disabled. ([\#12883](https://github.com/matrix-org/synapse/issues/12883)) - - -Bugfixes -------- - -- Explicitly close `ijson` coroutines once we are done with them, instead of leaving the garbage collector to close them. ([\#12875](https://github.com/matrix-org/synapse/issues/12875)) - - -Internal Changes ---------------- - -- Improve URL previews by not including the content of media tags in the generated description. ([\#12887](https://github.com/matrix-org/synapse/issues/12887)) - - -Synapse 1.60.0rc1 (2022-05-24) -============================== - -Features -------- - -- Measure the time taken in spam-checking callbacks and expose those measurements as metrics. ([\#12513](https://github.com/matrix-org/synapse/issues/12513)) -- Add a `default_power_level_content_override` config option to set default room power levels per room preset. ([\#12618](https://github.com/matrix-org/synapse/issues/12618)) -- Add support for [MSC3787: Allowing knocks to restricted rooms](https://github.com/matrix-org/matrix-spec-proposals/pull/3787). ([\#12623](https://github.com/matrix-org/synapse/issues/12623)) -- Send `USER_IP` commands on a different Redis channel, in order to reduce traffic to workers that do not process these commands. ([\#12672](https://github.com/matrix-org/synapse/issues/12672), [\#12809](https://github.com/matrix-org/synapse/issues/12809)) -- Synapse will now reload [cache config](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#caching) when it receives a [SIGHUP](https://en.wikipedia.org/wiki/SIGHUP) signal. ([\#12673](https://github.com/matrix-org/synapse/issues/12673)) -- Add a config option to allow for auto-tuning of caches. ([\#12701](https://github.com/matrix-org/synapse/issues/12701)) -- Update [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) implementation to process marker events from the current state, to avoid markers being lost in timeline gaps for federated servers, which would cause the imported history to go undiscovered. ([\#12718](https://github.com/matrix-org/synapse/issues/12718)) -- Add a `drop_federated_event` callback to `SpamChecker` to disregard inbound federated events before they take up much processing power, in an emergency. ([\#12744](https://github.com/matrix-org/synapse/issues/12744)) -- Implement [MSC3818: Copy room type on upgrade](https://github.com/matrix-org/matrix-spec-proposals/pull/3818). ([\#12786](https://github.com/matrix-org/synapse/issues/12786), [\#12792](https://github.com/matrix-org/synapse/issues/12792)) -- Update to the `check_event_for_spam` module callback. Deprecate the current callback signature, replace it with a new signature that is both less ambiguous (replacing booleans with explicit allow/block) and more powerful (ability to return explicit error codes). ([\#12808](https://github.com/matrix-org/synapse/issues/12808)) - - -Bugfixes -------- - -- Fix a bug introduced in Synapse 1.7.0 that would prevent events from being sent to clients if there's a retention policy in the room when the support for retention policies is disabled.
([\#12611](https://github.com/matrix-org/synapse/issues/12611)) -- Fix a bug introduced in Synapse 1.57.0 where `/messages` would throw a 500 error when querying for a non-existent room. ([\#12683](https://github.com/matrix-org/synapse/issues/12683)) -- Add a unique index to `state_group_edges` to prevent duplicates being accidentally introduced and the consequential impact to performance. ([\#12687](https://github.com/matrix-org/synapse/issues/12687)) -- Fix a long-standing bug where an empty room would be created when a user with an insufficient power level tried to upgrade a room. ([\#12696](https://github.com/matrix-org/synapse/issues/12696)) -- Fix a bug introduced in Synapse 1.30.0 where empty rooms could be automatically created if a monthly active users limit is set. ([\#12713](https://github.com/matrix-org/synapse/issues/12713)) -- Fix push to dismiss notifications when read on another client. Contributed by @SpiritCroc @ Beeper. ([\#12721](https://github.com/matrix-org/synapse/issues/12721)) -- Fix poor database performance when reading the cache invalidation stream for large servers with lots of workers. ([\#12747](https://github.com/matrix-org/synapse/issues/12747)) -- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. ([\#12762](https://github.com/matrix-org/synapse/issues/12762)) -- Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API. ([\#12770](https://github.com/matrix-org/synapse/issues/12770)) -- Give a meaningful error message when a client tries to create a room with an invalid alias localpart. ([\#12779](https://github.com/matrix-org/synapse/issues/12779)) -- Fix a bug introduced in 1.43.0 where a file (`providers.json`) was never closed. Contributed by @arkamar. ([\#12794](https://github.com/matrix-org/synapse/issues/12794)) -- Fix a long-standing bug where finished log contexts would be re-started when failing to contact remote homeservers. ([\#12803](https://github.com/matrix-org/synapse/issues/12803)) -- Fix a bug, introduced in Synapse 1.21.0, that led to media thumbnails being unusable before the index has been added in the background. ([\#12823](https://github.com/matrix-org/synapse/issues/12823)) - - -Updates to the Docker image ---------------------------- - -- Fix the docker file after a dependency update. ([\#12853](https://github.com/matrix-org/synapse/issues/12853)) - - -Improved Documentation ----------------------- - -- Fix a typo in the Media Admin API documentation. ([\#12715](https://github.com/matrix-org/synapse/issues/12715)) -- Update the OpenID Connect example for Keycloak to be compatible with newer versions of Keycloak. Contributed by @nhh. ([\#12727](https://github.com/matrix-org/synapse/issues/12727)) -- Fix typo in server listener documentation. ([\#12742](https://github.com/matrix-org/synapse/issues/12742)) -- Link to the configuration manual from the welcome page of the documentation. ([\#12748](https://github.com/matrix-org/synapse/issues/12748)) -- Fix typo in `run_background_tasks_on` option name in configuration manual documentation. ([\#12749](https://github.com/matrix-org/synapse/issues/12749)) -- Add information regarding the `rc_invites` ratelimiting option to the configuration docs. ([\#12759](https://github.com/matrix-org/synapse/issues/12759)) -- Add documentation for cancellation of request processing. 
([\#12761](https://github.com/matrix-org/synapse/issues/12761)) -- Recommend using Docker to run tests against Postgres. ([\#12765](https://github.com/matrix-org/synapse/issues/12765)) -- Add missing user directory endpoint from the generic worker documentation. Contributed by @olmari. ([\#12773](https://github.com/matrix-org/synapse/issues/12773)) -- Add additional info to documentation of config option `cache_autotuning`. ([\#12776](https://github.com/matrix-org/synapse/issues/12776)) -- Update configuration manual documentation to document size-related suffixes. ([\#12777](https://github.com/matrix-org/synapse/issues/12777)) -- Fix invalid YAML syntax in the example documentation for the `url_preview_accept_language` config option. ([\#12785](https://github.com/matrix-org/synapse/issues/12785)) - - -Deprecations and Removals ------------------------- - -- Require a body in POST requests to `/rooms/{roomId}/receipt/{receiptType}/{eventId}`, as required by the [Matrix specification](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidreceiptreceipttypeeventid). This breaks compatibility with Element Android 1.2.0 and earlier: users of those clients will be unable to send read receipts. ([\#12709](https://github.com/matrix-org/synapse/issues/12709)) - - -Internal Changes ---------------- - -- Improve event caching mechanism to avoid having multiple copies of an event in memory at a time. ([\#10533](https://github.com/matrix-org/synapse/issues/10533)) -- Preparation for faster-room-join work: return subsets of room state which we already have, immediately. ([\#12498](https://github.com/matrix-org/synapse/issues/12498)) -- Add `@cancellable` decorator, for use on endpoint methods that can be cancelled when clients disconnect. ([\#12586](https://github.com/matrix-org/synapse/issues/12586), [\#12588](https://github.com/matrix-org/synapse/issues/12588), [\#12630](https://github.com/matrix-org/synapse/issues/12630), [\#12694](https://github.com/matrix-org/synapse/issues/12694), [\#12698](https://github.com/matrix-org/synapse/issues/12698), [\#12699](https://github.com/matrix-org/synapse/issues/12699), [\#12700](https://github.com/matrix-org/synapse/issues/12700), [\#12705](https://github.com/matrix-org/synapse/issues/12705)) -- Enable cancellation of `GET /rooms/$room_id/members`, `GET /rooms/$room_id/state` and `GET /rooms/$room_id/state/$event_type/*` requests. ([\#12708](https://github.com/matrix-org/synapse/issues/12708)) -- Improve documentation of the `synapse.push` module. ([\#12676](https://github.com/matrix-org/synapse/issues/12676)) -- Refactor functions onto `PushRuleEvaluatorForEvent`. ([\#12677](https://github.com/matrix-org/synapse/issues/12677)) -- Preparation for database schema simplifications: stop writing to `event_reference_hashes`. ([\#12679](https://github.com/matrix-org/synapse/issues/12679)) -- Remove code which updates unused database column `application_services_state.last_txn`. ([\#12680](https://github.com/matrix-org/synapse/issues/12680)) -- Refactor `EventContext` class. ([\#12689](https://github.com/matrix-org/synapse/issues/12689)) -- Remove an unneeded class in the push code. ([\#12691](https://github.com/matrix-org/synapse/issues/12691)) -- Consolidate parsing of relation information from events. ([\#12693](https://github.com/matrix-org/synapse/issues/12693)) -- Convert namespace class `Codes` into a string enum. ([\#12703](https://github.com/matrix-org/synapse/issues/12703)) -- Optimize private read receipt filtering.
([\#12711](https://github.com/matrix-org/synapse/issues/12711)) -- Drop the logging level of status messages for the URL preview cache expiry job from INFO to DEBUG. ([\#12720](https://github.com/matrix-org/synapse/issues/12720)) -- Downgrade some OIDC errors to warnings in the logs, to reduce the noise of Sentry reports. ([\#12723](https://github.com/matrix-org/synapse/issues/12723)) -- Update configs used by Complement to allow more invites/3PID validations during tests. ([\#12731](https://github.com/matrix-org/synapse/issues/12731)) -- Tweak the mypy plugin so that `@cached` can accept `on_invalidate=None`. ([\#12769](https://github.com/matrix-org/synapse/issues/12769)) -- Move methods that call `add_push_rule` to the `PushRuleStore` class. ([\#12772](https://github.com/matrix-org/synapse/issues/12772)) -- Make handling of federation Authorization header (more) compliant with RFC7230. ([\#12774](https://github.com/matrix-org/synapse/issues/12774)) -- Refactor `resolve_state_groups_for_events` to not pull out full state when no state resolution happens. ([\#12775](https://github.com/matrix-org/synapse/issues/12775)) -- Do not keep going if there are 5 back-to-back background update failures. ([\#12781](https://github.com/matrix-org/synapse/issues/12781)) -- Fix federation when using the demo scripts. ([\#12783](https://github.com/matrix-org/synapse/issues/12783)) -- The `hash_password` script now fails when it is called without specifying a config file. Contributed by @jae1911. ([\#12789](https://github.com/matrix-org/synapse/issues/12789)) -- Improve and fix type hints. ([\#12567](https://github.com/matrix-org/synapse/issues/12567), [\#12477](https://github.com/matrix-org/synapse/issues/12477), [\#12717](https://github.com/matrix-org/synapse/issues/12717), [\#12753](https://github.com/matrix-org/synapse/issues/12753), [\#12695](https://github.com/matrix-org/synapse/issues/12695), [\#12734](https://github.com/matrix-org/synapse/issues/12734), [\#12716](https://github.com/matrix-org/synapse/issues/12716), [\#12726](https://github.com/matrix-org/synapse/issues/12726), [\#12790](https://github.com/matrix-org/synapse/issues/12790), [\#12833](https://github.com/matrix-org/synapse/issues/12833)) -- Update EventContext `get_current_event_ids` and `get_prev_event_ids` to accept state filters and update calls where possible. ([\#12791](https://github.com/matrix-org/synapse/issues/12791)) -- Remove Caddy from the Synapse workers image used in Complement. ([\#12818](https://github.com/matrix-org/synapse/issues/12818)) -- Add Complement's shared registration secret to the Complement worker image. This fixes tests that depend on it. ([\#12819](https://github.com/matrix-org/synapse/issues/12819)) -- Support registering Application Services when running with workers under Complement. ([\#12826](https://github.com/matrix-org/synapse/issues/12826)) -- Disable 'faster room join' Complement tests when testing against Synapse with workers. ([\#12842](https://github.com/matrix-org/synapse/issues/12842)) - - -Synapse 1.59.1 (2022-05-18) -=========================== - -This release fixes a long-standing issue which could prevent Synapse's user directory from updating properly. - -Bugfixes -------- - -- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. Contributed by Nick @ Beeper.
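([\#12762](https://github.com/matrix-org/synapse/issues/12762))

For context on the fix above: Postgres `TEXT` columns cannot store the NUL codepoint (`\u0000`), which is how a display name or avatar URL containing one could stall the user directory updater. A minimal sketch of the kind of sanitisation involved (illustrative only, not Synapse's exact code):

```python
def strip_nul_codepoints(value: str) -> str:
    # Postgres rejects \u0000 in TEXT values, so drop it before writing.
    return value.replace("\u0000", "")


assert strip_nul_codepoints("Alice\u0000") == "Alice"
```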
-Synapse 1.59.0 (2022-05-17) -=========================== - -Synapse 1.59 makes several changes that server administrators should be aware of: - -- Device name lookup over federation is now disabled by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616)) -- The `synapse.app.appservice` and `synapse.app.user_dir` worker application types are now deprecated. ([\#12452](https://github.com/matrix-org/synapse/issues/12452), [\#12654](https://github.com/matrix-org/synapse/issues/12654)) - -See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1590) for more details. - -Additionally, this release removes the non-standard `m.login.jwt` login type from Synapse. It can be replaced with `org.matrix.login.jwt` for identical behaviour; a short login sketch appears at the end of the 1.59.0rc1 section below. This is only used if `jwt_config.enabled` is set to `true` in the configuration. ([\#12597](https://github.com/matrix-org/synapse/issues/12597)) - - -Bugfixes -------- - -- Fix DB performance regression introduced in Synapse 1.59.0rc2. ([\#12745](https://github.com/matrix-org/synapse/issues/12745)) - - -Synapse 1.59.0rc2 (2022-05-16) -============================== - -Note: this release candidate includes a performance regression which can cause database disruption. Other release candidates in the v1.59.0 series are not affected, and a fix will be included in the v1.59.0 final release. - -Bugfixes -------- - -- Fix a bug introduced in Synapse 1.58.0 where `/sync` would fail if the most recent event in a room was rejected. ([\#12729](https://github.com/matrix-org/synapse/issues/12729)) - - -Synapse 1.59.0rc1 (2022-05-10) -============================== - -Features -------- - -- Support [MSC3266](https://github.com/matrix-org/matrix-doc/pull/3266) room summaries over federation. ([\#11507](https://github.com/matrix-org/synapse/issues/11507)) -- Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner. ([\#12168](https://github.com/matrix-org/synapse/issues/12168), [\#12635](https://github.com/matrix-org/synapse/issues/12635), [\#12636](https://github.com/matrix-org/synapse/issues/12636), [\#12670](https://github.com/matrix-org/synapse/issues/12670)) -- Extend the [module API](https://github.com/matrix-org/synapse/blob/release-v1.59/synapse/module_api/__init__.py) to allow modules to change actions for existing push rules of local users. ([\#12406](https://github.com/matrix-org/synapse/issues/12406)) -- Add the `notify_appservices_from_worker` configuration option (superseding `notify_appservices`) to allow a generic worker to be designated as the worker to send traffic to Application Services. ([\#12452](https://github.com/matrix-org/synapse/issues/12452)) -- Add the `update_user_directory_from_worker` configuration option (superseding `update_user_directory`) to allow a generic worker to be designated as the worker to update the user directory. ([\#12654](https://github.com/matrix-org/synapse/issues/12654)) -- Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid.
([\#12526](https://github.com/matrix-org/synapse/issues/12526)) -- Implement [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786): Add a default push rule to ignore `m.room.server_acl` events. ([\#12601](https://github.com/matrix-org/synapse/issues/12601)) -- Add new `mau_appservice_trial_days` configuration option to specify a different trial period for users registered via an appservice. ([\#12619](https://github.com/matrix-org/synapse/issues/12619)) - - -Bugfixes --------- - -- Fix a bug introduced in Synapse 1.48.0 where the latest thread reply provided failed to include the proper bundled aggregations. ([\#12273](https://github.com/matrix-org/synapse/issues/12273)) -- Fix a bug introduced in Synapse 1.22.0 where attempting to send a large amount of read receipts to an application service all at once would result in duplicate content and abnormally high memory usage. Contributed by Brad & Nick @ Beeper. ([\#12544](https://github.com/matrix-org/synapse/issues/12544)) -- Fix a bug introduced in Synapse 1.57.0 which could cause `Failed to calculate hosts in room` errors to be logged for outbound federation. ([\#12570](https://github.com/matrix-org/synapse/issues/12570)) -- Fix a long-standing bug where status codes would almost always get logged as `200!`, irrespective of the actual status code, when clients disconnect before a request has finished processing. ([\#12580](https://github.com/matrix-org/synapse/issues/12580)) -- Fix race when persisting an event and deleting a room that could lead to outbound federation breaking. ([\#12594](https://github.com/matrix-org/synapse/issues/12594)) -- Fix a bug introduced in Synapse 1.53.0 where bundled aggregations for annotations/edits were incorrectly calculated. ([\#12633](https://github.com/matrix-org/synapse/issues/12633)) -- Fix a long-standing bug where rooms containing power levels with string values could not be upgraded. ([\#12657](https://github.com/matrix-org/synapse/issues/12657)) -- Prevent memory leak from reoccurring when presence is disabled. ([\#12656](https://github.com/matrix-org/synapse/issues/12656)) - - -Updates to the Docker image ---------------------------- - -- Explicitly opt-in to using [BuildKit-specific features](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md) in the Dockerfile. This fixes issues with building images in some GitLab CI environments. ([\#12541](https://github.com/matrix-org/synapse/issues/12541)) -- Update the "Build docker images" GitHub Actions workflow to use `docker/metadata-action` to generate docker image tags, instead of a custom shell script. Contributed by @henryclw. ([\#12573](https://github.com/matrix-org/synapse/issues/12573)) - - -Improved Documentation ----------------------- - -- Update SQL statements and replace use of old table `user_stats_historical` in docs for Synapse Admins. ([\#12536](https://github.com/matrix-org/synapse/issues/12536)) -- Add missing linebreak to `pipx` install instructions. ([\#12579](https://github.com/matrix-org/synapse/issues/12579)) -- Add information about the TCP replication module to docs. ([\#12621](https://github.com/matrix-org/synapse/issues/12621)) -- Fixes to the formatting of `README.rst`. ([\#12627](https://github.com/matrix-org/synapse/issues/12627)) -- Fix docs on how to run specific Complement tests using the `complement.sh` test runner. 
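([\#12664](https://github.com/matrix-org/synapse/issues/12664))

As noted in the 1.59.0 release notes above, the non-standard `m.login.jwt` login type is replaced by `org.matrix.login.jwt` with identical behaviour. Here is a minimal sketch of logging in with the new type; the homeserver URL and JWT are hypothetical, and the request body follows Synapse's JWT login documentation.

```python
import requests

resp = requests.post(
    "https://matrix.example.org/_matrix/client/v3/login",
    # "token" carries the signed JWT; only the "type" string changed.
    json={"type": "org.matrix.login.jwt", "token": "<signed JWT>"},
)
resp.raise_for_status()
access_token = resp.json()["access_token"]
```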
-Deprecations and Removals ------------------------- - -- Remove unstable identifiers from [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069). ([\#12596](https://github.com/matrix-org/synapse/issues/12596)) -- Remove the unspecified `m.login.jwt` login type and the unstable `uk.half-shot.msc2778.login.application_service` from [MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778). ([\#12597](https://github.com/matrix-org/synapse/issues/12597)) -- Synapse now requires at least Python 3.7.1 (up from 3.7.0), for compatibility with the latest Twisted trunk. ([\#12613](https://github.com/matrix-org/synapse/issues/12613)) - - -Internal Changes ---------------- - -- Use supervisord to supervise Postgres and Caddy in the Complement image to reduce restart time. ([\#12480](https://github.com/matrix-org/synapse/issues/12480)) -- Immediately retry any requests that have backed off when a server comes back online. ([\#12500](https://github.com/matrix-org/synapse/issues/12500)) -- Use `make_awaitable` instead of `defer.succeed` for return values of mocks in tests. ([\#12505](https://github.com/matrix-org/synapse/issues/12505)) -- Consistently check if an object is a `frozendict`. ([\#12564](https://github.com/matrix-org/synapse/issues/12564)) -- Protect module callbacks with read semantics against cancellation. ([\#12568](https://github.com/matrix-org/synapse/issues/12568)) -- Improve comments and error messages around access tokens. ([\#12577](https://github.com/matrix-org/synapse/issues/12577)) -- Improve docstrings for the receipts store. ([\#12581](https://github.com/matrix-org/synapse/issues/12581)) -- Use constants for read-receipts in tests. ([\#12582](https://github.com/matrix-org/synapse/issues/12582)) -- Log status code of cancelled requests as 499 and avoid logging stack traces for them. ([\#12587](https://github.com/matrix-org/synapse/issues/12587), [\#12663](https://github.com/matrix-org/synapse/issues/12663)) -- Remove special-case for `twisted` logger from default log config. ([\#12589](https://github.com/matrix-org/synapse/issues/12589)) -- Use `getClientAddress` instead of the deprecated `getClientIP`. ([\#12599](https://github.com/matrix-org/synapse/issues/12599)) -- Add link to documentation in Grafana Dashboard. ([\#12602](https://github.com/matrix-org/synapse/issues/12602)) -- Reduce log spam when running multiple event persisters. ([\#12610](https://github.com/matrix-org/synapse/issues/12610)) -- Add extra debug logging to federation sender. ([\#12614](https://github.com/matrix-org/synapse/issues/12614)) -- Prevent remote homeservers from requesting local user device names by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616)) -- Add a consistency check on events which we read from the database. ([\#12620](https://github.com/matrix-org/synapse/issues/12620)) -- Remove use of the `constantly` library and switch to enums for `EventRedactBehaviour`. Contributed by @andrewdoh. ([\#12624](https://github.com/matrix-org/synapse/issues/12624)) -- Remove unused code related to receipts. ([\#12632](https://github.com/matrix-org/synapse/issues/12632)) -- Minor improvements to the scripts for running Synapse in worker mode under Complement. ([\#12637](https://github.com/matrix-org/synapse/issues/12637)) -- Move `pympler` back into the `all` extras. ([\#12652](https://github.com/matrix-org/synapse/issues/12652)) -- Fix spelling of `M_UNRECOGNIZED` in comments.
([\#12665](https://github.com/matrix-org/synapse/issues/12665)) -- Release script: confirm the commit to be tagged before tagging. ([\#12556](https://github.com/matrix-org/synapse/issues/12556)) -- Fix a typo in the announcement text generated by the Synapse release development script. ([\#12612](https://github.com/matrix-org/synapse/issues/12612)) - -### Typechecking - -- Fix scripts-dev to pass typechecking. ([\#12356](https://github.com/matrix-org/synapse/issues/12356)) -- Add some type hints to datastore. ([\#12485](https://github.com/matrix-org/synapse/issues/12485)) -- Remove unused `# type: ignore`s. ([\#12531](https://github.com/matrix-org/synapse/issues/12531)) -- Allow unused `# type: ignore` comments in bleeding edge CI jobs. ([\#12576](https://github.com/matrix-org/synapse/issues/12576)) -- Remove redundant lines of config from `mypy.ini`. ([\#12608](https://github.com/matrix-org/synapse/issues/12608)) -- Update to mypy 0.950. ([\#12650](https://github.com/matrix-org/synapse/issues/12650)) -- Use `Concatenate` to better annotate `_do_execute`. ([\#12666](https://github.com/matrix-org/synapse/issues/12666)) -- Use `ParamSpec` to refine type hints. ([\#12667](https://github.com/matrix-org/synapse/issues/12667)) -- Fix mypy against latest pillow stubs. ([\#12671](https://github.com/matrix-org/synapse/issues/12671)) - -Synapse 1.58.1 (2022-05-05) -=========================== - -This patch release includes a fix to the Debian packages, installing the -`systemd` and `cache_memory` extra package groups, which were incorrectly -omitted in v1.58.0. This primarily prevented Synapse from starting -when the `systemd.journal.JournalHandler` log handler was configured. -See [#12631](https://github.com/matrix-org/synapse/issues/12631) for further information. - -Otherwise, no significant changes since 1.58.0. - - -Synapse 1.58.0 (2022-05-03) -=========================== - -As of this release, the groups/communities feature in Synapse is now disabled by default. See [\#11584](https://github.com/matrix-org/synapse/issues/11584) for details. As mentioned in [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1580), this feature will be removed in Synapse 1.61. - -No significant changes since 1.58.0rc2. - - -Synapse 1.58.0rc2 (2022-04-26) -============================== - -This release candidate fixes bugs related to Synapse 1.58.0rc1's logic for handling device list updates. - -Bugfixes --------- - -- Fix a bug introduced in Synapse 1.58.0rc1 where the main process could consume excessive amounts of CPU and memory while handling sentry logging failures. ([\#12554](https://github.com/matrix-org/synapse/issues/12554)) -- Fix a bug introduced in Synapse 1.58.0rc1 where opentracing contexts were not correctly sent to whitelisted remote servers with device lists updates. ([\#12555](https://github.com/matrix-org/synapse/issues/12555)) - - -Internal Changes ----------------- - -- Reduce unnecessary work when handling remote device list updates. ([\#12557](https://github.com/matrix-org/synapse/issues/12557)) - - -Synapse 1.58.0rc1 (2022-04-26) -============================== - -Features --------- - -- Implement [MSC3383](https://github.com/matrix-org/matrix-spec-proposals/pull/3383) for including the destination in server-to-server authentication headers. Contributed by @Bubu and @jcgruenhage for Famedly. 
([\#11398](https://github.com/matrix-org/synapse/issues/11398)) -- Docker images and Debian packages from matrix.org now contain a locked set of Python dependencies, greatly improving build reproducibility. ([Board](https://github.com/orgs/matrix-org/projects/54), [\#11537](https://github.com/matrix-org/synapse/issues/11537)) -- Enable processing of device list updates asynchronously. ([\#12365](https://github.com/matrix-org/synapse/issues/12365), [\#12465](https://github.com/matrix-org/synapse/issues/12465)) -- Implement [MSC2815](https://github.com/matrix-org/matrix-spec-proposals/pull/2815) to allow room moderators to view redacted event content. Contributed by @tulir @ Beeper. ([\#12427](https://github.com/matrix-org/synapse/issues/12427)) -- Build Debian packages for Ubuntu 22.04 "Jammy Jellyfish". ([\#12543](https://github.com/matrix-org/synapse/issues/12543)) - - -Bugfixes --------- - -- Prevent a sync request from removing a user's busy presence status. ([\#12213](https://github.com/matrix-org/synapse/issues/12213)) -- Fix bug with incremental sync missing events when rejoining/backfilling. Contributed by Nick @ Beeper. ([\#12319](https://github.com/matrix-org/synapse/issues/12319)) -- Fix a long-standing bug which incorrectly caused `GET /_matrix/client/v3/rooms/{roomId}/event/{eventId}` to return edited events rather than the original. ([\#12476](https://github.com/matrix-org/synapse/issues/12476)) -- Fix a bug introduced in Synapse 1.27.0 where the admin API for [deleting forward extremities](https://github.com/matrix-org/synapse/blob/erikj/fix_delete_event_response_count/docs/admin_api/rooms.md#deleting-forward-extremities) would always return a count of 1, no matter how many extremities were deleted. ([\#12496](https://github.com/matrix-org/synapse/issues/12496)) -- Fix a long-standing bug where the image thumbnails embedded into email notifications were broken. ([\#12510](https://github.com/matrix-org/synapse/issues/12510)) -- Fix a bug in the implementation of [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202) where Synapse would use the field name `device_unused_fallback_keys`, rather than `device_unused_fallback_key_types`. ([\#12520](https://github.com/matrix-org/synapse/issues/12520)) -- Fix a bug introduced in Synapse 0.99.3 which could cause Synapse to consume large amounts of RAM when back-paginating in a large room. ([\#12522](https://github.com/matrix-org/synapse/issues/12522)) - - -Improved Documentation ----------------------- - -- Fix rendering of the documentation site when using the 'print' feature. ([\#12340](https://github.com/matrix-org/synapse/issues/12340)) -- Add a manual documenting config file options. ([\#12368](https://github.com/matrix-org/synapse/issues/12368), [\#12527](https://github.com/matrix-org/synapse/issues/12527)) -- Update documentation to reflect that both the `run_background_tasks_on` option and the options for moving stream writers off of the main process are no longer experimental. ([\#12451](https://github.com/matrix-org/synapse/issues/12451)) -- Update worker documentation and replace old `federation_reader` with `generic_worker`. ([\#12457](https://github.com/matrix-org/synapse/issues/12457)) -- Strongly recommend [Poetry](https://python-poetry.org/) for development. ([\#12475](https://github.com/matrix-org/synapse/issues/12475)) -- Add some example configurations for workers and update architectural diagram. ([\#12492](https://github.com/matrix-org/synapse/issues/12492)) -- Fix a broken link in `README.rst`. 
([\#12495](https://github.com/matrix-org/synapse/issues/12495)) -- Add HAProxy delegation example with CORS headers to docs. ([\#12501](https://github.com/matrix-org/synapse/issues/12501)) -- Remove extraneous comma in User Admin API's device deletion section so that the example JSON is actually valid and works. Contributed by @olmari. ([\#12533](https://github.com/matrix-org/synapse/issues/12533)) - - -Deprecations and Removals -------------------------- - -- The groups/communities feature in Synapse is now disabled by default. ([\#12344](https://github.com/matrix-org/synapse/issues/12344)) -- Remove unstable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440). ([\#12382](https://github.com/matrix-org/synapse/issues/12382)) - - -Internal Changes ----------------- - -- Preparation for faster-room-join work: start a background process to resynchronise the room state after a room join. ([\#12394](https://github.com/matrix-org/synapse/issues/12394)) -- Preparation for faster-room-join work: Implement a tracking mechanism to allow functions to wait for full room state to arrive. ([\#12399](https://github.com/matrix-org/synapse/issues/12399)) -- Remove an unstable identifier from [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083). ([\#12395](https://github.com/matrix-org/synapse/issues/12395)) -- Run CI in the locked [Poetry](https://python-poetry.org/) environment, and remove corresponding `tox` jobs. ([\#12425](https://github.com/matrix-org/synapse/issues/12425), [\#12434](https://github.com/matrix-org/synapse/issues/12434), [\#12438](https://github.com/matrix-org/synapse/issues/12438), [\#12441](https://github.com/matrix-org/synapse/issues/12441), [\#12449](https://github.com/matrix-org/synapse/issues/12449), [\#12478](https://github.com/matrix-org/synapse/issues/12478), [\#12514](https://github.com/matrix-org/synapse/issues/12514), [\#12472](https://github.com/matrix-org/synapse/issues/12472)) -- Change Mutual Rooms' `unstable_features` flag to `uk.half-shot.msc2666.mutual_rooms` which matches the current iteration of [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666). ([\#12445](https://github.com/matrix-org/synapse/issues/12445)) -- Fix typo in the release script help string. ([\#12450](https://github.com/matrix-org/synapse/issues/12450)) -- Fix a minor typo in the Debian changelogs generated by the release script. ([\#12497](https://github.com/matrix-org/synapse/issues/12497)) -- Reintroduce the list of targets to the linter script, to avoid linting unwanted local-only directories during development. ([\#12455](https://github.com/matrix-org/synapse/issues/12455)) -- Limit length of `device_id` to less than 512 characters. ([\#12454](https://github.com/matrix-org/synapse/issues/12454)) -- Dockerfile-workers: reduce the amount we install in the image. ([\#12464](https://github.com/matrix-org/synapse/issues/12464)) -- Dockerfile-workers: give the master its own log config. ([\#12466](https://github.com/matrix-org/synapse/issues/12466)) -- complement-synapse-workers: factor out separate entry point script. ([\#12467](https://github.com/matrix-org/synapse/issues/12467)) -- Back out experimental implementation of [MSC2314](https://github.com/matrix-org/matrix-spec-proposals/pull/2314). ([\#12474](https://github.com/matrix-org/synapse/issues/12474)) -- Fix grammatical error in federation error response when the room version of a room is unknown. 
([\#12483](https://github.com/matrix-org/synapse/issues/12483)) -- Remove unnecessary configuration overrides in tests. ([\#12511](https://github.com/matrix-org/synapse/issues/12511)) -- Refactor the relations code for clarity. ([\#12519](https://github.com/matrix-org/synapse/issues/12519)) -- Add type hints so `docker` and `stubs` directories pass `mypy --disallow-untyped-defs`. ([\#12528](https://github.com/matrix-org/synapse/issues/12528)) -- Update `delay_cancellation` to accept any awaitable, rather than just `Deferred`s. ([\#12468](https://github.com/matrix-org/synapse/issues/12468)) -- Handle cancellation in `EventsWorkerStore._get_events_from_cache_or_db`. ([\#12529](https://github.com/matrix-org/synapse/issues/12529)) - - -Synapse 1.57.1 (2022-04-20) -=========================== - -This is a patch release that only affects the Docker image. It is only of interest to administrators using [the LDAP module][LDAPModule] to authenticate their users. -If you have already upgraded to Synapse 1.57.0 without problem, then you have no need to upgrade to this patch release. - -[LDAPModule]: https://github.com/matrix-org/matrix-synapse-ldap3 - - -Updates to the Docker image ---------------------------- - -- Include version 0.2.0 of the Synapse LDAP Auth Provider module in the Docker image. This matches the version that was present in the Docker image for Synapse 1.56.0. ([\#12512](https://github.com/matrix-org/synapse/issues/12512)) - - -Synapse 1.57.0 (2022-04-19) -=========================== - -This version includes a [change](https://github.com/matrix-org/synapse/pull/12209) to the way transaction IDs are managed for application services. If your deployment uses a dedicated worker for application service traffic, **it must be stopped** when the database is upgraded (which normally happens when the main process is upgraded), to ensure the change is made safely without any risk of reusing transaction IDs. - -See the [upgrade notes](https://github.com/matrix-org/synapse/blob/v1.57.0rc1/docs/upgrade.md#upgrading-to-v1570) for more details. - -No significant changes since 1.57.0rc1. - - -Synapse 1.57.0rc1 (2022-04-12) -============================== - -Features --------- - -- Send device list changes to application services as specified by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202), using unstable prefixes. The `msc3202_transaction_extensions` experimental homeserver config option must be enabled and `org.matrix.msc3202: true` must be present in the application service registration file for device list changes to be sent. The "left" field is currently always empty. ([\#11881](https://github.com/matrix-org/synapse/issues/11881)) -- Optimise fetching large quantities of missing room state over federation. ([\#12040](https://github.com/matrix-org/synapse/issues/12040)) -- Offload the `update_client_ip` background job from the main process to the background worker, when using Redis-based replication. ([\#12251](https://github.com/matrix-org/synapse/issues/12251)) -- Move `update_client_ip` background job from the main process to the background worker. ([\#12252](https://github.com/matrix-org/synapse/issues/12252)) -- Add a module callback to react to new 3PID (email address, phone number) associations. ([\#12302](https://github.com/matrix-org/synapse/issues/12302)) -- Add a configuration option to remove a specific set of rooms from sync responses. ([\#12310](https://github.com/matrix-org/synapse/issues/12310)) -- Add a module callback to react to account data changes. 
([\#12327](https://github.com/matrix-org/synapse/issues/12327)) -- Allow setting user admin status using the module API. Contributed by Famedly. ([\#12341](https://github.com/matrix-org/synapse/issues/12341)) -- Reduce overhead of restarting synchrotrons. ([\#12367](https://github.com/matrix-org/synapse/issues/12367), [\#12372](https://github.com/matrix-org/synapse/issues/12372)) -- Update `/messages` to use historic pagination tokens if no `from` query parameter is given. ([\#12370](https://github.com/matrix-org/synapse/issues/12370)) -- Add a module API for reading and writing global account data. ([\#12391](https://github.com/matrix-org/synapse/issues/12391)) -- Support the stable `v1` endpoint for `/relations`, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#12403](https://github.com/matrix-org/synapse/issues/12403)) -- Include bundled aggregations in search results - ([MSC3666](https://github.com/matrix-org/matrix-spec-proposals/pull/3666)). ([\#12436](https://github.com/matrix-org/synapse/issues/12436)) - - -Bugfixes --------- - -- Fix a long-standing bug where updates to the server notices user profile (display name/avatar URL) in the configuration would not be applied to pre-existing rooms. Contributed by Jorge Florian. ([\#12115](https://github.com/matrix-org/synapse/issues/12115)) -- Fix a long-standing bug where events from ignored users were still considered for bundled aggregations. ([\#12235](https://github.com/matrix-org/synapse/issues/12235), [\#12338](https://github.com/matrix-org/synapse/issues/12338)) -- Fix non-member state events not resolving for historical events when used in [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) `/batch_send` `state_events_at_start`. ([\#12329](https://github.com/matrix-org/synapse/issues/12329)) -- Fix a long-standing bug affecting URL previews that would generate a 500 response instead of a 403 if the previewed URL includes a port that isn't allowed by the relevant blacklist. ([\#12333](https://github.com/matrix-org/synapse/issues/12333)) -- Default to `private` room visibility rather than `public` when a client does not specify one, according to spec. ([\#12350](https://github.com/matrix-org/synapse/issues/12350)) -- Fix a spec compliance issue where requests to the `/publicRooms` federation API would specify `limit` as a string. ([\#12364](https://github.com/matrix-org/synapse/issues/12364), [\#12410](https://github.com/matrix-org/synapse/issues/12410)) -- Fix a bug introduced in Synapse 1.49.0 which caused the `synapse_event_persisted_position` metric to have invalid values. ([\#12390](https://github.com/matrix-org/synapse/issues/12390)) - - -Updates to the Docker image ---------------------------- - -- Bundle locked versions of dependencies into the Docker image. ([\#12385](https://github.com/matrix-org/synapse/issues/12385), [\#12439](https://github.com/matrix-org/synapse/issues/12439)) -- Fix up healthcheck generation for workers docker image. ([\#12405](https://github.com/matrix-org/synapse/issues/12405)) - - -Improved Documentation ----------------------- - -- Clarify documentation for running SyTest against Synapse, including use of Postgres and worker mode. ([\#12271](https://github.com/matrix-org/synapse/issues/12271)) -- Document the behaviour of `LoggingTransaction.call_after` and `LoggingTransaction.call_on_exception` methods when transactions are retried. 
([\#12315](https://github.com/matrix-org/synapse/issues/12315)) -- Update dead links in `check-newsfragment.sh` to point to the correct documentation URL. ([\#12331](https://github.com/matrix-org/synapse/issues/12331)) -- Upgrade the version of `mdbook` in CI to 0.4.17. ([\#12339](https://github.com/matrix-org/synapse/issues/12339)) -- Updates to the Room DAG concepts development document to clarify that we mark events as outliers because we don't have any state for them. ([\#12345](https://github.com/matrix-org/synapse/issues/12345)) -- Update the link to Redis pub/sub documentation in the workers documentation. ([\#12369](https://github.com/matrix-org/synapse/issues/12369)) -- Remove documentation for converting a legacy structured logging configuration to the new format. ([\#12392](https://github.com/matrix-org/synapse/issues/12392)) - - -Deprecations and Removals -------------------------- - -- Remove the unused and unstable `/aggregations` endpoint which was removed from [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#12293](https://github.com/matrix-org/synapse/issues/12293)) - - -Internal Changes ----------------- - -- Remove lingering unstable references to MSC2403 (knocking). ([\#12165](https://github.com/matrix-org/synapse/issues/12165)) -- Avoid trying to calculate the state at outlier events. ([\#12191](https://github.com/matrix-org/synapse/issues/12191), [\#12316](https://github.com/matrix-org/synapse/issues/12316), [\#12330](https://github.com/matrix-org/synapse/issues/12330), [\#12332](https://github.com/matrix-org/synapse/issues/12332), [\#12409](https://github.com/matrix-org/synapse/issues/12409)) -- Omit sending "offline" presence updates to application services after they are initially configured. ([\#12193](https://github.com/matrix-org/synapse/issues/12193)) -- Switch to using a sequence to generate AS transaction IDs. Contributed by Nick @ Beeper. If running synapse with a dedicated appservice worker, this MUST be stopped before upgrading the main process and database. ([\#12209](https://github.com/matrix-org/synapse/issues/12209)) -- Add missing type hints for storage. ([\#12267](https://github.com/matrix-org/synapse/issues/12267)) -- Add missing type definitions for scripts in docker folder. Contributed by Jorge Florian. ([\#12280](https://github.com/matrix-org/synapse/issues/12280)) -- Move [MSC2654](https://github.com/matrix-org/matrix-doc/pull/2654) support behind an experimental configuration flag. ([\#12295](https://github.com/matrix-org/synapse/issues/12295)) -- Update docstrings to explain how to decipher live and historic pagination tokens. ([\#12317](https://github.com/matrix-org/synapse/issues/12317)) -- Add ground work for speeding up device list updates for users in large numbers of rooms. ([\#12321](https://github.com/matrix-org/synapse/issues/12321)) -- Fix typechecker problems exposed by signedjson 1.1.2. ([\#12326](https://github.com/matrix-org/synapse/issues/12326)) -- Remove the `tox` packaging job: it will be redundant once #11537 lands. ([\#12334](https://github.com/matrix-org/synapse/issues/12334)) -- Ignore `.envrc` for `direnv` users. ([\#12335](https://github.com/matrix-org/synapse/issues/12335)) -- Remove the (broadly unused, dev-only) dockerfile for pg tests. ([\#12336](https://github.com/matrix-org/synapse/issues/12336)) -- Remove redundant `get_success` calls in test code. ([\#12346](https://github.com/matrix-org/synapse/issues/12346)) -- Add type annotations for `tests/unittest.py`. 
([\#12347](https://github.com/matrix-org/synapse/issues/12347)) -- Move single-use methods out of `TestCase`. ([\#12348](https://github.com/matrix-org/synapse/issues/12348)) -- Remove broken and unused development scripts. ([\#12349](https://github.com/matrix-org/synapse/issues/12349), [\#12351](https://github.com/matrix-org/synapse/issues/12351), [\#12355](https://github.com/matrix-org/synapse/issues/12355)) -- Convert `Linearizer` tests from `inlineCallbacks` to async. ([\#12353](https://github.com/matrix-org/synapse/issues/12353)) -- Update docstrings for `ReadWriteLock` tests. ([\#12354](https://github.com/matrix-org/synapse/issues/12354)) -- Refactor `Linearizer`, convert methods to async and use an async context manager. ([\#12357](https://github.com/matrix-org/synapse/issues/12357)) -- Fix a long-standing bug where `Linearizer`s could get stuck if a cancellation were to happen at the wrong time. ([\#12358](https://github.com/matrix-org/synapse/issues/12358)) -- Make `StreamToken.from_string` and `RoomStreamToken.parse` propagate cancellations instead of replacing them with `SynapseError`s. ([\#12366](https://github.com/matrix-org/synapse/issues/12366)) -- Add type hints to tests files. ([\#12371](https://github.com/matrix-org/synapse/issues/12371)) -- Allow specifying the Postgres database's port when running unit tests with Postgres. ([\#12376](https://github.com/matrix-org/synapse/issues/12376)) -- Remove temporary pin of signedjson<=1.1.1 that was added in Synapse 1.56.0. ([\#12379](https://github.com/matrix-org/synapse/issues/12379)) -- Add opentracing spans to calls to external cache. ([\#12380](https://github.com/matrix-org/synapse/issues/12380)) -- Lay groundwork for using `poetry` to manage Synapse's dependencies. ([\#12381](https://github.com/matrix-org/synapse/issues/12381), [\#12407](https://github.com/matrix-org/synapse/issues/12407), [\#12412](https://github.com/matrix-org/synapse/issues/12412), [\#12418](https://github.com/matrix-org/synapse/issues/12418)) -- Make missing `importlib_metadata` dependency explicit. ([\#12384](https://github.com/matrix-org/synapse/issues/12384), [\#12400](https://github.com/matrix-org/synapse/issues/12400)) -- Update type annotations for compatiblity with prometheus_client 0.14. ([\#12389](https://github.com/matrix-org/synapse/issues/12389)) -- Remove support for the unstable identifiers specified in [MSC3288](https://github.com/matrix-org/matrix-doc/pull/3288). ([\#12398](https://github.com/matrix-org/synapse/issues/12398)) -- Add missing type hints to configuration classes. ([\#12402](https://github.com/matrix-org/synapse/issues/12402)) -- Add files used to build the Docker image used for complement testing into the Synapse repository. ([\#12404](https://github.com/matrix-org/synapse/issues/12404)) -- Do not include groups in the sync response when disabled. ([\#12408](https://github.com/matrix-org/synapse/issues/12408)) -- Improve type hints related to HTTP query parameters. ([\#12415](https://github.com/matrix-org/synapse/issues/12415)) -- Stop maintaining a list of lint targets. ([\#12420](https://github.com/matrix-org/synapse/issues/12420)) -- Make `synapse._scripts` pass type checks. ([\#12421](https://github.com/matrix-org/synapse/issues/12421), [\#12422](https://github.com/matrix-org/synapse/issues/12422)) -- Add some type hints to datastore. ([\#12423](https://github.com/matrix-org/synapse/issues/12423)) -- Enable certificate checking during complement tests. 
([\#12435](https://github.com/matrix-org/synapse/issues/12435))
- Explicitly specify the `tls` extra for Twisted dependency. ([\#12444](https://github.com/matrix-org/synapse/issues/12444))


Synapse 1.56.0 (2022-04-05)
===========================

Synapse will now refuse to start up if open registration is enabled, in order to help mitigate
abuse across the federation. If you would like to offer registration to anyone, consider adding
[email](https://github.com/matrix-org/synapse/blob/8a519f8abc6de772167c2cca101d22ee2052fafc/docs/sample_config.yaml#L1285),
[recaptcha](https://matrix-org.github.io/synapse/v1.56/CAPTCHA_SETUP.html)
or [token-based](https://matrix-org.github.io/synapse/v1.56/usage/administration/admin_api/registration_tokens.html) verification
to prevent automated registration by bad actors.
This check can be disabled by setting the `enable_registration_without_verification` option in your
homeserver configuration file to `true`. More details are available in the
[upgrade notes](https://matrix-org.github.io/synapse/v1.56/upgrade.html#open-registration-without-verification-is-now-disabled-by-default).

Synapse will additionally now refuse to start when using PostgreSQL with non-`C` values for `COLLATE` and `CTYPE`, unless
the config flag `allow_unsafe_locale`, found in the database section of the configuration file, is set to `true`. See the
[upgrade notes](https://matrix-org.github.io/synapse/v1.56/upgrade#change-in-behaviour-for-postgresql-databases-with-unsafe-locale)
for details.
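
For illustration only: the locale check described above boils down to reading the database's collation and ctype settings at startup and refusing to continue when they are unsafe. Below is a minimal sketch of such a check, assuming `psycopg2` and a placeholder DSN; it is not Synapse's actual implementation:

```python
import psycopg2


def assert_safe_locale(dsn: str, allow_unsafe_locale: bool = False) -> None:
    """Raise if the database's collation/ctype is not 'C' and no override is set."""
    conn = psycopg2.connect(dsn)  # placeholder DSN, e.g. "dbname=synapse user=synapse"
    try:
        with conn.cursor() as cur:
            # Read the per-database collation settings from the catalog.
            cur.execute(
                "SELECT datcollate, datctype FROM pg_database"
                " WHERE datname = current_database()"
            )
            collate, ctype = cur.fetchone()
    finally:
        conn.close()

    if (collate != "C" or ctype != "C") and not allow_unsafe_locale:
        raise RuntimeError(
            f"Unsafe database locale: COLLATE={collate!r}, CTYPE={ctype!r}; "
            "set allow_unsafe_locale to true to override."
        )
```
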
Internal Changes
----------------

- Bump the version of `black` for compatibility with the latest `click` release. ([\#12320](https://github.com/matrix-org/synapse/issues/12320))


Synapse 1.56.0rc1 (2022-03-29)
==============================

Features
--------

- Allow modules to store already existing 3PID associations. ([\#12195](https://github.com/matrix-org/synapse/issues/12195))
- Allow registering server administrators using the module API. Contributed by Famedly. ([\#12250](https://github.com/matrix-org/synapse/issues/12250))


Bugfixes
--------

- Fix a long-standing bug which caused the `/_matrix/federation/v1/state` and `/_matrix/federation/v1/state_ids` endpoints to return incorrect or invalid data when called for an event which we have stored as an "outlier". ([\#12087](https://github.com/matrix-org/synapse/issues/12087))
- Fix a long-standing bug where events from ignored users would still be considered for relations. ([\#12227](https://github.com/matrix-org/synapse/issues/12227), [\#12232](https://github.com/matrix-org/synapse/issues/12232), [\#12285](https://github.com/matrix-org/synapse/issues/12285))
- Fix a bug introduced in Synapse 1.53.0 where an unnecessary query could be performed when fetching bundled aggregations for threads. ([\#12228](https://github.com/matrix-org/synapse/issues/12228))
- Fix a bug introduced in Synapse 1.52.0 where admins could not deactivate and GDPR-erase a user if Synapse was configured with limits on avatars. ([\#12261](https://github.com/matrix-org/synapse/issues/12261))


Improved Documentation
----------------------

- Fix the link to the module documentation in the legacy spam checker warning message. ([\#12231](https://github.com/matrix-org/synapse/issues/12231))
- Remove incorrect prefixes in the worker documentation for some endpoints. ([\#12243](https://github.com/matrix-org/synapse/issues/12243))
- Correct `check_username_for_spam` annotations and docs. ([\#12246](https://github.com/matrix-org/synapse/issues/12246))
- Correct Authentik OpenID typo, and add notes on troubleshooting. Contributed by @IronTooch. ([\#12275](https://github.com/matrix-org/synapse/issues/12275))
- HAProxy reverse proxy guide update to stop sending IPv4-mapped address to homeserver. Contributed by @villepeh. ([\#12279](https://github.com/matrix-org/synapse/issues/12279))


Internal Changes
----------------

- Rename `shared_rooms` to `mutual_rooms` ([MSC2666](https://github.com/matrix-org/matrix-doc/pull/2666)), as per proposal changes. ([\#12036](https://github.com/matrix-org/synapse/issues/12036))
- Remove check on `update_user_directory` for shared rooms handler ([MSC2666](https://github.com/matrix-org/matrix-doc/pull/2666)), and update/expand documentation. ([\#12038](https://github.com/matrix-org/synapse/issues/12038))
- Refactor `create_new_client_event` to use a new parameter, `state_event_ids`, which accurately describes the usage with [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) instead of abusing `auth_event_ids`. ([\#12083](https://github.com/matrix-org/synapse/issues/12083), [\#12304](https://github.com/matrix-org/synapse/issues/12304))
- Refuse to start if registration is enabled without email, captcha, or token-based verification unless the new config flag `enable_registration_without_verification` is set to `true`; a sketch of this kind of guard follows this list. ([\#12091](https://github.com/matrix-org/synapse/issues/12091), [\#12322](https://github.com/matrix-org/synapse/issues/12322))
- Add tests for database transaction callbacks. ([\#12198](https://github.com/matrix-org/synapse/issues/12198))
- Handle cancellation in `DatabasePool.runInteraction`. ([\#12199](https://github.com/matrix-org/synapse/issues/12199))
- Add missing type hints for cache storage. ([\#12216](https://github.com/matrix-org/synapse/issues/12216))
- Add missing type hints for storage. ([\#12248](https://github.com/matrix-org/synapse/issues/12248), [\#12255](https://github.com/matrix-org/synapse/issues/12255))
- Add type hints to tests files. ([\#12224](https://github.com/matrix-org/synapse/issues/12224), [\#12240](https://github.com/matrix-org/synapse/issues/12240), [\#12256](https://github.com/matrix-org/synapse/issues/12256))
- Use type stubs for `psycopg2`. ([\#12269](https://github.com/matrix-org/synapse/issues/12269))
- Improve type annotations for `execute_values`. ([\#12311](https://github.com/matrix-org/synapse/issues/12311))
- Clean up logic around rebasing URLs for URL image previews. ([\#12219](https://github.com/matrix-org/synapse/issues/12219))
- Use the `ignored_users` table in additional places instead of re-parsing the account data. ([\#12225](https://github.com/matrix-org/synapse/issues/12225))
- Refactor the relations endpoints to add a `RelationsHandler`. ([\#12237](https://github.com/matrix-org/synapse/issues/12237))
- Generate announcement links in the release script. ([\#12242](https://github.com/matrix-org/synapse/issues/12242))
- Improve the error message when the dependencies check finds a broken installation. ([\#12244](https://github.com/matrix-org/synapse/issues/12244))
- Compress metrics HTTP resource when enabled. Contributed by Nick @ Beeper. ([\#12258](https://github.com/matrix-org/synapse/issues/12258))
- Refuse to start if the PostgreSQL database has a non-`C` locale, unless the config flag `allow_unsafe_locale` is set to true. ([\#12262](https://github.com/matrix-org/synapse/issues/12262), [\#12288](https://github.com/matrix-org/synapse/issues/12288))
- Optionally include account validity expiration information in experimental [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) account status responses. ([\#12266](https://github.com/matrix-org/synapse/issues/12266))
- Add a new cache `_get_membership_from_event_id` to speed up push rule calculations in large rooms. ([\#12272](https://github.com/matrix-org/synapse/issues/12272))
- Re-enable Complement concurrency in CI. ([\#12283](https://github.com/matrix-org/synapse/issues/12283))
- Remove unused test utilities. ([\#12291](https://github.com/matrix-org/synapse/issues/12291))
- Enhance logging for inbound federation events. ([\#12301](https://github.com/matrix-org/synapse/issues/12301))
- Fix compatibility with the recently-released Jinja 3.1. ([\#12313](https://github.com/matrix-org/synapse/issues/12313))
- Avoid trying to calculate the state at outlier events. ([\#12314](https://github.com/matrix-org/synapse/issues/12314))
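
As a companion to the registration change referenced above, here is a minimal sketch of the kind of startup guard described in [\#12091](https://github.com/matrix-org/synapse/issues/12091). The config keys mirror the homeserver options named in these notes (`enable_registration`, `registrations_require_3pid`, `enable_registration_captcha`, `registration_requires_token`, `enable_registration_without_verification`); this is illustrative, not Synapse's actual code:

```python
def check_open_registration(config: dict) -> None:
    """Raise at startup if open registration is enabled with no verification step."""
    if not config.get("enable_registration", False):
        return  # registration disabled: nothing to check

    has_verification = bool(
        config.get("registrations_require_3pid")  # e.g. ["email"]
        or config.get("enable_registration_captcha")
        or config.get("registration_requires_token")
    )
    if has_verification or config.get("enable_registration_without_verification", False):
        return

    raise RuntimeError(
        "Registration is enabled without any verification. Enable email, captcha or "
        "token-based verification, or set enable_registration_without_verification "
        "to true to override."
    )


# Example: check_open_registration({"enable_registration": True}) would raise.
```
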
Synapse 1.55.2 (2022-03-24)
===========================

This patch version reverts the earlier fixes from Synapse 1.55.1, which could cause problems in certain deployments, and instead adds a cap to the version of Jinja to be installed. Again, this is to fix an incompatibility with version 3.1.0 of the [Jinja](https://pypi.org/project/Jinja2/) library, and again, deployments of Synapse using the `matrixdotorg/synapse` Docker image or Debian packages from packages.matrix.org are not affected.

Internal Changes
----------------

- Pin Jinja to <3.1.0, as Synapse fails to start with Jinja 3.1.0. ([\#12297](https://github.com/matrix-org/synapse/issues/12297))
- Revert changes from 1.55.1 as they caused problems with older versions of Jinja. ([\#12296](https://github.com/matrix-org/synapse/issues/12296))


Synapse 1.55.1 (2022-03-24)
===========================

This is a patch release that fixes an incompatibility with version 3.1.0 of the [Jinja](https://pypi.org/project/Jinja2/) library, released on March 24th, 2022. Deployments of Synapse using the `matrixdotorg/synapse` Docker image or Debian packages from packages.matrix.org are not affected.

Internal Changes
----------------

- Remove uses of the long-deprecated `jinja2.Markup` which would prevent Synapse from starting with Jinja 3.1.0 or above installed. ([\#12289](https://github.com/matrix-org/synapse/issues/12289))


Synapse 1.55.0 (2022-03-22)
===========================

This release removes a workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. **This breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**; Mjolnir users should upgrade Mjolnir before upgrading Synapse to this version.

This release also moves the location of the `synctl` script; see the [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved) for more details.


Internal Changes
----------------

- Tweak copy for default Single Sign-On account details template to better adhere to mobile app store guidelines. ([\#12265](https://github.com/matrix-org/synapse/issues/12265), [\#12260](https://github.com/matrix-org/synapse/issues/12260))


Synapse 1.55.0rc1 (2022-03-15)
==============================

Features
--------

- Add third-party rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. ([\#12028](https://github.com/matrix-org/synapse/issues/12028))
- Improve performance of logging in for large accounts. ([\#12132](https://github.com/matrix-org/synapse/issues/12132))
- Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted; see the sketch after this list. ([\#12135](https://github.com/matrix-org/synapse/issues/12135))
- Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. ([\#12151](https://github.com/matrix-org/synapse/issues/12151))
- Add a new Jinja2 template filter to extract the local part of an email address. ([\#12212](https://github.com/matrix-org/synapse/issues/12212))
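
For illustration: Twisted's asyncio-backed reactor must be installed before `twisted.internet.reactor` is imported for the first time, which is why this behaviour is selected by an environment variable read at startup. A minimal sketch of the mechanism, assuming only that Twisted is installed; this is not Synapse's actual startup code:

```python
import asyncio

from twisted.internet import asyncioreactor

# Must happen before anything imports twisted.internet.reactor.
asyncioreactor.install(asyncio.new_event_loop())

from twisted.internet import reactor  # now an AsyncioSelectorReactor


def main() -> None:
    print(f"Reactor in use: {reactor.__class__.__name__}")
    reactor.callLater(0, reactor.stop)  # stop immediately once running
    reactor.run()


if __name__ == "__main__":
    main()
```
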
Bugfixes
--------

- Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse 1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090))
- Fix a long-standing bug when redacting events with relations. ([\#12113](https://github.com/matrix-org/synapse/issues/12113), [\#12121](https://github.com/matrix-org/synapse/issues/12121), [\#12130](https://github.com/matrix-org/synapse/issues/12130), [\#12189](https://github.com/matrix-org/synapse/issues/12189))
- Fix a bug introduced in Synapse 1.7.2 whereby background updates were never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157))
- Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse 1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175))
- Fix a bug introduced in Synapse 1.54.0 that broke background updates on SQLite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215))
- Fix a long-standing bug where a `filter` argument whose `event_fields` did not include the `unsigned` field could result in a 500 error on `/sync`; see the example after this list. ([\#12234](https://github.com/matrix-org/synapse/issues/12234))
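
To make the `event_fields` fix above concrete, here is a hedged example of the kind of request involved: a `/sync` call whose filter trims events down to a whitelist of fields that omits `unsigned`, following the Matrix client-server API's filter schema. The homeserver URL and access token are placeholders:

```python
import json
import urllib.parse
import urllib.request

HOMESERVER = "https://matrix.example.com"  # placeholder
ACCESS_TOKEN = "syt_..."                   # placeholder

# A top-level event_fields list that omits "unsigned" -- the case that
# previously could produce a 500 from /sync.
sync_filter = {
    "event_fields": ["type", "content", "sender"],
    "room": {"timeline": {"limit": 10}},
}

url = f"{HOMESERVER}/_matrix/client/v3/sync?" + urllib.parse.urlencode(
    {"filter": json.dumps(sync_filter)}
)
request = urllib.request.Request(
    url, headers={"Authorization": f"Bearer {ACCESS_TOKEN}"}
)
with urllib.request.urlopen(request) as response:
    print(response.status)
```
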
Improved Documentation
----------------------

- Fix complexity checking config example in [Resource Constrained Devices](https://matrix-org.github.io/synapse/v1.54/other/running_synapse_on_single_board_computers.html) docs page. ([\#11998](https://github.com/matrix-org/synapse/issues/11998))
- Improve documentation for demo scripts. ([\#12143](https://github.com/matrix-org/synapse/issues/12143))
- Updates to the Room DAG concepts development document. ([\#12179](https://github.com/matrix-org/synapse/issues/12179))
- Document that the `typing`, `to_device`, `account_data`, `receipts`, and `presence` stream writers can only be used on a single worker. ([\#12196](https://github.com/matrix-org/synapse/issues/12196))
- Document that contributors can sign off privately by email. ([\#12204](https://github.com/matrix-org/synapse/issues/12204))


Deprecations and Removals
-------------------------

- **Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**
- **`synctl` has been moved into `synapse._scripts` and is exposed as an entry point; see [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved).** ([\#12140](https://github.com/matrix-org/synapse/issues/12140))
- Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138))
- The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200))


Internal Changes
----------------

- Simplify the `ApplicationService` class' set of public methods related to interest checking. ([\#11915](https://github.com/matrix-org/synapse/issues/11915))
- Add config settings for background update parameters. ([\#11980](https://github.com/matrix-org/synapse/issues/11980))
- Correct type hints for txredis. ([\#12042](https://github.com/matrix-org/synapse/issues/12042))
- Limit the size of `aggregation_key` on annotations. ([\#12101](https://github.com/matrix-org/synapse/issues/12101))
- Add type hints to tests files. ([\#12108](https://github.com/matrix-org/synapse/issues/12108), [\#12146](https://github.com/matrix-org/synapse/issues/12146), [\#12207](https://github.com/matrix-org/synapse/issues/12207), [\#12208](https://github.com/matrix-org/synapse/issues/12208))
- Move scripts to Synapse package and expose as setuptools entry points. ([\#12118](https://github.com/matrix-org/synapse/issues/12118))
- Add support for cancellation to `ReadWriteLock`. ([\#12120](https://github.com/matrix-org/synapse/issues/12120))
- Fix data validation to compare to lists, not sequences. ([\#12128](https://github.com/matrix-org/synapse/issues/12128))
- Fix CI not attaching source distributions and wheels to the GitHub releases. ([\#12131](https://github.com/matrix-org/synapse/issues/12131))
- Remove unused mocks from `test_typing`. ([\#12136](https://github.com/matrix-org/synapse/issues/12136))
- Give `scripts-dev` scripts suffixes for neater CI config. ([\#12137](https://github.com/matrix-org/synapse/issues/12137))
- Move the snapcraft configuration file to `contrib`. ([\#12142](https://github.com/matrix-org/synapse/issues/12142))
- Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI. ([\#12144](https://github.com/matrix-org/synapse/issues/12144))
- Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI. ([\#12145](https://github.com/matrix-org/synapse/issues/12145))
- Add test for `ObservableDeferred`'s cancellation behaviour. ([\#12149](https://github.com/matrix-org/synapse/issues/12149))
- Use `ParamSpec` in type hints for `synapse.logging.context`. ([\#12150](https://github.com/matrix-org/synapse/issues/12150))
- Prune unused jobs from `tox` config. ([\#12152](https://github.com/matrix-org/synapse/issues/12152))
- Move CI checks out of tox, to facilitate a move to using poetry. ([\#12153](https://github.com/matrix-org/synapse/issues/12153))
- Avoid generating state groups for local out-of-band leaves. ([\#12154](https://github.com/matrix-org/synapse/issues/12154))
- Avoid trying to calculate the state at outlier events. ([\#12155](https://github.com/matrix-org/synapse/issues/12155), [\#12173](https://github.com/matrix-org/synapse/issues/12173), [\#12202](https://github.com/matrix-org/synapse/issues/12202))
- Fix some type annotations. ([\#12156](https://github.com/matrix-org/synapse/issues/12156))
- Add type hints for `ObservableDeferred` attributes. ([\#12159](https://github.com/matrix-org/synapse/issues/12159))
- Use a prebuilt Action for the `tests-done` CI job. ([\#12161](https://github.com/matrix-org/synapse/issues/12161))
- Reduce number of DB queries made during processing of `/sync`.
([\#12163](https://github.com/matrix-org/synapse/issues/12163)) -- Add `delay_cancellation` utility function, which behaves like `stop_cancellation` but waits until the original `Deferred` resolves before raising a `CancelledError`. ([\#12180](https://github.com/matrix-org/synapse/issues/12180)) -- Retry HTTP replication failures, this should prevent 502's when restarting stateful workers (main, event persisters, stream writers). Contributed by Nick @ Beeper. ([\#12182](https://github.com/matrix-org/synapse/issues/12182)) -- Add cancellation support to `@cached` and `@cachedList` decorators. ([\#12183](https://github.com/matrix-org/synapse/issues/12183)) -- Remove unused variables. ([\#12187](https://github.com/matrix-org/synapse/issues/12187)) -- Add combined test for HTTP pusher and push rule. Contributed by Nick @ Beeper. ([\#12188](https://github.com/matrix-org/synapse/issues/12188)) -- Rename `HomeServer.get_tcp_replication` to `get_replication_command_handler`. ([\#12192](https://github.com/matrix-org/synapse/issues/12192)) -- Remove some dead code. ([\#12197](https://github.com/matrix-org/synapse/issues/12197)) -- Fix a misleading comment in the function `check_event_for_spam`. ([\#12203](https://github.com/matrix-org/synapse/issues/12203)) -- Remove unnecessary `pass` statements. ([\#12206](https://github.com/matrix-org/synapse/issues/12206)) -- Update the SSO username picker template to comply with SIWA guidelines. ([\#12210](https://github.com/matrix-org/synapse/issues/12210)) -- Improve code documentation for the typing stream over replication. ([\#12211](https://github.com/matrix-org/synapse/issues/12211)) - - -Synapse 1.54.0 (2022-03-08) -=========================== - -Please note that this will be the last release of Synapse that is compatible with Mjolnir 1.3.1 and earlier. -Administrators of servers which have the Mjolnir module installed are advised to upgrade Mjolnir to version 1.3.2 or later. - - -Bugfixes --------- - -- Fix a bug introduced in Synapse 1.54.0rc1 preventing the new module callbacks introduced in this release from being registered by modules. ([\#12141](https://github.com/matrix-org/synapse/issues/12141)) -- Fix a bug introduced in Synapse 1.54.0rc1 where runtime dependency version checks would mistakenly check development dependencies if they were present and would not accept pre-release versions of dependencies. ([\#12129](https://github.com/matrix-org/synapse/issues/12129), [\#12177](https://github.com/matrix-org/synapse/issues/12177)) - - -Internal Changes ----------------- - -- Update release script to insert the previous version when writing "No significant changes" line in the changelog. ([\#12127](https://github.com/matrix-org/synapse/issues/12127)) -- Relax the version guard for "packaging" added in [\#12088](https://github.com/matrix-org/synapse/issues/12088). ([\#12166](https://github.com/matrix-org/synapse/issues/12166)) - - -Synapse 1.54.0rc1 (2022-03-02) -============================== - - -Features --------- - -- Add support for [MSC3202](https://github.com/matrix-org/matrix-doc/pull/3202): sending one-time key counts and fallback key usage states to Application Services. ([\#11617](https://github.com/matrix-org/synapse/issues/11617)) -- Improve the generated URL previews for some web pages. Contributed by @AndrewRyanChama. ([\#11985](https://github.com/matrix-org/synapse/issues/11985)) -- Track cache invalidations in Prometheus metrics, as already happens for cache eviction based on size or time. 
([\#12000](https://github.com/matrix-org/synapse/issues/12000)) -- Implement experimental support for [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) (account status endpoints). ([\#12001](https://github.com/matrix-org/synapse/issues/12001), [\#12067](https://github.com/matrix-org/synapse/issues/12067)) -- Enable modules to set a custom display name when registering a user. ([\#12009](https://github.com/matrix-org/synapse/issues/12009)) -- Advertise Matrix 1.1 and 1.2 support on `/_matrix/client/versions`. ([\#12020](https://github.com/matrix-org/synapse/issues/12020), ([\#12022](https://github.com/matrix-org/synapse/issues/12022)) -- Support only the stable identifier for [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069)'s `is_guest` on `/_matrix/client/v3/account/whoami`. ([\#12021](https://github.com/matrix-org/synapse/issues/12021)) -- Use room version 9 as the default room version (per [MSC3589](https://github.com/matrix-org/matrix-doc/pull/3589)). ([\#12058](https://github.com/matrix-org/synapse/issues/12058)) -- Add module callbacks to react to user deactivation status changes (i.e. deactivations and reactivations) and profile updates. ([\#12062](https://github.com/matrix-org/synapse/issues/12062)) - - -Bugfixes --------- - -- Fix a bug introduced in Synapse 1.48.0 where an edit of the latest event in a thread would not be properly applied to the thread summary. ([\#11992](https://github.com/matrix-org/synapse/issues/11992)) -- Fix long-standing bug where the `get_rooms_for_user` cache was not correctly invalidated for remote users when the server left a room. ([\#11999](https://github.com/matrix-org/synapse/issues/11999)) -- Fix a 500 error with Postgres when looking backwards with the [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) `/timestamp_to_event?dir=b` endpoint. ([\#12024](https://github.com/matrix-org/synapse/issues/12024)) -- Properly fix a long-standing bug where wrong data could be inserted into the `event_search` table when using SQLite. This could block running `synapse_port_db` with an `argument of type 'int' is not iterable` error. This bug was partially fixed by a change in Synapse 1.44.0. ([\#12037](https://github.com/matrix-org/synapse/issues/12037)) -- Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens in version 1.38.0. ([\#12056](https://github.com/matrix-org/synapse/issues/12056)) -- Fix a long-standing bug where Synapse would make additional failing requests over federation for missing data. ([\#12077](https://github.com/matrix-org/synapse/issues/12077)) -- Fix occasional `Unhandled error in Deferred` error message. ([\#12089](https://github.com/matrix-org/synapse/issues/12089)) -- Fix a bug introduced in Synapse 1.51.0 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#12098](https://github.com/matrix-org/synapse/issues/12098)) -- Fix a long-standing bug which could cause push notifications to malfunction if `use_frozen_dicts` was set in the configuration. ([\#12100](https://github.com/matrix-org/synapse/issues/12100)) -- Fix an extremely rare, long-standing bug in `ReadWriteLock` that would cause an error when a newly unblocked writer completes instantly. 
([\#12105](https://github.com/matrix-org/synapse/issues/12105)) -- Make a `POST` to `/rooms//receipt/m.read/` only trigger a push notification if the count of unread messages is different to the one in the last successfully sent push. This reduces server load and load on the receiving device. ([\#11835](https://github.com/matrix-org/synapse/issues/11835)) - - -Updates to the Docker image ---------------------------- - -- The Docker image no longer automatically creates a temporary volume at `/data`. This is not expected to affect normal usage. ([\#11997](https://github.com/matrix-org/synapse/issues/11997)) -- Use Python 3.9 in Docker images by default. ([\#12112](https://github.com/matrix-org/synapse/issues/12112)) - - -Improved Documentation ----------------------- - -- Document support for the `to_device`, `account_data`, `receipts`, and `presence` stream writers for workers. ([\#11599](https://github.com/matrix-org/synapse/issues/11599)) -- Explain the meaning of spam checker callbacks' return values. ([\#12003](https://github.com/matrix-org/synapse/issues/12003)) -- Clarify information about external Identity Provider IDs. ([\#12004](https://github.com/matrix-org/synapse/issues/12004)) - - -Deprecations and Removals -------------------------- - -- Deprecate using `synctl` with the config option `synctl_cache_factor` and print a warning if a user still uses this option. ([\#11865](https://github.com/matrix-org/synapse/issues/11865)) -- Remove support for the legacy structured logging configuration (please see the the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#legacy-structured-logging-configuration-removal) if you are using `structured: true` in the Synapse configuration). ([\#12008](https://github.com/matrix-org/synapse/issues/12008)) -- Drop support for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283) unstable flags now that the stable flags are supported. ([\#12018](https://github.com/matrix-org/synapse/issues/12018)) -- Remove the unstable `/spaces` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). ([\#12073](https://github.com/matrix-org/synapse/issues/12073)) - - -Internal Changes ----------------- - -- Make the `get_room_version` method use `get_room_version_id` to benefit from caching. ([\#11808](https://github.com/matrix-org/synapse/issues/11808)) -- Remove unnecessary condition on knock -> leave auth rule check. ([\#11900](https://github.com/matrix-org/synapse/issues/11900)) -- Add tests for device list changes between local users. ([\#11972](https://github.com/matrix-org/synapse/issues/11972)) -- Optimise calculating `device_list` changes in `/sync`. ([\#11974](https://github.com/matrix-org/synapse/issues/11974)) -- Add missing type hints to storage classes. ([\#11984](https://github.com/matrix-org/synapse/issues/11984)) -- Refactor the search code for improved readability. ([\#11991](https://github.com/matrix-org/synapse/issues/11991)) -- Move common deduplication code down into `_auth_and_persist_outliers`. ([\#11994](https://github.com/matrix-org/synapse/issues/11994)) -- Limit concurrent joins from applications services. ([\#11996](https://github.com/matrix-org/synapse/issues/11996)) -- Preparation for faster-room-join work: when parsing the `send_join` response, get the `m.room.create` event from `state`, not `auth_chain`. 
([\#12005](https://github.com/matrix-org/synapse/issues/12005), [\#12039](https://github.com/matrix-org/synapse/issues/12039)) -- Preparation for faster-room-join work: parse MSC3706 fields in send_join response. ([\#12011](https://github.com/matrix-org/synapse/issues/12011)) -- Preparation for faster-room-join work: persist information on which events and rooms have partial state to the database. ([\#12012](https://github.com/matrix-org/synapse/issues/12012)) -- Preparation for faster-room-join work: Support for calling `/federation/v1/state` on a remote server. ([\#12013](https://github.com/matrix-org/synapse/issues/12013)) -- Configure `tox` to use `venv` rather than `virtualenv`. ([\#12015](https://github.com/matrix-org/synapse/issues/12015)) -- Fix bug in `StateFilter.return_expanded()` and add some tests. ([\#12016](https://github.com/matrix-org/synapse/issues/12016)) -- Use Matrix v1.1 endpoints (`/_matrix/client/v3/auth/...`) in fallback auth HTML forms. ([\#12019](https://github.com/matrix-org/synapse/issues/12019)) -- Update the `olddeps` CI job to use an old version of `markupsafe`. ([\#12025](https://github.com/matrix-org/synapse/issues/12025)) -- Upgrade Mypy to version 0.931. ([\#12030](https://github.com/matrix-org/synapse/issues/12030)) -- Remove legacy `HomeServer.get_datastore()`. ([\#12031](https://github.com/matrix-org/synapse/issues/12031), [\#12070](https://github.com/matrix-org/synapse/issues/12070)) -- Minor typing fixes. ([\#12034](https://github.com/matrix-org/synapse/issues/12034), [\#12069](https://github.com/matrix-org/synapse/issues/12069)) -- After joining a room, create a dedicated logcontext to process the queued events. ([\#12041](https://github.com/matrix-org/synapse/issues/12041)) -- Tidy up GitHub Actions config which builds distributions for PyPI. ([\#12051](https://github.com/matrix-org/synapse/issues/12051)) -- Move configuration out of `setup.cfg`. ([\#12052](https://github.com/matrix-org/synapse/issues/12052), [\#12059](https://github.com/matrix-org/synapse/issues/12059)) -- Fix error message when a worker process fails to talk to another worker process. ([\#12060](https://github.com/matrix-org/synapse/issues/12060)) -- Fix using the `complement.sh` script without specifying a directory or a branch. Contributed by Nico on behalf of Famedly. ([\#12063](https://github.com/matrix-org/synapse/issues/12063)) -- Add type hints to `tests/rest/client`. ([\#12066](https://github.com/matrix-org/synapse/issues/12066), [\#12072](https://github.com/matrix-org/synapse/issues/12072), [\#12084](https://github.com/matrix-org/synapse/issues/12084), [\#12094](https://github.com/matrix-org/synapse/issues/12094)) -- Add some logging to `/sync` to try and track down #11916. ([\#12068](https://github.com/matrix-org/synapse/issues/12068)) -- Inspect application dependencies using `importlib.metadata` or its backport. ([\#12088](https://github.com/matrix-org/synapse/issues/12088)) -- Use `assertEqual` instead of the deprecated `assertEquals` in test code. ([\#12092](https://github.com/matrix-org/synapse/issues/12092)) -- Move experimental support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) to `/versions`. ([\#12099](https://github.com/matrix-org/synapse/issues/12099)) -- Add `stop_cancellation` utility function to stop `Deferred`s from being cancelled. ([\#12106](https://github.com/matrix-org/synapse/issues/12106)) -- Improve exception handling for concurrent execution. 
([\#12109](https://github.com/matrix-org/synapse/issues/12109)) -- Advertise support for Python 3.10 in packaging files. ([\#12111](https://github.com/matrix-org/synapse/issues/12111)) -- Move CI checks out of tox, to facilitate a move to using poetry. ([\#12119](https://github.com/matrix-org/synapse/issues/12119)) - - -Synapse 1.53.0 (2022-02-22) -=========================== - -No significant changes since 1.53.0rc1. - - -Synapse 1.53.0rc1 (2022-02-15) -============================== - -Features --------- - -- Add experimental support for sending to-device messages to application services, as specified by [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409). ([\#11215](https://github.com/matrix-org/synapse/issues/11215), [\#11966](https://github.com/matrix-org/synapse/issues/11966)) -- Add a background database update to purge account data for deactivated users. ([\#11655](https://github.com/matrix-org/synapse/issues/11655)) -- Experimental support for [MSC3666](https://github.com/matrix-org/matrix-doc/pull/3666): including bundled aggregations in server side search results. ([\#11837](https://github.com/matrix-org/synapse/issues/11837)) -- Enable cache time-based expiry by default. The `expiry_time` config flag has been superseded by `expire_caches` and `cache_entry_ttl`. ([\#11849](https://github.com/matrix-org/synapse/issues/11849)) -- Add a callback to allow modules to allow or forbid a 3PID (email address, phone number) from being associated to a local account. ([\#11854](https://github.com/matrix-org/synapse/issues/11854)) -- Stabilize support and remove unstable endpoints for [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). Clients must switch to the stable identifier and endpoint. See the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#stablisation-of-msc3231) for more information. ([\#11867](https://github.com/matrix-org/synapse/issues/11867)) -- Allow modules to retrieve the current instance's server name and worker name. ([\#11868](https://github.com/matrix-org/synapse/issues/11868)) -- Use a dedicated configurable rate limiter for 3PID invites. ([\#11892](https://github.com/matrix-org/synapse/issues/11892)) -- Support the stable API endpoint for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283): new settings in `/capabilities` endpoint. ([\#11933](https://github.com/matrix-org/synapse/issues/11933), [\#11989](https://github.com/matrix-org/synapse/issues/11989)) -- Support the `dir` parameter on the `/relations` endpoint, per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#11941](https://github.com/matrix-org/synapse/issues/11941)) -- Experimental implementation of [MSC3706](https://github.com/matrix-org/matrix-doc/pull/3706): extensions to `/send_join` to support reduced response size. ([\#11967](https://github.com/matrix-org/synapse/issues/11967)) - - -Bugfixes --------- - -- Fix [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) historical messages backfilling in random order on remote homeservers. ([\#11114](https://github.com/matrix-org/synapse/issues/11114)) -- Fix a bug introduced in Synapse 1.51.0 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#11890](https://github.com/matrix-org/synapse/issues/11890)) -- Fix a long-standing bug where some unknown endpoints would return HTML error pages instead of JSON `M_UNRECOGNIZED` errors. 
([\#11930](https://github.com/matrix-org/synapse/issues/11930)) -- Implement an allow list of content types for which we will attempt to preview a URL. This prevents Synapse from making useless longer-lived connections to streaming media servers. ([\#11936](https://github.com/matrix-org/synapse/issues/11936)) -- Fix a long-standing bug where pagination tokens from `/sync` and `/messages` could not be provided to the `/relations` API. ([\#11952](https://github.com/matrix-org/synapse/issues/11952)) -- Require that modules register their callbacks using keyword arguments. ([\#11975](https://github.com/matrix-org/synapse/issues/11975)) -- Fix a long-standing bug where `M_WRONG_ROOM_KEYS_VERSION` errors would not include the specced `current_version` field. ([\#11988](https://github.com/matrix-org/synapse/issues/11988)) - - -Improved Documentation ----------------------- - -- Fix typo in User Admin API: unpind -> unbind. ([\#11859](https://github.com/matrix-org/synapse/issues/11859)) -- Document images returned by the User List Media Admin API can include those generated by URL previews. ([\#11862](https://github.com/matrix-org/synapse/issues/11862)) -- Remove outdated MSC1711 FAQ document. ([\#11907](https://github.com/matrix-org/synapse/issues/11907)) -- Correct the structured logging configuration example. Contributed by Brad Jones. ([\#11946](https://github.com/matrix-org/synapse/issues/11946)) -- Add information on the Synapse release cycle. ([\#11954](https://github.com/matrix-org/synapse/issues/11954)) -- Fix broken link in the README to the admin API for password reset. ([\#11955](https://github.com/matrix-org/synapse/issues/11955)) - - -Deprecations and Removals -------------------------- - -- Drop support for `webclient` listeners and configuring `web_client_location` to a non-HTTP(S) URL. Deprecated configurations are a configuration error. ([\#11895](https://github.com/matrix-org/synapse/issues/11895)) -- Remove deprecated `user_may_create_room_with_invites` spam checker callback. See the [upgrade notes](https://matrix-org.github.io/synapse/latest/upgrade.html#removal-of-user_may_create_room_with_invites) for more information. ([\#11950](https://github.com/matrix-org/synapse/issues/11950)) -- No longer build `.deb` packages for Ubuntu 21.04 Hirsute Hippo, which has now EOLed. ([\#11961](https://github.com/matrix-org/synapse/issues/11961)) - - -Internal Changes ----------------- - -- Enhance user registration test helpers to make them more useful for tests involving application services and devices. ([\#11615](https://github.com/matrix-org/synapse/issues/11615), [\#11616](https://github.com/matrix-org/synapse/issues/11616)) -- Improve performance when fetching bundled aggregations for multiple events. ([\#11660](https://github.com/matrix-org/synapse/issues/11660), [\#11752](https://github.com/matrix-org/synapse/issues/11752)) -- Fix type errors introduced by new annotations in the Prometheus Client library. ([\#11832](https://github.com/matrix-org/synapse/issues/11832)) -- Add missing type hints to replication code. ([\#11856](https://github.com/matrix-org/synapse/issues/11856), [\#11938](https://github.com/matrix-org/synapse/issues/11938)) -- Ensure that `opentracing` scopes are activated and closed at the right time. ([\#11869](https://github.com/matrix-org/synapse/issues/11869)) -- Improve opentracing for incoming federation requests. ([\#11870](https://github.com/matrix-org/synapse/issues/11870)) -- Improve internal docstrings in `synapse.util.caches`. 
([\#11876](https://github.com/matrix-org/synapse/issues/11876))
- Do not needlessly clear the `get_users_in_room` and `get_users_in_room_with_profiles` caches when any room state changes. ([\#11878](https://github.com/matrix-org/synapse/issues/11878))
- Convert `ApplicationServiceTestCase` to use `simple_async_mock`. ([\#11880](https://github.com/matrix-org/synapse/issues/11880))
- Remove experimental changes to the default push rules which were introduced in Synapse 1.19.0 but never enabled. ([\#11884](https://github.com/matrix-org/synapse/issues/11884))
- Disable coverage calculation for the olddeps build. ([\#11888](https://github.com/matrix-org/synapse/issues/11888))
- Preparation to support sending device list updates to application services. ([\#11905](https://github.com/matrix-org/synapse/issues/11905))
- Add a test that checks users receive their own device list updates down `/sync`. ([\#11909](https://github.com/matrix-org/synapse/issues/11909))
- Run Complement tests sequentially. ([\#11910](https://github.com/matrix-org/synapse/issues/11910))
- Various refactors to the application service notifier code. ([\#11911](https://github.com/matrix-org/synapse/issues/11911), [\#11912](https://github.com/matrix-org/synapse/issues/11912))
- Tests: replace mocked `Authenticator` with the real thing. ([\#11913](https://github.com/matrix-org/synapse/issues/11913))
- Various refactors to the typing notifications code. ([\#11914](https://github.com/matrix-org/synapse/issues/11914))
- Use the proper type for the `Content-Length` header in the `UploadResource`. ([\#11927](https://github.com/matrix-org/synapse/issues/11927))
- Remove an unnecessary type-hint ignore, thanks to fixes in upstream packages. ([\#11939](https://github.com/matrix-org/synapse/issues/11939))
- Add missing type hints. ([\#11953](https://github.com/matrix-org/synapse/issues/11953))
- Fix an import cycle in `synapse.event_auth`. ([\#11965](https://github.com/matrix-org/synapse/issues/11965))
- Unpin `frozendict` but exclude the known bad version 2.1.2. ([\#11969](https://github.com/matrix-org/synapse/issues/11969))
- Prepare for the rename of the default Complement branch. ([\#11971](https://github.com/matrix-org/synapse/issues/11971))
- Fetch Synapse's version using a helper from `matrix-common`. ([\#11979](https://github.com/matrix-org/synapse/issues/11979))


Synapse 1.52.0 (2022-02-08)
===========================

No significant changes since 1.52.0rc1.

Note that [Twisted 22.1.0](https://github.com/twisted/twisted/releases/tag/twisted-22.1.0)
has recently been released, which fixes a [security issue](https://github.com/twisted/twisted/security/advisories/GHSA-92x2-jw7w-xvvx)
within the Twisted library. We do not believe Synapse is affected by this vulnerability,
though we advise server administrators who installed Synapse via pip to upgrade Twisted
with `pip install --upgrade Twisted treq` as a matter of good practice. The Docker image
`matrixdotorg/synapse` and the Debian packages from `packages.matrix.org` are using the
updated library.


Synapse 1.52.0rc1 (2022-02-01)
==============================

Features
--------

- Remove account data (including client config, push rules and ignored users) upon user deactivation. ([\#11621](https://github.com/matrix-org/synapse/issues/11621), [\#11788](https://github.com/matrix-org/synapse/issues/11788), [\#11789](https://github.com/matrix-org/synapse/issues/11789))
- Add an admin API to reset connection timeouts for a remote server. ([\#11639](https://github.com/matrix-org/synapse/issues/11639))
- Add an admin API to get a list of rooms that federate with a given remote homeserver. ([\#11658](https://github.com/matrix-org/synapse/issues/11658))
- Add a config flag to inhibit `M_USER_IN_USE` during registration. ([\#11743](https://github.com/matrix-org/synapse/issues/11743))
- Add a module callback to set username at registration. ([\#11790](https://github.com/matrix-org/synapse/issues/11790))
- Allow configuring a maximum file size as well as a list of allowed content types for avatars. ([\#11846](https://github.com/matrix-org/synapse/issues/11846))


Bugfixes
--------

- Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11612](https://github.com/matrix-org/synapse/issues/11612))
- Fix a long-standing bug when previewing Reddit URLs which do not contain an image. ([\#11767](https://github.com/matrix-org/synapse/issues/11767))
- Fix a long-standing bug where media streams could cause long-lived connections when generating URL previews. ([\#11784](https://github.com/matrix-org/synapse/issues/11784))
- Include a `prev_content` field in state events sent to Application Services. Contributed by @totallynotvaishnav. ([\#11798](https://github.com/matrix-org/synapse/issues/11798))
- Fix a bug introduced in Synapse 0.33.3 causing requests to sometimes log strings such as `HTTPStatus.OK` instead of integer status codes. ([\#11827](https://github.com/matrix-org/synapse/issues/11827))


Improved Documentation
----------------------

- Update PyPI installation docs to indicate that we now support Python 3.10. ([\#11820](https://github.com/matrix-org/synapse/issues/11820))
- Add missing steps to the contribution submission process in the documentation. Contributed by @sequentialread. ([\#11821](https://github.com/matrix-org/synapse/issues/11821))
- Remove an unneeded old table of contents from the documentation. ([\#11860](https://github.com/matrix-org/synapse/issues/11860))
- Consolidate the `access_token` information at the top of each relevant page in the Admin API documentation. ([\#11861](https://github.com/matrix-org/synapse/issues/11861))


Deprecations and Removals
-------------------------

- Drop support for Python 3.6, which is EOL. ([\#11683](https://github.com/matrix-org/synapse/issues/11683))
- Remove the `experimental_msc1849_support_enabled` flag as the features are now stable. ([\#11843](https://github.com/matrix-org/synapse/issues/11843))


Internal Changes
----------------

- Preparation for database schema simplifications: add `state_key` and `rejection_reason` columns to `events` table. ([\#11792](https://github.com/matrix-org/synapse/issues/11792))
- Add `FrozenEvent.get_state_key` and use it in a couple of places. ([\#11793](https://github.com/matrix-org/synapse/issues/11793))
- Preparation for database schema simplifications: stop reading from `event_reference_hashes`. ([\#11794](https://github.com/matrix-org/synapse/issues/11794))
- Drop unused table `public_room_list_stream`. ([\#11795](https://github.com/matrix-org/synapse/issues/11795))
- Preparation for reducing Postgres serialization errors: allow setting transaction isolation level. Contributed by Nick @ Beeper. ([\#11799](https://github.com/matrix-org/synapse/issues/11799), [\#11847](https://github.com/matrix-org/synapse/issues/11847))
- Docker: skip the initial amd64-only build and go straight to multiarch. ([\#11810](https://github.com/matrix-org/synapse/issues/11810))
- Run Complement on the GitHub Actions VM and not inside a Docker container. ([\#11811](https://github.com/matrix-org/synapse/issues/11811))
- Log module names at startup. ([\#11813](https://github.com/matrix-org/synapse/issues/11813))
- Improve type safety of bundled aggregations code. ([\#11815](https://github.com/matrix-org/synapse/issues/11815))
- Correct a type annotation in the event validation logic. ([\#11817](https://github.com/matrix-org/synapse/issues/11817), [\#11830](https://github.com/matrix-org/synapse/issues/11830))
- Minor updates and documentation for database schema delta files. ([\#11823](https://github.com/matrix-org/synapse/issues/11823))
- Work around a type annotation problem in `prometheus_client` 0.13.0. ([\#11834](https://github.com/matrix-org/synapse/issues/11834))
- Minor performance improvement in room state lookup. ([\#11836](https://github.com/matrix-org/synapse/issues/11836))
- Fix some indentation inconsistencies in the sample config. ([\#11838](https://github.com/matrix-org/synapse/issues/11838))
- Add type hints to `tests/rest/admin`. ([\#11851](https://github.com/matrix-org/synapse/issues/11851))


Synapse 1.51.0 (2022-01-25)
===========================

No significant changes since 1.51.0rc2.

Synapse 1.51.0 deprecates `webclient` listeners and non-HTTP(S) `web_client_location`s. Support for these will be removed in Synapse 1.53.0, at which point Synapse will not be capable of directly serving a web client for Matrix. See the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#upgrading-to-v1510).

Synapse 1.51.0rc2 (2022-01-24)
==============================

Bugfixes
--------

- Fix a bug introduced in Synapse 1.40.0 that caused Synapse to fail to process incoming federation traffic after handling a large number of events in a v1 room. ([\#11806](https://github.com/matrix-org/synapse/issues/11806))


Synapse 1.50.2 (2022-01-24)
===========================

This release includes the same bugfix as Synapse 1.51.0rc2.

Bugfixes
--------

- Fix a bug introduced in Synapse 1.40.0 that caused Synapse to fail to process incoming federation traffic after handling a large number of events in a v1 room. ([\#11806](https://github.com/matrix-org/synapse/issues/11806))


Synapse 1.51.0rc1 (2022-01-21)
==============================

Features
--------

- Add `track_puppeted_user_ips` config flag to record client IP addresses against puppeted users, and include the puppeted users in monthly active user counts. ([\#11561](https://github.com/matrix-org/synapse/issues/11561), [\#11749](https://github.com/matrix-org/synapse/issues/11749), [\#11757](https://github.com/matrix-org/synapse/issues/11757))
- Include whether the requesting user has participated in a thread when generating a summary for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440). ([\#11577](https://github.com/matrix-org/synapse/issues/11577))
- Return an `M_FORBIDDEN` error code instead of `M_UNKNOWN` when a spam checker module prevents a user from creating a room (see the sketch after this list). ([\#11672](https://github.com/matrix-org/synapse/issues/11672))
- Add a flag to the `synapse_review_recent_signups` script to ignore and filter appservice users. ([\#11675](https://github.com/matrix-org/synapse/issues/11675), [\#11770](https://github.com/matrix-org/synapse/issues/11770))
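To make the `M_FORBIDDEN` entry above concrete, here is a minimal sketch of the standard Matrix error body involved. Only the error codes come from the entry itself; the `error` message text is a hypothetical placeholder, not what Synapse actually emits:

```python
# Sketch of the error shape referenced in the Features list above.
# Only the errcode values are taken from the changelog entry; the
# "error" message below is an illustrative assumption.
room_creation_blocked = {
    "errcode": "M_FORBIDDEN",  # previously reported as M_UNKNOWN
    "error": "Room creation was blocked by a spam checker module",
}

assert room_creation_blocked["errcode"] == "M_FORBIDDEN"
```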


Bugfixes
--------

- Fix a long-standing issue which could cause Synapse to incorrectly accept data in the unsigned field of events
received over federation. ([\#11530](https://github.com/matrix-org/synapse/issues/11530))
- Fix a long-standing bug where Synapse wouldn't cache a response indicating that a remote user has no devices. ([\#11587](https://github.com/matrix-org/synapse/issues/11587))
- Fix an error that occurs whilst trying to get the federation status of a destination server that was working normally. This admin API was newly introduced in Synapse 1.49.0. ([\#11593](https://github.com/matrix-org/synapse/issues/11593))
- Fix bundled aggregations not being included in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11612](https://github.com/matrix-org/synapse/issues/11612), [\#11659](https://github.com/matrix-org/synapse/issues/11659), [\#11791](https://github.com/matrix-org/synapse/issues/11791))
- Fix the `/_matrix/client/v1/room/{roomId}/hierarchy` endpoint returning incorrect fields, a bug which had been present since Synapse 1.49.0. ([\#11667](https://github.com/matrix-org/synapse/issues/11667))
- Fix preview of some GIF URLs (like tenor.com). Contributed by Philippe Daouadi. ([\#11669](https://github.com/matrix-org/synapse/issues/11669))
- Fix a bug where only the first 50 rooms from a space were returned from the `/hierarchy` API. This has existed since the introduction of the API in Synapse 1.41.0. ([\#11695](https://github.com/matrix-org/synapse/issues/11695))
- Fix a bug introduced in Synapse 1.18.0 where password reset and address validation emails would not be sent if their subject was configured to use the 'app' template variable. Contributed by @br4nnigan. ([\#11710](https://github.com/matrix-org/synapse/issues/11710), [\#11745](https://github.com/matrix-org/synapse/issues/11745))
- Make the 'List Rooms' Admin API sort stable. Contributed by Daniël Sonck. ([\#11737](https://github.com/matrix-org/synapse/issues/11737))
- Fix a long-standing bug where space hierarchy over federation would only work correctly some of the time. ([\#11775](https://github.com/matrix-org/synapse/issues/11775))
- Fix a bug introduced in Synapse 1.46.0 that prevented `on_logged_out` module callbacks from being correctly awaited by Synapse. ([\#11786](https://github.com/matrix-org/synapse/issues/11786))


Improved Documentation
----------------------

- Warn against using a Let's Encrypt certificate for TLS/DTLS TURN server client connections, and suggest using a ZeroSSL certificate instead. This works around client-side connectivity errors caused by WebRTC libraries that reject Let's Encrypt certificates. Contributed by @AndrewFerr. ([\#11686](https://github.com/matrix-org/synapse/issues/11686))
- Document the new `SYNAPSE_TEST_PERSIST_SQLITE_DB` environment variable in the contributing guide. ([\#11715](https://github.com/matrix-org/synapse/issues/11715))
- Document that the minimum supported PostgreSQL version is now 10. ([\#11725](https://github.com/matrix-org/synapse/issues/11725))
- Fix typo in demo docs: differnt. ([\#11735](https://github.com/matrix-org/synapse/issues/11735))
- Update room spec URL in config files. ([\#11739](https://github.com/matrix-org/synapse/issues/11739))
- Mention `python3-venv` and `libpq-dev` dependencies in the contribution guide. ([\#11740](https://github.com/matrix-org/synapse/issues/11740))
- Update documentation for configuring login with Facebook. ([\#11755](https://github.com/matrix-org/synapse/issues/11755))
- Update installation instructions to note that Python 3.6 is no longer supported. ([\#11781](https://github.com/matrix-org/synapse/issues/11781))


Deprecations and Removals
-------------------------

- Remove the unstable `/send_relation` endpoint. ([\#11682](https://github.com/matrix-org/synapse/issues/11682))
- Remove the `python_twisted_reactor_pending_calls` Prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724))
- Remove the `password_hash` field from the response dictionaries of the [Users Admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html). ([\#11576](https://github.com/matrix-org/synapse/issues/11576))
- **Deprecate support for `webclient` listeners and non-HTTP(S) `web_client_location` configuration. ([\#11774](https://github.com/matrix-org/synapse/issues/11774), [\#11783](https://github.com/matrix-org/synapse/issues/11783))**


Internal Changes
----------------

- Run `pyupgrade --py37-plus --keep-percent-format` on Synapse. ([\#11685](https://github.com/matrix-org/synapse/issues/11685))
- Use BuildKit's cache feature to speed up Docker builds. ([\#11691](https://github.com/matrix-org/synapse/issues/11691))
- Use `auto_attribs` and native type hints for attrs classes. ([\#11692](https://github.com/matrix-org/synapse/issues/11692), [\#11768](https://github.com/matrix-org/synapse/issues/11768))
- Remove debug logging for #4422, which has been closed since Synapse 0.99. ([\#11693](https://github.com/matrix-org/synapse/issues/11693))
- Remove fallback code for Python 2. ([\#11699](https://github.com/matrix-org/synapse/issues/11699))
- Add a test for [an edge case](https://github.com/matrix-org/synapse/pull/11532#discussion_r769104461) in the `/sync` logic. ([\#11701](https://github.com/matrix-org/synapse/issues/11701))
- Add the option to write SQLite test dbs to disk when running tests. ([\#11702](https://github.com/matrix-org/synapse/issues/11702))
- Improve Complement test output for GitHub Actions. ([\#11707](https://github.com/matrix-org/synapse/issues/11707))
- Fix the docstring on `add_account_data_for_user`. ([\#11716](https://github.com/matrix-org/synapse/issues/11716))
- Rename a Complement environment variable and update `.gitignore`. ([\#11718](https://github.com/matrix-org/synapse/issues/11718))
- Simplify calculation of Prometheus metrics for garbage collection. ([\#11723](https://github.com/matrix-org/synapse/issues/11723))
- Improve the accuracy of the `python_twisted_reactor_tick_time` Prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724), [\#11771](https://github.com/matrix-org/synapse/issues/11771))
- Minor efficiency improvements when inserting many values into the database. ([\#11742](https://github.com/matrix-org/synapse/issues/11742))
- Invite PR authors to give themselves credit in the changelog. ([\#11744](https://github.com/matrix-org/synapse/issues/11744))
- Add optional debugging to investigate [issue 8631](https://github.com/matrix-org/synapse/issues/8631). ([\#11760](https://github.com/matrix-org/synapse/issues/11760))
- Remove the `log_function` utility function and its uses. ([\#11761](https://github.com/matrix-org/synapse/issues/11761))
- Add a unit test that checks both `client` and `webclient` resources will function when simultaneously enabled. ([\#11765](https://github.com/matrix-org/synapse/issues/11765))
- Allow overriding the Complement commit using `COMPLEMENT_REF`. ([\#11766](https://github.com/matrix-org/synapse/issues/11766))
- Add some comments and type annotations for `_update_outliers_txn`. ([\#11776](https://github.com/matrix-org/synapse/issues/11776))


Synapse 1.50.1 (2022-01-18)
===========================

This release fixes a bug in Synapse 1.50.0 that could prevent clients from being able to connect to Synapse if the `webclient` resource was enabled. Further details are available in [this issue](https://github.com/matrix-org/synapse/issues/11763).

Bugfixes
--------

- Fix a bug introduced in Synapse 1.50.0rc1 that could cause Matrix clients to be unable to connect to Synapse instances with the `webclient` resource enabled. ([\#11764](https://github.com/matrix-org/synapse/issues/11764))


Synapse 1.50.0 (2022-01-18)
===========================

**This release contains a critical bug that may prevent clients from being able to connect.
As such, it is not recommended to upgrade to 1.50.0. Instead, please upgrade straight
to 1.50.1. Further details are available in [this issue](https://github.com/matrix-org/synapse/issues/11763).**

Please note that we now only support Python 3.7+ and PostgreSQL 10+ (if applicable), because Python 3.6 and PostgreSQL 9.6 have reached end-of-life.

No significant changes since 1.50.0rc2.


Synapse 1.50.0rc2 (2022-01-14)
==============================

This release candidate fixes a federation-breaking regression introduced in Synapse 1.50.0rc1.

Bugfixes
--------

- Fix a bug introduced in Synapse 1.0.0 whereby some device list updates would not be sent to remote homeservers if there were too many to send at once. ([\#11729](https://github.com/matrix-org/synapse/issues/11729))
- Fix a bug introduced in Synapse 1.50.0rc1 whereby outbound federation could fail because too many EDUs were produced for device updates. ([\#11730](https://github.com/matrix-org/synapse/issues/11730))


Improved Documentation
----------------------

- Document that the minimum supported PostgreSQL version is now 10. ([\#11725](https://github.com/matrix-org/synapse/issues/11725))


Internal Changes
----------------

- Fix a typechecker problem related to our (ab)use of `nacl.signing.SigningKey`s. ([\#11714](https://github.com/matrix-org/synapse/issues/11714))


Synapse 1.50.0rc1 (2022-01-05)
==============================


Features
--------

- Allow guests to send state events per [MSC3419](https://github.com/matrix-org/matrix-doc/pull/3419). ([\#11378](https://github.com/matrix-org/synapse/issues/11378))
- Add experimental support for part of [MSC3202](https://github.com/matrix-org/matrix-doc/pull/3202): allowing application services to masquerade as specific devices. ([\#11538](https://github.com/matrix-org/synapse/issues/11538))
- Add an admin API to get users' account data. ([\#11664](https://github.com/matrix-org/synapse/issues/11664))
- Include the room topic in the stripped state included with invites and knocking. ([\#11666](https://github.com/matrix-org/synapse/issues/11666))
- Send and handle cross-signing messages using the stable prefix. ([\#10520](https://github.com/matrix-org/synapse/issues/10520))
- Support unprefixed versions of fallback key property names. ([\#11541](https://github.com/matrix-org/synapse/issues/11541))


Bugfixes
--------

- Fix a long-standing bug where relations from other rooms could be included in the bundled aggregations of an event. ([\#11516](https://github.com/matrix-org/synapse/issues/11516))
- Fix a long-standing bug which could cause `AssertionError`s to be written to the log when Synapse was restarted after purging events from the database. ([\#11536](https://github.com/matrix-org/synapse/issues/11536), [\#11642](https://github.com/matrix-org/synapse/issues/11642))
- Fix a bug introduced in Synapse 1.17.0 where a pusher created for an email with capital letters would fail to be created. ([\#11547](https://github.com/matrix-org/synapse/issues/11547))
- Fix a long-standing bug where responses included bundled aggregations when they should not, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11592](https://github.com/matrix-org/synapse/issues/11592), [\#11623](https://github.com/matrix-org/synapse/issues/11623))
- Fix a long-standing bug where some unknown endpoints would return HTML error pages instead of JSON `M_UNRECOGNIZED` errors. ([\#11602](https://github.com/matrix-org/synapse/issues/11602))
- Fix a bug introduced in Synapse 1.19.3 which could sometimes cause `AssertionError`s when backfilling rooms over federation. ([\#11632](https://github.com/matrix-org/synapse/issues/11632))


Improved Documentation
----------------------

- Update Synapse install command for FreeBSD as the package is now prefixed with `py38`. Contributed by @itchychips. ([\#11267](https://github.com/matrix-org/synapse/issues/11267))
- Document the usage of refresh tokens. ([\#11427](https://github.com/matrix-org/synapse/issues/11427))
- Add details for how to configure a TURN server when behind a NAT. Contributed by @AndrewFerr. ([\#11553](https://github.com/matrix-org/synapse/issues/11553))
- Add references for using Postgres to the Docker documentation. ([\#11640](https://github.com/matrix-org/synapse/issues/11640))
- Fix the documentation link in newly-generated configuration files. ([\#11678](https://github.com/matrix-org/synapse/issues/11678))
- Correct the documentation for `nginx` to use a case-sensitive URL pattern. Fixes an error introduced in v1.21.0. ([\#11680](https://github.com/matrix-org/synapse/issues/11680))
- Clarify SSO mapping provider documentation by writing `def` or `async def` before the names of methods, as appropriate. ([\#11681](https://github.com/matrix-org/synapse/issues/11681))


Deprecations and Removals
-------------------------

- Replace the `mock` package with its standard library version. ([\#11588](https://github.com/matrix-org/synapse/issues/11588))
- Drop support for Python 3.6 and Ubuntu 18.04. ([\#11633](https://github.com/matrix-org/synapse/issues/11633))


Internal Changes
----------------

- Allow specific, experimental events to be created without `prev_events`. Used by [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716). ([\#11243](https://github.com/matrix-org/synapse/issues/11243))
- A test helper (`wait_for_background_updates`) no longer depends on classes defining a `store` property. ([\#11331](https://github.com/matrix-org/synapse/issues/11331))
- Add type hints to `synapse.appservice`. ([\#11360](https://github.com/matrix-org/synapse/issues/11360))
- Add missing type hints to the `synapse.config` module. ([\#11480](https://github.com/matrix-org/synapse/issues/11480))
- Add a test to ensure we share the same `state_group` across the whole historical batch when using the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint. ([\#11487](https://github.com/matrix-org/synapse/issues/11487))
- Refactor `tests.util.setup_test_homeserver` and `tests.server.setup_test_homeserver`. ([\#11503](https://github.com/matrix-org/synapse/issues/11503))
- Move `glob_to_regex` and `re_word_boundary` to `matrix-python-common`. ([\#11505](https://github.com/matrix-org/synapse/issues/11505), [\#11687](https://github.com/matrix-org/synapse/issues/11687))
- Use `HTTPStatus` constants in place of literals in `tests.rest.client.test_auth`. ([\#11520](https://github.com/matrix-org/synapse/issues/11520))
- Add a receipt types constant for `m.read`. ([\#11531](https://github.com/matrix-org/synapse/issues/11531))
- Clean up `synapse.rest.admin`. ([\#11535](https://github.com/matrix-org/synapse/issues/11535))
- Add missing `errcode` to `parse_string` and `parse_boolean`. ([\#11542](https://github.com/matrix-org/synapse/issues/11542))
- Use `HTTPStatus` constants in place of literals in `synapse.http`. ([\#11543](https://github.com/matrix-org/synapse/issues/11543))
- Add missing type hints to storage classes. ([\#11546](https://github.com/matrix-org/synapse/issues/11546), [\#11549](https://github.com/matrix-org/synapse/issues/11549), [\#11551](https://github.com/matrix-org/synapse/issues/11551), [\#11555](https://github.com/matrix-org/synapse/issues/11555), [\#11575](https://github.com/matrix-org/synapse/issues/11575), [\#11589](https://github.com/matrix-org/synapse/issues/11589), [\#11594](https://github.com/matrix-org/synapse/issues/11594), [\#11652](https://github.com/matrix-org/synapse/issues/11652), [\#11653](https://github.com/matrix-org/synapse/issues/11653), [\#11654](https://github.com/matrix-org/synapse/issues/11654), [\#11657](https://github.com/matrix-org/synapse/issues/11657))
- Fix an inaccurate and misleading comment in the `/sync` code. ([\#11550](https://github.com/matrix-org/synapse/issues/11550))
- Add missing type hints to `synapse.logging.context`. ([\#11556](https://github.com/matrix-org/synapse/issues/11556))
- Stop populating unused database column `state_events.prev_state`. ([\#11558](https://github.com/matrix-org/synapse/issues/11558))
- Minor efficiency improvements in event persistence. ([\#11560](https://github.com/matrix-org/synapse/issues/11560))
- Add some safety checks that storage functions are used correctly. ([\#11564](https://github.com/matrix-org/synapse/issues/11564), [\#11580](https://github.com/matrix-org/synapse/issues/11580))
- Make `get_device` return `None` if the device doesn't exist rather than raising an exception. ([\#11565](https://github.com/matrix-org/synapse/issues/11565))
- Split the HTML parsing code from the URL preview resource code. ([\#11566](https://github.com/matrix-org/synapse/issues/11566))
- Remove redundant `COALESCE()`s around `COUNT()`s in database queries. ([\#11570](https://github.com/matrix-org/synapse/issues/11570))
- Add missing type hints to `synapse.http`. ([\#11571](https://github.com/matrix-org/synapse/issues/11571))
- Add [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) and [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) to `/versions` -> `unstable_features` to detect server support. ([\#11582](https://github.com/matrix-org/synapse/issues/11582))
- Add type hints to `synapse/tests/rest/admin`. ([\#11590](https://github.com/matrix-org/synapse/issues/11590))
- Drop end-of-life Python 3.6 and Postgres 9.6 from CI. ([\#11595](https://github.com/matrix-org/synapse/issues/11595))
- Update the black version and run it on all the files. ([\#11596](https://github.com/matrix-org/synapse/issues/11596))
- Add opentracing type stubs and fix associated mypy errors. ([\#11603](https://github.com/matrix-org/synapse/issues/11603), [\#11622](https://github.com/matrix-org/synapse/issues/11622))
- Improve OpenTracing support for requests which use a `ResponseCache`. ([\#11607](https://github.com/matrix-org/synapse/issues/11607))
- Improve OpenTracing support for incoming HTTP requests. ([\#11618](https://github.com/matrix-org/synapse/issues/11618))
- A number of improvements to opentracing support. ([\#11619](https://github.com/matrix-org/synapse/issues/11619))
- Refactor the way that the `outlier` flag is set on events received over federation. ([\#11634](https://github.com/matrix-org/synapse/issues/11634))
- Improve the error messages from `get_create_event_for_room`. ([\#11638](https://github.com/matrix-org/synapse/issues/11638))
- Remove redundant `get_current_events_token` method. ([\#11643](https://github.com/matrix-org/synapse/issues/11643))
- Convert `namedtuples` to `attrs`. ([\#11665](https://github.com/matrix-org/synapse/issues/11665), [\#11574](https://github.com/matrix-org/synapse/issues/11574))
- Update the `/capabilities` response to include whether support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) is available. ([\#11690](https://github.com/matrix-org/synapse/issues/11690))
- Send the `Accept` header in HTTP requests made using `SimpleHttpClient.get_json`. ([\#11677](https://github.com/matrix-org/synapse/issues/11677))
- Work around a Mjolnir compatibility issue by adding an import for `glob_to_regex` in `synapse.util`, where it moved from, as sketched below. ([\#11696](https://github.com/matrix-org/synapse/issues/11696))
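As a rough illustration of the Mjolnir workaround in the last entry, the fix amounts to a compatibility re-export. The sketch below assumes the helper now lives at `matrix_common.regex` (per the earlier `matrix-python-common` entry); the exact module path is an assumption, not something this changelog confirms:

```python
# Sketch of a compatibility re-export in synapse/util/__init__.py (not
# the actual source). Older callers such as Mjolnir that still do
# `from synapse.util import glob_to_regex` keep working because the
# name is re-exported from its assumed new home in matrix-python-common.
from matrix_common.regex import glob_to_regex  # noqa: F401
```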

**Changelogs for older versions can be found [here](docs/changelogs/).**

diff --git a/changelog.d/15846.misc b/changelog.d/15846.misc
new file mode 100644
index 0000000000..f1c31d6663
--- /dev/null
+++ b/changelog.d/15846.misc
@@ -0,0 +1 @@
+Split out 2022 changes from the changelog so the rendered version in GitHub doesn't time out as much.
diff --git a/docs/changelogs/CHANGES-2022.md b/docs/changelogs/CHANGES-2022.md
new file mode 100644
index 0000000000..81e2849516
--- /dev/null
+++ b/docs/changelogs/CHANGES-2022.md
@@ -0,0 +1,2766 @@
+
+Synapse 1.74.0 (2022-12-20)
+===========================
+
+Improved Documentation
+----------------------
+
+- Add release note and update documentation regarding optional ICU support in user search. ([\#14712](https://github.com/matrix-org/synapse/issues/14712))
+
+
+Synapse 1.74.0rc1 (2022-12-13)
+==============================
+
+Features
+--------
+
+- Improve user search for international display names. ([\#14464](https://github.com/matrix-org/synapse/issues/14464))
+- Stop using the deprecated `keyIds` parameter when calling `/_matrix/key/v2/server`. ([\#14490](https://github.com/matrix-org/synapse/issues/14490), [\#14525](https://github.com/matrix-org/synapse/issues/14525))
+- Add a new `push.enabled` config option to allow opting out of push notification calculation. ([\#14551](https://github.com/matrix-org/synapse/issues/14551), [\#14619](https://github.com/matrix-org/synapse/issues/14619))
+- Advertise support for Matrix 1.5 on `/_matrix/client/versions`. ([\#14576](https://github.com/matrix-org/synapse/issues/14576))
+- Improve opentracing and logging for to-device message handling. ([\#14598](https://github.com/matrix-org/synapse/issues/14598))
+- Allow selecting "prejoin" events by state keys in addition to event types. ([\#14642](https://github.com/matrix-org/synapse/issues/14642))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where a device list update might not be sent to clients in certain circumstances. ([\#14435](https://github.com/matrix-org/synapse/issues/14435), [\#14592](https://github.com/matrix-org/synapse/issues/14592), [\#14604](https://github.com/matrix-org/synapse/issues/14604))
+- Suppress a spurious warning when `POST /rooms///`, `POST /join//` receive an empty HTTP request body. ([\#14600](https://github.com/matrix-org/synapse/issues/14600))
+- Return spec-compliant JSON errors when unknown endpoints are requested. ([\#14620](https://github.com/matrix-org/synapse/issues/14620), [\#14621](https://github.com/matrix-org/synapse/issues/14621))
+- Update HTML templates to load images over HTTPS. Contributed by @ashfame. ([\#14625](https://github.com/matrix-org/synapse/issues/14625))
+- Fix a long-standing bug where the user directory would return 1 more row than requested. ([\#14631](https://github.com/matrix-org/synapse/issues/14631))
+- Reject invalid read receipt requests with empty room or event IDs. Contributed by Nick @ Beeper (@fizzadar). ([\#14632](https://github.com/matrix-org/synapse/issues/14632))
+- Fix a bug introduced in Synapse 1.67.0 where not specifying a config file or a server URL would lead to the `register_new_matrix_user` script failing. ([\#14637](https://github.com/matrix-org/synapse/issues/14637))
+- Fix a long-standing bug where the user directory and room/user stats might be out of sync. ([\#14639](https://github.com/matrix-org/synapse/issues/14639), [\#14643](https://github.com/matrix-org/synapse/issues/14643))
+- Fix a bug introduced in Synapse 1.72.0 where the background updates to add non-thread unique indexes on receipts would fail if they were previously interrupted. ([\#14650](https://github.com/matrix-org/synapse/issues/14650))
+- Improve validation of field size limits in events. ([\#14664](https://github.com/matrix-org/synapse/issues/14664))
+- Fix bugs introduced in Synapse 1.55.0 and 1.69.0 where application services would not be notified of events in the correct rooms, due to stale caches. ([\#14670](https://github.com/matrix-org/synapse/issues/14670))
+
+
+Improved Documentation
+----------------------
+
+- Update worker settings for `pusher` and `federation_sender` functionality. ([\#14493](https://github.com/matrix-org/synapse/issues/14493))
+- Add links to third party package repositories, and point to the bug which highlights Ubuntu's out-of-date packages. ([\#14517](https://github.com/matrix-org/synapse/issues/14517))
+- Remove the old, incorrect minimum Postgres version note and replace it with a link to the [Dependency Deprecation Policy](https://matrix-org.github.io/synapse/v1.73/deprecation_policy.html). ([\#14590](https://github.com/matrix-org/synapse/issues/14590))
+- Add Single-Sign On setup instructions for Mastodon-based instances. ([\#14594](https://github.com/matrix-org/synapse/issues/14594))
+- Change the `turn_allow_guests` example value to lowercase `true`. ([\#14634](https://github.com/matrix-org/synapse/issues/14634))
+
+
+Internal Changes
+----------------
+
+- Optimise push badge count calculations. Contributed by Nick @ Beeper (@fizzadar). ([\#14255](https://github.com/matrix-org/synapse/issues/14255))
+- Faster remote room joins: stream the un-partial-stating of rooms over replication. ([\#14473](https://github.com/matrix-org/synapse/issues/14473), [\#14474](https://github.com/matrix-org/synapse/issues/14474))
+- Share the `ClientRestResource` for both workers and the main process. ([\#14528](https://github.com/matrix-org/synapse/issues/14528))
+- Add an `--editable` flag to `complement.sh` which uses an editable install of Synapse for faster turn-around times whilst developing iteratively. ([\#14548](https://github.com/matrix-org/synapse/issues/14548))
+- Faster joins: use the servers list approximation to send read receipts when in partial state instead of waiting for the full state of the room. ([\#14549](https://github.com/matrix-org/synapse/issues/14549))
+- Modernize the unit test configuration related to workers. ([\#14568](https://github.com/matrix-org/synapse/issues/14568))
+- Bump jsonschema from 4.17.0 to 4.17.3. ([\#14591](https://github.com/matrix-org/synapse/issues/14591))
+- Fix Rust lint CI. ([\#14602](https://github.com/matrix-org/synapse/issues/14602))
+- Bump JasonEtco/create-an-issue from 2.5.0 to 2.8.1. ([\#14607](https://github.com/matrix-org/synapse/issues/14607))
+- Alter some unit test environment parameters to decrease time spent running tests. ([\#14610](https://github.com/matrix-org/synapse/issues/14610))
+- Switch to the Go-recommended installation method for the `gotestfmt` template in CI. ([\#14611](https://github.com/matrix-org/synapse/issues/14611))
+- Bump phonenumbers from 8.13.0 to 8.13.1. ([\#14612](https://github.com/matrix-org/synapse/issues/14612))
+- Bump types-setuptools from 65.5.0.3 to 65.6.0.1. ([\#14613](https://github.com/matrix-org/synapse/issues/14613))
+- Bump twine from 4.0.1 to 4.0.2. ([\#14614](https://github.com/matrix-org/synapse/issues/14614))
+- Bump types-requests from 2.28.11.2 to 2.28.11.5. ([\#14615](https://github.com/matrix-org/synapse/issues/14615))
+- Bump cryptography from 38.0.3 to 38.0.4. ([\#14616](https://github.com/matrix-org/synapse/issues/14616))
+- Remove a useless cargo install with apt from the Dockerfile. ([\#14636](https://github.com/matrix-org/synapse/issues/14636))
+- Bump certifi from 2021.10.8 to 2022.12.7. ([\#14645](https://github.com/matrix-org/synapse/issues/14645))
+- Bump flake8-bugbear from 22.10.27 to 22.12.6. ([\#14656](https://github.com/matrix-org/synapse/issues/14656))
+- Bump packaging from 21.3 to 22.0. ([\#14657](https://github.com/matrix-org/synapse/issues/14657))
+- Bump types-pillow from 9.3.0.1 to 9.3.0.4. ([\#14658](https://github.com/matrix-org/synapse/issues/14658))
+- Bump serde from 1.0.148 to 1.0.150. ([\#14659](https://github.com/matrix-org/synapse/issues/14659))
+- Bump phonenumbers from 8.13.1 to 8.13.2. ([\#14660](https://github.com/matrix-org/synapse/issues/14660))
+- Bump authlib from 1.1.0 to 1.2.0. ([\#14661](https://github.com/matrix-org/synapse/issues/14661))
+- Move `StateFilter` to `synapse.types`. ([\#14668](https://github.com/matrix-org/synapse/issues/14668))
+- Improve type hints. ([\#14597](https://github.com/matrix-org/synapse/issues/14597), [\#14646](https://github.com/matrix-org/synapse/issues/14646), [\#14671](https://github.com/matrix-org/synapse/issues/14671))
+
+
+Synapse 1.73.0 (2022-12-06)
+===========================
+
+Please note that legacy Prometheus metric names have been removed in this release; see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.73/docs/upgrade.md#legacy-prometheus-metric-names-have-now-been-removed) for more details.
+
+No significant changes since 1.73.0rc2.
+
+
+Synapse 1.73.0rc2 (2022-12-01)
+==============================
+
+Bugfixes
+--------
+
+- Fix a regression in Synapse 1.73.0rc1 where Synapse's main process would stop responding to HTTP requests when a user with a large number of devices logs in. ([\#14582](https://github.com/matrix-org/synapse/issues/14582))
+
+
+Synapse 1.73.0rc1 (2022-11-29)
+==============================
+
+Features
+--------
+
+- Speed up `/messages` with `filter_events_for_client` optimizations. ([\#14527](https://github.com/matrix-org/synapse/issues/14527))
+- Improve DB performance by reducing the amount of data that gets read in `device_lists_changes_in_room`. ([\#14534](https://github.com/matrix-org/synapse/issues/14534))
+- Add support for handling avatars in SSO OIDC login. Contributed by @ashfame. ([\#13917](https://github.com/matrix-org/synapse/issues/13917))
+- Move MSC3030 `/timestamp_to_event` endpoints to stable `v1` location (`/_matrix/client/v1/rooms//timestamp_to_event?ts=&dir=`, `/_matrix/federation/v1/timestamp_to_event/?ts=&dir=`). ([\#14471](https://github.com/matrix-org/synapse/issues/14471))
+- Reduce the database load of [Client-Server endpoints](https://spec.matrix.org/v1.5/client-server-api/#aggregations) which return bundled aggregations. ([\#14491](https://github.com/matrix-org/synapse/issues/14491), [\#14508](https://github.com/matrix-org/synapse/issues/14508), [\#14510](https://github.com/matrix-org/synapse/issues/14510))
+- Add unstable support for an Extensible Events room version (`org.matrix.msc1767.10`) via [MSC1767](https://github.com/matrix-org/matrix-spec-proposals/pull/1767), [MSC3931](https://github.com/matrix-org/matrix-spec-proposals/pull/3931), [MSC3932](https://github.com/matrix-org/matrix-spec-proposals/pull/3932), and [MSC3933](https://github.com/matrix-org/matrix-spec-proposals/pull/3933). ([\#14520](https://github.com/matrix-org/synapse/issues/14520), [\#14521](https://github.com/matrix-org/synapse/issues/14521), [\#14524](https://github.com/matrix-org/synapse/issues/14524))
+- Prune a user's old devices on login if they have too many. ([\#14038](https://github.com/matrix-org/synapse/issues/14038), [\#14580](https://github.com/matrix-org/synapse/issues/14580))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where paginating from the start of a room did not work. Contributed by @gnunicorn. ([\#14149](https://github.com/matrix-org/synapse/issues/14149))
+- Fix a bug introduced in Synapse 1.58.0 where a user with presence state `org.matrix.msc3026.busy` would mistakenly be set to `online` when calling `/sync` or `/events` on a worker process. ([\#14393](https://github.com/matrix-org/synapse/issues/14393))
+- Fix a bug introduced in Synapse 1.70.0 where a receipt's thread ID was not sent over federation. ([\#14466](https://github.com/matrix-org/synapse/issues/14466))
+- Fix a long-standing bug where the [List media admin API](https://matrix-org.github.io/synapse/latest/admin_api/media_admin_api.html#list-all-media-in-a-room) would fail when processing an image with broken thumbnail information. ([\#14537](https://github.com/matrix-org/synapse/issues/14537))
+- Fix a bug introduced in Synapse 1.67.0 where two logging context warnings would be logged on startup. ([\#14574](https://github.com/matrix-org/synapse/issues/14574))
+- In application service transactions that include the experimental `org.matrix.msc3202.device_one_time_key_counts` key, include a duplicate key of `org.matrix.msc3202.device_one_time_keys_count` to match the name proposed by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202). ([\#14565](https://github.com/matrix-org/synapse/issues/14565))
+- Fix a bug introduced in Synapse 0.9 where Synapse would fail to fetch server keys whose IDs contain a forward slash. ([\#14490](https://github.com/matrix-org/synapse/issues/14490))
+
+
+Improved Documentation
+----------------------
+
+- Fix the link to 'Synapse administration endpoints'. ([\#14499](https://github.com/matrix-org/synapse/issues/14499))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove legacy Prometheus metric names. They were deprecated in Synapse v1.69.0 and disabled by default in Synapse v1.71.0. ([\#14538](https://github.com/matrix-org/synapse/issues/14538))
+
+
+Internal Changes
+----------------
+
+- Improve type hinting throughout Synapse. ([\#14055](https://github.com/matrix-org/synapse/issues/14055), [\#14412](https://github.com/matrix-org/synapse/issues/14412), [\#14529](https://github.com/matrix-org/synapse/issues/14529), [\#14452](https://github.com/matrix-org/synapse/issues/14452))
+- Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar). ([\#14376](https://github.com/matrix-org/synapse/issues/14376), [\#14468](https://github.com/matrix-org/synapse/issues/14468))
+- Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication. ([\#14400](https://github.com/matrix-org/synapse/issues/14400), [\#14476](https://github.com/matrix-org/synapse/issues/14476))
+- Refactor `federation_sender` and `pusher` configuration loading. ([\#14496](https://github.com/matrix-org/synapse/issues/14496), [\#14509](https://github.com/matrix-org/synapse/issues/14509), [\#14573](https://github.com/matrix-org/synapse/issues/14573))
+- Faster joins: do not wait for full state when creating events to send. ([\#14403](https://github.com/matrix-org/synapse/issues/14403))
+- Faster joins: filter out non-local events when a room doesn't have its full state. ([\#14404](https://github.com/matrix-org/synapse/issues/14404))
+- Faster joins: send events to the initial list of servers if we don't have the full state yet. ([\#14408](https://github.com/matrix-org/synapse/issues/14408))
+- Faster joins: use the servers list approximation received during `send_join` (potentially updated with received membership events) in `assert_host_in_room`. ([\#14515](https://github.com/matrix-org/synapse/issues/14515))
+- Fix type logic in TCP replication code that prevented correctly ignoring blank commands. ([\#14449](https://github.com/matrix-org/synapse/issues/14449))
+- Remove the option to skip locking of tables when performing emulated upserts, to avoid a class of bugs in future. ([\#14469](https://github.com/matrix-org/synapse/issues/14469))
+- `scripts-dev/federation_client`: Fix routing on servers with `.well-known` files. ([\#14479](https://github.com/matrix-org/synapse/issues/14479))
+- Reduce the default third-party invite rate limit to 216 invites per day. ([\#14487](https://github.com/matrix-org/synapse/issues/14487))
+- Refactor conversion of device list changes in room to outbound pokes to track unconverted rows using a `(stream ID, room ID)` position instead of updating the `converted_to_destinations` flag on every row. ([\#14516](https://github.com/matrix-org/synapse/issues/14516))
+- Add more prompts to the bug report form. ([\#14522](https://github.com/matrix-org/synapse/issues/14522))
+- Extend editorconfig rules on indent and line length to `.pyi` files. ([\#14526](https://github.com/matrix-org/synapse/issues/14526))
+- Run Rust CI when `Cargo.lock` changes. This is particularly useful for Dependabot updates. ([\#14571](https://github.com/matrix-org/synapse/issues/14571))
+- Fix a possible variable shadow in `create_new_client_event`. ([\#14575](https://github.com/matrix-org/synapse/issues/14575))
+- Bump various dependencies in the `poetry.lock` file and in CI scripts. ([\#14557](https://github.com/matrix-org/synapse/issues/14557), [\#14559](https://github.com/matrix-org/synapse/issues/14559), [\#14560](https://github.com/matrix-org/synapse/issues/14560), [\#14500](https://github.com/matrix-org/synapse/issues/14500), [\#14501](https://github.com/matrix-org/synapse/issues/14501), [\#14502](https://github.com/matrix-org/synapse/issues/14502), [\#14503](https://github.com/matrix-org/synapse/issues/14503), [\#14504](https://github.com/matrix-org/synapse/issues/14504), [\#14505](https://github.com/matrix-org/synapse/issues/14505))
+
+
+Synapse 1.72.0 (2022-11-22)
+===========================
+
+Please note that Synapse now only supports PostgreSQL 11+, because PostgreSQL 10 has reached end-of-life, cf. our [Deprecation Policy](https://github.com/matrix-org/synapse/blob/develop/docs/deprecation_policy.md).
+
+Bugfixes
+--------
+
+- Update forgotten references to legacy metrics in the included Grafana dashboard. ([\#14477](https://github.com/matrix-org/synapse/issues/14477))
+
+
+Synapse 1.72.0rc1 (2022-11-16)
+==============================
+
+Features
+--------
+
+- Add experimental support for [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912): Relation-based redactions. ([\#14260](https://github.com/matrix-org/synapse/issues/14260))
+- Build Debian packages for Ubuntu 22.10 (Kinetic Kudu). ([\#14396](https://github.com/matrix-org/synapse/issues/14396))
+- Add an [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) endpoint for user lookup based on third-party ID (3PID). Contributed by @ashfame. ([\#14405](https://github.com/matrix-org/synapse/issues/14405))
+- Faster joins: include heroes' membership events in the partial join response, for rooms without a name or canonical alias. ([\#14442](https://github.com/matrix-org/synapse/issues/14442))
+
+
+Bugfixes
+--------
+
+- Faster joins: do not block creation of or queries for room aliases during the resync. ([\#14292](https://github.com/matrix-org/synapse/issues/14292))
+- Fix a bug introduced in Synapse 1.64.0rc1 which could cause log spam when fetching events from other homeservers. ([\#14347](https://github.com/matrix-org/synapse/issues/14347))
+- Fix a bug introduced in Synapse 1.66.0 where certain push rules would not be sent to clients. Contributed by Nico. ([\#14356](https://github.com/matrix-org/synapse/issues/14356))
+- Fix a bug introduced in v1.71.0rc1 where the power level event was incorrectly created during initial room creation. ([\#14361](https://github.com/matrix-org/synapse/issues/14361))
+- Fix the refresh token endpoint to be under /r0 and /v3 instead of /v1. Contributed by Tulir @ Beeper. ([\#14364](https://github.com/matrix-org/synapse/issues/14364))
+- Fix a long-standing bug where Synapse would raise an error when encountering an unrecognised field in a `/sync` filter, instead of ignoring it for forward compatibility. ([\#14369](https://github.com/matrix-org/synapse/issues/14369))
+- Fix a background database update, introduced in Synapse 1.64.0, which could cause poor database performance. ([\#14374](https://github.com/matrix-org/synapse/issues/14374))
+- Fix PostgreSQL sometimes using table scans for queries against the `event_search` table, taking a long time and a large amount of IO. ([\#14409](https://github.com/matrix-org/synapse/issues/14409))
+- Fix rendering of some HTML templates (including emails). Introduced in v1.71.0. ([\#14448](https://github.com/matrix-org/synapse/issues/14448))
+- Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14453](https://github.com/matrix-org/synapse/issues/14453))
+
+
+Updates to the Docker image
+---------------------------
+
+- Add all Stream Writer worker types to `configure_workers_and_start.py`. ([\#14197](https://github.com/matrix-org/synapse/issues/14197))
+- Remove references to legacy worker types in the multi-worker Dockerfile. ([\#14294](https://github.com/matrix-org/synapse/issues/14294))
+
+
+Improved Documentation
+----------------------
+
+- Upload documentation PRs to Netlify. ([\#12947](https://github.com/matrix-org/synapse/issues/12947), [\#14370](https://github.com/matrix-org/synapse/issues/14370))
+- Add an additional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust the general TURN server doc structure. ([\#14293](https://github.com/matrix-org/synapse/issues/14293))
+- Add an example of how to load balance `/sync` requests. Contributed by [aceArt](https://aceart.de). ([\#14297](https://github.com/matrix-org/synapse/issues/14297))
+- Edit the sample Nginx reverse proxy configuration to use HTTP/1.1. Contributed by Brad Jones. ([\#14414](https://github.com/matrix-org/synapse/issues/14414))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove support for PostgreSQL 10. ([\#14392](https://github.com/matrix-org/synapse/issues/14392), [\#14397](https://github.com/matrix-org/synapse/issues/14397))
+
+
+Internal Changes
+----------------
+
+- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
+- Add TLS support for generic worker endpoints. ([\#14128](https://github.com/matrix-org/synapse/issues/14128), [\#14455](https://github.com/matrix-org/synapse/issues/14455))
+- Switch to a maintained action for installing Rust in CI. ([\#14313](https://github.com/matrix-org/synapse/issues/14313))
+- Add an override option to the `complement.sh` command-line script to request certain types of workers. ([\#14324](https://github.com/matrix-org/synapse/issues/14324))
+- Enable testing of [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874) (filtering of `/messages` by relation type) in Complement. ([\#14339](https://github.com/matrix-org/synapse/issues/14339))
+- Concisely log a failure to resolve state due to missing `prev_events`. ([\#14346](https://github.com/matrix-org/synapse/issues/14346))
+- Use a maintained GitHub action to install Rust. ([\#14351](https://github.com/matrix-org/synapse/issues/14351))
+- Clean up old worker datastore classes. Contributed by Nick @ Beeper (@fizzadar). ([\#14375](https://github.com/matrix-org/synapse/issues/14375))
+- Test against PostgreSQL 15 in CI. ([\#14394](https://github.com/matrix-org/synapse/issues/14394))
+- Remove unreachable code. ([\#14410](https://github.com/matrix-org/synapse/issues/14410))
+- Clean up event persistence code. ([\#14411](https://github.com/matrix-org/synapse/issues/14411))
+- Update the docstring to clarify that `get_partial_state_events_batch` does not just give you completely arbitrary partial-state events. ([\#14417](https://github.com/matrix-org/synapse/issues/14417))
+- Fix mypy errors introduced by bumping the locked version of `attrs` and `gitpython`. ([\#14433](https://github.com/matrix-org/synapse/issues/14433))
+- Make Dependabot only bump Rust deps in the lock file. ([\#14434](https://github.com/matrix-org/synapse/issues/14434))
+- Fix an incorrect stub return type for `PushRuleEvaluator.run`. ([\#14451](https://github.com/matrix-org/synapse/issues/14451))
+- Improve performance of `/context` in large rooms. ([\#14461](https://github.com/matrix-org/synapse/issues/14461))
+
+
+Synapse 1.71.0 (2022-11-08)
+===========================
+
+Please note that, as announced in the release notes for Synapse 1.69.0, legacy Prometheus metric names are now disabled by default.
+They will be removed altogether in Synapse 1.73.0.
+If not already done, server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
+See the [upgrade notes](https://matrix-org.github.io/synapse/v1.71/upgrade.html#upgrading-to-v1710) for more details.
+
+**Note:** in line with our [deprecation policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html) for platform dependencies, this will be the last release to support PostgreSQL 10, which reaches upstream end-of-life on November 10th, 2022. Future releases of Synapse will require PostgreSQL 11+.
+
+No significant changes since 1.71.0rc2.
+
+
+Synapse 1.71.0rc2 (2022-11-04)
+==============================
+
+Improved Documentation
+----------------------
+
+- Document the changes to monthly active user metrics due to deprecation of legacy Prometheus metric names. ([\#14358](https://github.com/matrix-org/synapse/issues/14358), [\#14360](https://github.com/matrix-org/synapse/issues/14360))
+
+
+Deprecations and Removals
+-------------------------
+
+- Disable legacy Prometheus metric names by default. They can still be re-enabled for now, but they will be removed altogether in Synapse 1.73.0. ([\#14353](https://github.com/matrix-org/synapse/issues/14353))
+
+
+Internal Changes
+----------------
+
+- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
+
+
+Synapse 1.71.0rc1 (2022-11-01)
+==============================
+
+Features
+--------
+
+- Support back-channel logouts from OpenID Connect providers. ([\#11414](https://github.com/matrix-org/synapse/issues/11414))
+- Allow use of Postgres and SQLite full-text search operators in search queries. ([\#11635](https://github.com/matrix-org/synapse/issues/11635), [\#14310](https://github.com/matrix-org/synapse/issues/14310), [\#14311](https://github.com/matrix-org/synapse/issues/14311))
+- Implement [MSC3664](https://github.com/matrix-org/matrix-doc/pull/3664), push rules for relations. Contributed by Nico. ([\#11804](https://github.com/matrix-org/synapse/issues/11804))
+- Improve aesthetics of HTML templates. Note that these changes do not retroactively apply to templates which have been [customised](https://matrix-org.github.io/synapse/latest/templates.html#templates) by server admins. ([\#13652](https://github.com/matrix-org/synapse/issues/13652))
+- Enable write-ahead logging for SQLite installations (see the sketch after the Bugfixes list below). Contributed by [@asymmetric](https://github.com/asymmetric). ([\#13897](https://github.com/matrix-org/synapse/issues/13897))
+- Show erasure status when [listing users](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#query-user-account) in the Admin API. ([\#14205](https://github.com/matrix-org/synapse/issues/14205))
+- Provide a specific error code when a `/sync` request provides a filter which doesn't represent a JSON object. ([\#14262](https://github.com/matrix-org/synapse/issues/14262))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where the `update_synapse_database` script could not be run with multiple databases. Contributed by @thefinn93 @ Beeper. ([\#13422](https://github.com/matrix-org/synapse/issues/13422))
+- Fix a bug which prevented setting an avatar on homeservers which have an explicit port in their `server_name` and have `max_avatar_size` and/or `allowed_avatar_mimetypes` configuration. Contributed by @ashfame. ([\#13927](https://github.com/matrix-org/synapse/issues/13927))
+- Check appservice user interest against the local users instead of all users in the room to align with [MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905). ([\#13958](https://github.com/matrix-org/synapse/issues/13958))
+- Fix a long-standing bug where Synapse would accidentally include extra information in the response to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14064](https://github.com/matrix-org/synapse/issues/14064))
+- Fix a bug introduced in Synapse 1.64.0 where presence updates could be missing from `/sync` responses. ([\#14243](https://github.com/matrix-org/synapse/issues/14243))
+- Fix a bug introduced in Synapse 1.60.0 which caused an error to be logged when Synapse received a SIGHUP signal if debug logging was enabled. ([\#14258](https://github.com/matrix-org/synapse/issues/14258))
+- Prevent history insertion ([MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716)) during a partial join ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#14291](https://github.com/matrix-org/synapse/issues/14291))
+- Fix a bug introduced in Synapse 1.34.0 where device names would be returned via a federation user key query request when `allow_device_name_lookup_over_federation` was set to `false`. ([\#14304](https://github.com/matrix-org/synapse/issues/14304))
+- Fix a bug introduced in Synapse 0.34.0 where logs could include error spam when background processes are measured as taking a negative amount of time. ([\#14323](https://github.com/matrix-org/synapse/issues/14323))
+- Fix a bug introduced in Synapse 1.70.0 where clients were unable to PUT new [dehydrated devices](https://github.com/matrix-org/matrix-spec-proposals/pull/2697). ([\#14336](https://github.com/matrix-org/synapse/issues/14336))
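For readers unfamiliar with write-ahead logging (flagged in the Features list above), here is a minimal, self-contained sketch using the standard-library SQLite driver. Synapse enables this internally, so nothing below is Synapse code, and the file name is illustrative:

```python
import sqlite3

# Minimal illustration of SQLite write-ahead logging: once the journal
# mode is switched to WAL, readers no longer block writers. SQLite
# reports the active journal mode back from the PRAGMA statement.
conn = sqlite3.connect("example.db")
(mode,) = conn.execute("PRAGMA journal_mode=WAL;").fetchone()
assert mode == "wal"
conn.close()
```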
([\#14323](https://github.com/matrix-org/synapse/issues/14323)) +- Fix a bug introduced in Synapse 1.70.0 where clients were unable to PUT new [dehydrated devices](https://github.com/matrix-org/matrix-spec-proposals/pull/2697). ([\#14336](https://github.com/matrix-org/synapse/issues/14336)) + + +Improved Documentation +---------------------- + +- Explain how to disable the use of [`trusted_key_servers`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#trusted_key_servers). ([\#13999](https://github.com/matrix-org/synapse/issues/13999)) +- Add workers settings to [configuration manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#individual-worker-configuration). ([\#14086](https://github.com/matrix-org/synapse/issues/14086)) +- Correct the name of the config option [`encryption_enabled_by_default_for_room_type`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#encryption_enabled_by_default_for_room_type). ([\#14110](https://github.com/matrix-org/synapse/issues/14110)) +- Update docstrings of `SynapseError` and `FederationError` to bettter describe what they are used for and the effects of using them are. ([\#14191](https://github.com/matrix-org/synapse/issues/14191)) + + +Internal Changes +---------------- + +- Remove unused `@lru_cache` decorator. ([\#13595](https://github.com/matrix-org/synapse/issues/13595)) +- Save login tokens in database and prevent login token reuse. ([\#13844](https://github.com/matrix-org/synapse/issues/13844)) +- Refactor OIDC tests to better mimic an actual OIDC provider. ([\#13910](https://github.com/matrix-org/synapse/issues/13910)) +- Fix type annotation causing import time error in the Complement forking launcher. ([\#14084](https://github.com/matrix-org/synapse/issues/14084)) +- Refactor [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to loop over federation destinations with standard pattern and error handling. ([\#14096](https://github.com/matrix-org/synapse/issues/14096)) +- Add initial power level event to batch of bulk persisted events when creating a new room. ([\#14228](https://github.com/matrix-org/synapse/issues/14228)) +- Refactor `/key/` endpoints to use `RestServlet` classes. ([\#14229](https://github.com/matrix-org/synapse/issues/14229)) +- Switch to using the `matrix-org/backend-meta` version of `triage-incoming` for new issues in CI. ([\#14230](https://github.com/matrix-org/synapse/issues/14230)) +- Build wheels on macos 11, not 10.15. ([\#14249](https://github.com/matrix-org/synapse/issues/14249)) +- Add debugging to help diagnose lost device list updates. ([\#14268](https://github.com/matrix-org/synapse/issues/14268)) +- Add Rust cache to CI for `trial` runs. ([\#14287](https://github.com/matrix-org/synapse/issues/14287)) +- Improve type hinting of `RawHeaders`. ([\#14303](https://github.com/matrix-org/synapse/issues/14303)) +- Use Poetry 1.2.0 in the Twisted Trunk CI job. ([\#14305](https://github.com/matrix-org/synapse/issues/14305)) + +
+Dependency updates + +Runtime: + +- Bump anyhow from 1.0.65 to 1.0.66. ([\#14278](https://github.com/matrix-org/synapse/issues/14278)) +- Bump jinja2 from 3.0.3 to 3.1.2. ([\#14271](https://github.com/matrix-org/synapse/issues/14271)) +- Bump prometheus-client from 0.14.0 to 0.15.0. ([\#14274](https://github.com/matrix-org/synapse/issues/14274)) +- Bump psycopg2 from 2.9.4 to 2.9.5. ([\#14331](https://github.com/matrix-org/synapse/issues/14331)) +- Bump pysaml2 from 7.1.2 to 7.2.1. ([\#14270](https://github.com/matrix-org/synapse/issues/14270)) +- Bump sentry-sdk from 1.5.11 to 1.10.1. ([\#14330](https://github.com/matrix-org/synapse/issues/14330)) +- Bump serde from 1.0.145 to 1.0.147. ([\#14277](https://github.com/matrix-org/synapse/issues/14277)) +- Bump serde_json from 1.0.86 to 1.0.87. ([\#14279](https://github.com/matrix-org/synapse/issues/14279)) + +Tooling and CI: + +- Bump black from 22.3.0 to 22.10.0. ([\#14328](https://github.com/matrix-org/synapse/issues/14328)) +- Bump flake8-bugbear from 21.3.2 to 22.9.23. ([\#14042](https://github.com/matrix-org/synapse/issues/14042)) +- Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0. ([\#14276](https://github.com/matrix-org/synapse/issues/14276)) +- Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. ([\#14275](https://github.com/matrix-org/synapse/issues/14275)) +- Bump setuptools-rust from 1.5.1 to 1.5.2. ([\#14273](https://github.com/matrix-org/synapse/issues/14273)) +- Bump twine from 3.8.0 to 4.0.1. ([\#14332](https://github.com/matrix-org/synapse/issues/14332)) +- Bump types-opentracing from 2.4.7 to 2.4.10. ([\#14133](https://github.com/matrix-org/synapse/issues/14133)) +- Bump types-requests from 2.28.11 to 2.28.11.2. ([\#14272](https://github.com/matrix-org/synapse/issues/14272)) +
+ +Synapse 1.70.1 (2022-10-28) +=========================== + +This release fixes some regressions that were discovered in 1.70.0. + +[#14300](https://github.com/matrix-org/synapse/issues/14300) +was previously reported to be a regression in 1.70.0 as well. However, we have +since concluded that it was limited to the reporter and thus have not needed +to include any fix for it in 1.70.1. + + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.70.0rc1 where the access tokens sent to application services as headers were malformed. Application services which were obtaining access tokens from query parameters were not affected. ([\#14301](https://github.com/matrix-org/synapse/issues/14301)) +- Fix room creation being rate limited too aggressively since Synapse v1.69.0. ([\#14314](https://github.com/matrix-org/synapse/issues/14314)) + + +Synapse 1.70.0 (2022-10-26) +=========================== + +No significant changes since 1.70.0rc2. + + +Synapse 1.70.0rc2 (2022-10-25) +============================== + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.70.0rc1 where the information returned from the `/threads` API could be stale when threaded events are redacted. ([\#14248](https://github.com/matrix-org/synapse/issues/14248)) +- Fix a bug introduced in Synapse 1.70.0rc1 leading to broken outbound federation when using Python 3.7. ([\#14280](https://github.com/matrix-org/synapse/issues/14280)) +- Fix a bug introduced in Synapse 1.70.0rc1 where edits to non-message events were aggregated by the homeserver. ([\#14283](https://github.com/matrix-org/synapse/issues/14283)) + + +Internal Changes +---------------- + +- Build ABI3 wheels for CPython. ([\#14253](https://github.com/matrix-org/synapse/issues/14253)) +- For the aarch64 architecture, only build wheels for CPython manylinux. ([\#14259](https://github.com/matrix-org/synapse/issues/14259)) + + +Synapse 1.70.0rc1 (2022-10-19) +============================== + +Features +-------- + +- Support for [MSC3856](https://github.com/matrix-org/matrix-spec-proposals/pull/3856): threads list API. ([\#13394](https://github.com/matrix-org/synapse/issues/13394), [\#14171](https://github.com/matrix-org/synapse/issues/14171), [\#14175](https://github.com/matrix-org/synapse/issues/14175)) +- Support for thread-specific notifications & receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771) and [MSC3773](https://github.com/matrix-org/matrix-spec-proposals/pull/3773)). ([\#13776](https://github.com/matrix-org/synapse/issues/13776), [\#13824](https://github.com/matrix-org/synapse/issues/13824), [\#13877](https://github.com/matrix-org/synapse/issues/13877), [\#13878](https://github.com/matrix-org/synapse/issues/13878), [\#14050](https://github.com/matrix-org/synapse/issues/14050), [\#14140](https://github.com/matrix-org/synapse/issues/14140), [\#14159](https://github.com/matrix-org/synapse/issues/14159), [\#14163](https://github.com/matrix-org/synapse/issues/14163), [\#14174](https://github.com/matrix-org/synapse/issues/14174), [\#14222](https://github.com/matrix-org/synapse/issues/14222)) +- Stop fetching missing `prev_events` after we already know their signature is invalid. ([\#13816](https://github.com/matrix-org/synapse/issues/13816)) +- Send application service access tokens as a header (and query parameter). Implements [MSC2832](https://github.com/matrix-org/matrix-spec-proposals/pull/2832). ([\#13996](https://github.com/matrix-org/synapse/issues/13996)) +- Ignore server ACL changes when generating pushes. 
Implements [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786). ([\#13997](https://github.com/matrix-org/synapse/issues/13997))
+- Experimental support for redirecting to an implementation of an [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) HTTP rendezvous service. ([\#14018](https://github.com/matrix-org/synapse/issues/14018))
+- The `/relations` endpoint can now be used on workers. ([\#14028](https://github.com/matrix-org/synapse/issues/14028))
+- Advertise support for Matrix 1.3 and 1.4 on `/_matrix/client/versions`. ([\#14032](https://github.com/matrix-org/synapse/issues/14032), [\#14184](https://github.com/matrix-org/synapse/issues/14184))
+- Improve validation of request bodies for the [Device Management](https://spec.matrix.org/v1.4/client-server-api/#device-management) and [MSC2697 Device Dehydration](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) client-server API endpoints. ([\#14054](https://github.com/matrix-org/synapse/issues/14054))
+- Experimental support for [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874): Filtering threads from the `/messages` endpoint. ([\#14148](https://github.com/matrix-org/synapse/issues/14148))
+- Improve the validation of the following PUT endpoints: [`/directory/room/{roomAlias}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directoryroomroomalias), [`/directory/list/room/{roomId}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directorylistroomroomid) and [`/directory/list/appservice/{networkId}/{roomId}`](https://spec.matrix.org/v1.4/application-service-api/#put_matrixclientv3directorylistappservicenetworkidroomid). ([\#14179](https://github.com/matrix-org/synapse/issues/14179))
+- Build and publish binary wheels for `aarch64` platforms. ([\#14212](https://github.com/matrix-org/synapse/issues/14212))
+
+
+Bugfixes
+--------
+
+- Prevent device names from appearing in device list updates in some situations when `allow_device_name_lookup_over_federation` is `false`. (This is not comprehensive: see [\#13114](https://github.com/matrix-org/synapse/issues/13114).) ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
+- Fix a long-standing bug where redactions were not being sent over federation if we did not have the original event. ([\#13813](https://github.com/matrix-org/synapse/issues/13813))
+- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled or have their new content applied. ([\#14034](https://github.com/matrix-org/synapse/issues/14034))
+- Fix a bug introduced in Synapse 1.53.0 when querying `/publicRooms` with both a `room_type` filter and a `third_party_instance_id`. ([\#14053](https://github.com/matrix-org/synapse/issues/14053))
+- Fix a bug introduced in Synapse 1.35.0 where errors parsing a `/send_join` or `/state` response would produce excessive, low-quality Sentry events. ([\#14065](https://github.com/matrix-org/synapse/issues/14065))
+- Fix a long-standing bug where Synapse would error on the optional `invite_room_state` field not being provided to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14083](https://github.com/matrix-org/synapse/issues/14083))
+- Fix a bug where invalid oEmbed fields would cause the entire response to be discarded. Introduced in Synapse 1.18.0. 
([\#14089](https://github.com/matrix-org/synapse/issues/14089))
+- Fix a bug introduced in Synapse 1.37.0 in which an incorrect key name was used for sending and receiving room metadata when knocking on a room. ([\#14102](https://github.com/matrix-org/synapse/issues/14102))
+- Fix a bug introduced in v1.69.0rc1 where the joined hosts for a given event were not being properly cached. ([\#14125](https://github.com/matrix-org/synapse/issues/14125))
+- Fix a bug introduced in Synapse 1.30.0 where purging and rejoining a room without restarting in-between would result in a broken room. ([\#14161](https://github.com/matrix-org/synapse/issues/14161), [\#14164](https://github.com/matrix-org/synapse/issues/14164))
+- Fix [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint returning potentially inaccurate closest events with `outliers` present. ([\#14215](https://github.com/matrix-org/synapse/issues/14215))
+
+
+Updates to the Docker image
+---------------------------
+
+- Update the version of frozendict in Docker images and Debian packages from 2.3.3 to 2.3.4, which may fix memory leak problems. ([\#13955](https://github.com/matrix-org/synapse/issues/13955))
+- Use the `minimal` Rust profile when building Synapse. ([\#14141](https://github.com/matrix-org/synapse/issues/14141))
+- Prevent a class of database sharding errors when using `Dockerfile-workers` to spawn multiple instances of the same worker. Contributed by Jason Little. ([\#14165](https://github.com/matrix-org/synapse/issues/14165))
+- Set `LD_PRELOAD` to use the jemalloc memory allocator in Dockerfile-workers. ([\#14182](https://github.com/matrix-org/synapse/issues/14182))
+- Fix pre-startup logging being lost when using the `Dockerfile-workers` image. ([\#14195](https://github.com/matrix-org/synapse/issues/14195))
+
+
+Improved Documentation
+----------------------
+
+- Add sample worker files for `pusher` and `federation_sender`. ([\#14077](https://github.com/matrix-org/synapse/issues/14077))
+- Improve the listener example in the metrics documentation. ([\#14078](https://github.com/matrix-org/synapse/issues/14078))
+- Expand Google OpenID Connect example config to map the email attribute. Contributed by @ptman. ([\#14081](https://github.com/matrix-org/synapse/issues/14081))
+- Document that ending a changelog entry with a full stop or exclamation mark is not optional. ([\#14087](https://github.com/matrix-org/synapse/issues/14087))
+- Fix links to the jemalloc documentation, which were broken in [#13491](https://github.com/matrix-org/synapse/pull/13491). ([\#14093](https://github.com/matrix-org/synapse/issues/14093))
+- Remove an unneeded `replication` listener from the docker compose example. ([\#14107](https://github.com/matrix-org/synapse/issues/14107))
+- Fix name of `alias_creation_rules` option in the config manual documentation. ([\#14124](https://github.com/matrix-org/synapse/issues/14124))
+- Clarify comment on event contexts. ([\#14145](https://github.com/matrix-org/synapse/issues/14145))
+- Fix dead link to the [Admin Registration API](https://matrix-org.github.io/synapse/latest/admin_api/register_api.html). ([\#14189](https://github.com/matrix-org/synapse/issues/14189))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the experimental implementation of [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772). 
([\#14094](https://github.com/matrix-org/synapse/issues/14094))
+- Remove the unstable identifier for [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#14106](https://github.com/matrix-org/synapse/issues/14106), [\#14146](https://github.com/matrix-org/synapse/issues/14146))
+
+
+Internal Changes
+----------------
+
+- Optimise queries used to get a user's rooms during sync. Contributed by Nick @ Beeper (@fizzadar). ([\#13991](https://github.com/matrix-org/synapse/issues/13991))
+- Update authlib from 0.15.5 to 1.1.0. ([\#14006](https://github.com/matrix-org/synapse/issues/14006))
+- Make `parse_server_name` consistent in handling invalid server names. ([\#14007](https://github.com/matrix-org/synapse/issues/14007))
+- Don't repeatedly wake up the same users for batched events. ([\#14033](https://github.com/matrix-org/synapse/issues/14033))
+- Complement test image: capture logs from nginx. ([\#14063](https://github.com/matrix-org/synapse/issues/14063))
+- Don't create noisy Sentry events when a requester drops connection to the metrics server mid-request. ([\#14072](https://github.com/matrix-org/synapse/issues/14072))
+- Run the integration test suites with the asyncio reactor enabled in CI. ([\#14092](https://github.com/matrix-org/synapse/issues/14092))
+- Add debug logs to figure out why an event was filtered out of the client response. ([\#14095](https://github.com/matrix-org/synapse/issues/14095))
+- Indicate what endpoint came back with a JSON response we were unable to parse. ([\#14097](https://github.com/matrix-org/synapse/issues/14097))
+- Break up calls to fetch rooms for many users. Contributed by Nick @ Beeper (@fizzadar). ([\#14109](https://github.com/matrix-org/synapse/issues/14109))
+- Faster joins: prioritise the server we joined by when restarting a partial join resync. ([\#14126](https://github.com/matrix-org/synapse/issues/14126))
+- Cache the Rust build cache when building docker images. ([\#14130](https://github.com/matrix-org/synapse/issues/14130))
+- Enable dependabot for Rust dependencies. ([\#14132](https://github.com/matrix-org/synapse/issues/14132))
+- Bump typing-extensions from 4.1.1 to 4.4.0. ([\#14134](https://github.com/matrix-org/synapse/issues/14134))
+- Use the `minimal` Rust profile when building Synapse. ([\#14141](https://github.com/matrix-org/synapse/issues/14141))
+- Remove unused configuration code. ([\#14142](https://github.com/matrix-org/synapse/issues/14142))
+- Prepare for the [`gotestfmt` repository move](https://github.com/GoTestTools/gotestfmt/discussions/46). ([\#14144](https://github.com/matrix-org/synapse/issues/14144))
+- Invalidate rooms-for-user caches on replicated events, fixing a sync cache race in Synapse workers. Contributed by Nick @ Beeper (@fizzadar). ([\#14155](https://github.com/matrix-org/synapse/issues/14155))
+- Enable URL previews when testing with Complement. ([\#14198](https://github.com/matrix-org/synapse/issues/14198))
+- When authenticating batched events, check for auth events in batch as well as DB. ([\#14214](https://github.com/matrix-org/synapse/issues/14214))
+- Update CI config to avoid GitHub Actions deprecation warnings. ([\#14216](https://github.com/matrix-org/synapse/issues/14216), [\#14224](https://github.com/matrix-org/synapse/issues/14224))
+- Update dependency requirements to allow building with poetry-core 1.3.2. 
([\#14217](https://github.com/matrix-org/synapse/issues/14217))
+- Rename the `cache_memory` extra to `cache-memory`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14221](https://github.com/matrix-org/synapse/issues/14221))
+- Specify dev-dependencies using lower bounds, to reduce the likelihood of a dependabot merge conflict. The lockfile continues to pin to specific versions. ([\#14227](https://github.com/matrix-org/synapse/issues/14227))
+
+
+Synapse 1.69.0 (2022-10-17)
+===========================
+
+Please note that legacy Prometheus metric names are now deprecated and will be removed in Synapse 1.73.0.
+Server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
+See the [upgrade notes](https://matrix-org.github.io/synapse/v1.69/upgrade.html#upgrading-to-v1690) for more details.
+
+
+No significant changes since 1.69.0rc4.
+
+
+Synapse 1.69.0rc4 (2022-10-14)
+==============================
+
+Bugfixes
+--------
+
+- Fix poor performance of the `event_push_backfill_thread_id` background update, which was introduced in Synapse 1.68.0rc1. ([\#14172](https://github.com/matrix-org/synapse/issues/14172), [\#14181](https://github.com/matrix-org/synapse/issues/14181))
+
+
+Updates to the Docker image
+---------------------------
+
+- Fix docker build OOMing in CI for arm64 builds. ([\#14173](https://github.com/matrix-org/synapse/issues/14173))
+
+
+Synapse 1.69.0rc3 (2022-10-12)
+==============================
+
+Bugfixes
+--------
+
+- Fix an issue with Docker images causing the Rust dependencies to not be pinned correctly. Introduced in v1.68.0. ([\#14129](https://github.com/matrix-org/synapse/issues/14129))
+- Fix a bug introduced in Synapse 1.69.0rc1 which would cause registration replication requests to fail if the worker sending the request is not running Synapse 1.69. ([\#14135](https://github.com/matrix-org/synapse/issues/14135))
+- Fix error in background update when rotating existing notifications. Introduced in v1.69.0rc2. ([\#14138](https://github.com/matrix-org/synapse/issues/14138))
+
+
+Internal Changes
+----------------
+
+- Rename the `url_preview` extra to `url-preview`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14085](https://github.com/matrix-org/synapse/issues/14085))
+
+
+Synapse 1.69.0rc2 (2022-10-06)
+==============================
+
+Deprecations and Removals
+-------------------------
+
+- Deprecate the `generate_short_term_login_token` method in favor of an async `create_login_token` method in the Module API. ([\#13842](https://github.com/matrix-org/synapse/issues/13842))
+
+
+Internal Changes
+----------------
+
+- Ensure Synapse v1.69 works with upcoming database changes in v1.70. ([\#14045](https://github.com/matrix-org/synapse/issues/14045))
+- Fix a bug introduced in Synapse v1.68.0 where messages could not be sent in rooms with non-integer `notifications` power level. ([\#14073](https://github.com/matrix-org/synapse/issues/14073))
+- Temporarily pin build-system requirements to work around an incompatibility with poetry-core 1.3.0. This will be reverted before the v1.69.0 release proper, see [\#14079](https://github.com/matrix-org/synapse/issues/14079). 
([\#14080](https://github.com/matrix-org/synapse/issues/14080))
+
+
+Synapse 1.69.0rc1 (2022-10-04)
+==============================
+
+Features
+--------
+
+- Allow application services to set the `origin_server_ts` of a state event by providing the query parameter `ts` in [`PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3roomsroomidstateeventtypestatekey), per [MSC3316](https://github.com/matrix-org/matrix-doc/pull/3316). Contributed by @lukasdenk. ([\#11866](https://github.com/matrix-org/synapse/issues/11866))
+- Allow server admins to require a manual approval process before new accounts can be used (using [MSC3866](https://github.com/matrix-org/matrix-spec-proposals/pull/3866)). ([\#13556](https://github.com/matrix-org/synapse/issues/13556))
+- Exponentially back off from backfilling the same event over and over. ([\#13635](https://github.com/matrix-org/synapse/issues/13635), [\#13936](https://github.com/matrix-org/synapse/issues/13936))
+- Add cache invalidation across workers to module API. ([\#13667](https://github.com/matrix-org/synapse/issues/13667), [\#13947](https://github.com/matrix-org/synapse/issues/13947))
+- Experimental implementation of [MSC3882](https://github.com/matrix-org/matrix-spec-proposals/pull/3882) to allow an existing device/session to generate a login token for use on a new device/session. ([\#13722](https://github.com/matrix-org/synapse/issues/13722), [\#13868](https://github.com/matrix-org/synapse/issues/13868))
+- Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)). ([\#13782](https://github.com/matrix-org/synapse/issues/13782), [\#13893](https://github.com/matrix-org/synapse/issues/13893), [\#13932](https://github.com/matrix-org/synapse/issues/13932), [\#13937](https://github.com/matrix-org/synapse/issues/13937), [\#13939](https://github.com/matrix-org/synapse/issues/13939))
+- Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881). ([\#13799](https://github.com/matrix-org/synapse/issues/13799), [\#13831](https://github.com/matrix-org/synapse/issues/13831), [\#13860](https://github.com/matrix-org/synapse/issues/13860))
+- Keep track of when an event pulled over federation fails its signature check so we can intelligently back off in the future. ([\#13815](https://github.com/matrix-org/synapse/issues/13815))
+- Improve validation for the unspecced, internal-only `_matrix/client/unstable/add_threepid/msisdn/submit_token` endpoint. ([\#13832](https://github.com/matrix-org/synapse/issues/13832))
+- Faster remote room joins: record _when_ we first partial-join to a room. ([\#13892](https://github.com/matrix-org/synapse/issues/13892))
+- Support a `dir` parameter on the `/relations` endpoint per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#13920](https://github.com/matrix-org/synapse/issues/13920))
+- Ask mail servers receiving emails from Synapse to not send automatic replies (e.g. out-of-office responses). ([\#13957](https://github.com/matrix-org/synapse/issues/13957))
+
+
+Bugfixes
+--------
+
+- Send push notifications for invites received over federation. 
([\#13719](https://github.com/matrix-org/synapse/issues/13719), [\#14014](https://github.com/matrix-org/synapse/issues/14014))
+- Fix a long-standing bug where typing events would be accepted from remote servers not present in a room. Also fix a bug where incoming typing events would cause other incoming events to get stuck during a fast join. ([\#13830](https://github.com/matrix-org/synapse/issues/13830))
+- Fix a bug introduced in Synapse v1.53.0 where the experimental implementation of [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) would give incorrect results when paginating forward. ([\#13840](https://github.com/matrix-org/synapse/issues/13840))
+- Fix access token leak to logs from proxy agent. ([\#13855](https://github.com/matrix-org/synapse/issues/13855))
+- Fix the `have_seen_event` cache not being invalidated after we persist an event, which caused inefficiencies such as extra `/state` federation calls. ([\#13863](https://github.com/matrix-org/synapse/issues/13863))
+- Faster room joins: Fix a bug introduced in 1.66.0 where an error would be logged when syncing after joining a room. ([\#13872](https://github.com/matrix-org/synapse/issues/13872))
+- Fix a bug introduced in 1.66.0 where some required fields in the push rules sent to clients were not present anymore. Contributed by Nico. ([\#13904](https://github.com/matrix-org/synapse/issues/13904))
+- Fix packaging to include `Cargo.lock` in `sdist`. ([\#13909](https://github.com/matrix-org/synapse/issues/13909))
+- Fix a long-standing bug where device updates could cause delays sending out to-device messages over federation. ([\#13922](https://github.com/matrix-org/synapse/issues/13922))
+- Fix a bug introduced in v1.68.0 where Synapse would require `setuptools_rust` at runtime, even though the package is only required at build time. ([\#13952](https://github.com/matrix-org/synapse/issues/13952))
+- Fix a long-standing bug where `POST /_matrix/client/v3/keys/query` requests could result in excessively large SQL queries. ([\#13956](https://github.com/matrix-org/synapse/issues/13956))
+- Fix a performance regression in the `get_users_in_room` database query. Introduced in v1.67.0. ([\#13972](https://github.com/matrix-org/synapse/issues/13972))
+- Fix a bug introduced in v1.68.0 where the Rust extension wasn't built in `release` mode when using `poetry install`. ([\#14009](https://github.com/matrix-org/synapse/issues/14009))
+- Do not return an unspecified `original_event` field when using the stable `/relations` endpoint. Introduced in Synapse v1.57.0. ([\#14025](https://github.com/matrix-org/synapse/issues/14025))
+- Correctly handle a race with device lists when a remote user leaves during a partial join. ([\#13885](https://github.com/matrix-org/synapse/issues/13885))
+- Correctly handle sending local device list updates to remote servers during a partial join. ([\#13934](https://github.com/matrix-org/synapse/issues/13934))
+
+
+Improved Documentation
+----------------------
+
+- Add `worker_main_http_uri` for the worker generator bash script. ([\#13772](https://github.com/matrix-org/synapse/issues/13772))
+- Update URL for the NixOS module for Synapse. ([\#13818](https://github.com/matrix-org/synapse/issues/13818))
+- Fix a mistake in sso_mapping_providers.md: `map_user_attributes` is expected to return `display_name`, not `displayname`. 
([\#13836](https://github.com/matrix-org/synapse/issues/13836))
+- Fix a cross-link from the registration admin API to the `registration_shared_secret` configuration documentation. ([\#13870](https://github.com/matrix-org/synapse/issues/13870))
+- Update the man page for the `hash_password` script to correct the default number of bcrypt rounds performed. ([\#13911](https://github.com/matrix-org/synapse/issues/13911), [\#13930](https://github.com/matrix-org/synapse/issues/13930))
+- Emphasize the right reasons for using `(room_id, event_id)` in a database schema. ([\#13915](https://github.com/matrix-org/synapse/issues/13915))
+- Add instruction to contributing guide for running unit tests in parallel. Contributed by @ashfame. ([\#13928](https://github.com/matrix-org/synapse/issues/13928))
+- Clarify that the `auto_join_rooms` config option can also be used with Space aliases. ([\#13931](https://github.com/matrix-org/synapse/issues/13931))
+- Add some cross references to worker documentation. ([\#13974](https://github.com/matrix-org/synapse/issues/13974))
+- Linkify URLs in the config documentation. ([\#14003](https://github.com/matrix-org/synapse/issues/14003))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the `complete_sso_login` method from the Module API which was deprecated in Synapse 1.13.0. ([\#13843](https://github.com/matrix-org/synapse/issues/13843))
+- Announce that legacy metric names are deprecated and will be turned off by default in Synapse v1.71.0, then removed altogether in Synapse v1.73.0. See the upgrade notes for more information. ([\#14024](https://github.com/matrix-org/synapse/issues/14024))
+
+
+Internal Changes
+----------------
+
+- Speed up creation of DM rooms. ([\#13487](https://github.com/matrix-org/synapse/issues/13487), [\#13800](https://github.com/matrix-org/synapse/issues/13800))
+- Port push rules to using Rust. ([\#13768](https://github.com/matrix-org/synapse/issues/13768), [\#13838](https://github.com/matrix-org/synapse/issues/13838), [\#13889](https://github.com/matrix-org/synapse/issues/13889))
+- Optimise get-rooms-for-user calls. Contributed by Nick @ Beeper (@fizzadar). ([\#13787](https://github.com/matrix-org/synapse/issues/13787))
+- Update the script which makes full schema dumps. ([\#13792](https://github.com/matrix-org/synapse/issues/13792))
+- Use shared methods for cache invalidation when persisting events, remove duplicate codepaths. Contributed by Nick @ Beeper (@fizzadar). ([\#13796](https://github.com/matrix-org/synapse/issues/13796))
+- Improve the `synapse.api.auth.Auth` mock used in unit tests. ([\#13809](https://github.com/matrix-org/synapse/issues/13809))
+- Faster Remote Room Joins: tell remote homeservers that we are unable to authorise them if they query a room which has partial state on our server. ([\#13823](https://github.com/matrix-org/synapse/issues/13823))
+- Carry IdP Session IDs through user-mapping sessions. ([\#13839](https://github.com/matrix-org/synapse/issues/13839))
+- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
+- Raise an issue if Complement fails with latest deps. ([\#13859](https://github.com/matrix-org/synapse/issues/13859))
+- Correct the comments in the complement dockerfile. ([\#13867](https://github.com/matrix-org/synapse/issues/13867))
+- Create a new snapshot of the database schema. 
([\#13873](https://github.com/matrix-org/synapse/issues/13873))
+- Faster room joins: Send device list updates to most servers in rooms with partial state. ([\#13874](https://github.com/matrix-org/synapse/issues/13874), [\#14013](https://github.com/matrix-org/synapse/issues/14013))
+- Add comments to the Prometheus recording rules to make it clear which set of rules you need for Grafana or Prometheus Console. ([\#13876](https://github.com/matrix-org/synapse/issues/13876))
+- Only pull relevant backfill points from the database based on the current depth and limit (instead of all) every time we want to `/backfill`. ([\#13879](https://github.com/matrix-org/synapse/issues/13879))
+- Faster room joins: Avoid waiting for full state when processing `/keys/changes` requests. ([\#13888](https://github.com/matrix-org/synapse/issues/13888))
+- Improve backfill robustness by trying more servers when we get a `4xx` error back. ([\#13890](https://github.com/matrix-org/synapse/issues/13890))
+- Fix mypy errors with canonicaljson 1.6.3. ([\#13905](https://github.com/matrix-org/synapse/issues/13905))
+- Faster remote room joins: correctly handle remote device list updates during a partial join. ([\#13913](https://github.com/matrix-org/synapse/issues/13913))
+- Complement image: propagate SIGTERM to all workers. ([\#13914](https://github.com/matrix-org/synapse/issues/13914))
+- Update an inaccurate comment in Synapse's upsert database helper. ([\#13924](https://github.com/matrix-org/synapse/issues/13924))
+- Update mypy (0.950 -> 0.981) and mypy-zope (0.3.7 -> 0.3.11). ([\#13925](https://github.com/matrix-org/synapse/issues/13925), [\#13993](https://github.com/matrix-org/synapse/issues/13993))
+- Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating users to copy over during a room upgrade. ([\#13960](https://github.com/matrix-org/synapse/issues/13960))
+- Refactor language in user directory `_track_user_joined_room` code to make it more clear that we use both local and remote users. ([\#13966](https://github.com/matrix-org/synapse/issues/13966))
+- Revert catch-all exceptions being recorded as event pull attempt failures (only handle what we know about). ([\#13969](https://github.com/matrix-org/synapse/issues/13969))
+- Speed up calculating push actions in large rooms. ([\#13973](https://github.com/matrix-org/synapse/issues/13973), [\#13992](https://github.com/matrix-org/synapse/issues/13992))
+- Enable update notifications from GitHub's dependabot. ([\#13976](https://github.com/matrix-org/synapse/issues/13976))
+- Prototype a workflow to automatically add changelogs to dependabot PRs. ([\#13998](https://github.com/matrix-org/synapse/issues/13998), [\#14011](https://github.com/matrix-org/synapse/issues/14011), [\#14017](https://github.com/matrix-org/synapse/issues/14017), [\#14021](https://github.com/matrix-org/synapse/issues/14021), [\#14027](https://github.com/matrix-org/synapse/issues/14027))
+- Fix type annotations to be compatible with new annotations in development versions of Twisted. ([\#14012](https://github.com/matrix-org/synapse/issues/14012))
+- Clear out stale entries in `event_push_actions_staging` table. ([\#14020](https://github.com/matrix-org/synapse/issues/14020))
+- Bump versions of GitHub actions. 
([\#13978](https://github.com/matrix-org/synapse/issues/13978), [\#13979](https://github.com/matrix-org/synapse/issues/13979), [\#13980](https://github.com/matrix-org/synapse/issues/13980), [\#13982](https://github.com/matrix-org/synapse/issues/13982), [\#14015](https://github.com/matrix-org/synapse/issues/14015), [\#14019](https://github.com/matrix-org/synapse/issues/14019), [\#14022](https://github.com/matrix-org/synapse/issues/14022), [\#14023](https://github.com/matrix-org/synapse/issues/14023))
+
+
+Synapse 1.68.0 (2022-09-27)
+===========================
+
+Please note that Synapse will now refuse to start if configured to use a version of SQLite older than 3.27.
+
+In addition, please note that installing Synapse from a source checkout now requires a recent Rust compiler.
+Those using packages will not be affected; on most platforms, installing with `pip install matrix-synapse` will not be affected either.
+See the [upgrade notes](https://matrix-org.github.io/synapse/v1.68/upgrade.html#upgrading-to-v1680).
+
+Bugfixes
+--------
+
+- Fix packaging to include `Cargo.lock` in `sdist`. ([\#13909](https://github.com/matrix-org/synapse/issues/13909))
+
+
+Synapse 1.68.0rc2 (2022-09-23)
+==============================
+
+Bugfixes
+--------
+
+- Fix building from packaged sdist. Broken in v1.68.0rc1. ([\#13866](https://github.com/matrix-org/synapse/issues/13866))
+
+
+Internal Changes
+----------------
+
+- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
+- Lower minimum supported rustc version to 1.58.1. ([\#13857](https://github.com/matrix-org/synapse/issues/13857))
+- Lock Rust dependencies' versions. ([\#13858](https://github.com/matrix-org/synapse/issues/13858))
+
+
+Synapse 1.68.0rc1 (2022-09-20)
+==============================
+
+Features
+--------
+
+- Keep track of when we fail to process a pulled event over federation so we can intelligently back off in the future. ([\#13589](https://github.com/matrix-org/synapse/issues/13589), [\#13814](https://github.com/matrix-org/synapse/issues/13814))
+- Add an [admin API endpoint to fetch messages within a particular window of time](https://matrix-org.github.io/synapse/v1.68/admin_api/rooms.html#room-messages-api). ([\#13672](https://github.com/matrix-org/synapse/issues/13672))
+- Add an [admin API endpoint to find a user based on their external ID in an auth provider](https://matrix-org.github.io/synapse/v1.68/admin_api/user_admin_api.html#find-a-user-based-on-their-id-in-an-auth-provider). ([\#13810](https://github.com/matrix-org/synapse/issues/13810))
+- Cancel the processing of key query requests when they time out. 
([\#13680](https://github.com/matrix-org/synapse/issues/13680))
+- Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/msisdn/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidmsisdnrequesttoken), [`/org.matrix.msc3720/account_status`](https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/user_status/proposals/3720-account-status.md#post-_matrixclientv1account_status), [`/account/3pid/add`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidadd), [`/account/3pid/bind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidbind), [`/account/3pid/delete`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3piddelete) and [`/account/3pid/unbind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidunbind). ([\#13687](https://github.com/matrix-org/synapse/issues/13687), [\#13736](https://github.com/matrix-org/synapse/issues/13736))
+- Document the timestamp when a user accepts the consent, if [consent tracking](https://matrix-org.github.io/synapse/latest/consent_tracking.html) is used. ([\#13741](https://github.com/matrix-org/synapse/issues/13741))
+- Add a `listeners[x].request_id_header` configuration option to specify which request header to extract and use as the request ID in order to correlate requests from a reverse proxy. ([\#13801](https://github.com/matrix-org/synapse/issues/13801))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). ([\#13506](https://github.com/matrix-org/synapse/issues/13506))
+- Fix a long-standing bug where previously rejected events could end up in room state because they pass auth checks given the current state of the room. ([\#13723](https://github.com/matrix-org/synapse/issues/13723))
+- Fix a long-standing bug where Synapse fails to start if a signing key file contains an empty line. ([\#13738](https://github.com/matrix-org/synapse/issues/13738))
+- Fix a long-standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases. ([\#13746](https://github.com/matrix-org/synapse/issues/13746))
+- Fix a long-standing bug where device lists would remain cached when remote users left and rejoined the last room shared with the local homeserver. ([\#13749](https://github.com/matrix-org/synapse/issues/13749), [\#13826](https://github.com/matrix-org/synapse/issues/13826))
+- Fix a long-standing bug that could cause stale caches in some rare cases on the first startup of Synapse with replication. ([\#13766](https://github.com/matrix-org/synapse/issues/13766))
+- Fix a long-standing spec compliance bug where Synapse would accept a trailing slash on the end of `/get_missing_events` federation requests. ([\#13789](https://github.com/matrix-org/synapse/issues/13789))
+- Delete associated data from `event_failed_pull_attempts`, `insertion_events` and `insertion_event_extremities` when purging the room. ([\#13825](https://github.com/matrix-org/synapse/issues/13825))
+
+
+Improved Documentation
+----------------------
+
+- Note that `libpq` is required on ARM-based Macs. 
([\#13480](https://github.com/matrix-org/synapse/issues/13480))
+- Fix a mistake in the config manual introduced in Synapse 1.22.0: the `event_cache_size` _is_ scaled by `caches.global_factor`. ([\#13726](https://github.com/matrix-org/synapse/issues/13726))
+- Fix a typo in the documentation for the login ratelimiting configuration. ([\#13727](https://github.com/matrix-org/synapse/issues/13727))
+- Define Synapse's compatibility policy for SQLite versions. ([\#13728](https://github.com/matrix-org/synapse/issues/13728))
+- Add docs for the common fix of deleting the `matrix_synapse.egg-info/` directory to resolve Python dependency problems. ([\#13785](https://github.com/matrix-org/synapse/issues/13785))
+- Update request log format documentation to mention the format used when the authenticated user is controlling another user. ([\#13794](https://github.com/matrix-org/synapse/issues/13794))
+
+
+Deprecations and Removals
+-------------------------
+
+- Synapse will now refuse to start if configured to use SQLite < 3.27. ([\#13760](https://github.com/matrix-org/synapse/issues/13760))
+- Don't include redundant `prev_state` in new events. Contributed by Denis Kariakin (@dakariakin). ([\#13791](https://github.com/matrix-org/synapse/issues/13791))
+
+
+Internal Changes
+----------------
+
+- Add a stub Rust crate. ([\#12595](https://github.com/matrix-org/synapse/issues/12595), [\#13734](https://github.com/matrix-org/synapse/issues/13734), [\#13735](https://github.com/matrix-org/synapse/issues/13735), [\#13743](https://github.com/matrix-org/synapse/issues/13743), [\#13763](https://github.com/matrix-org/synapse/issues/13763), [\#13769](https://github.com/matrix-org/synapse/issues/13769), [\#13778](https://github.com/matrix-org/synapse/issues/13778))
+- Bump the minimum dependency of `matrix_common` to 1.3.0 to make use of the `MXCUri` class. Use `MXCUri` to simplify media retention test code. ([\#13162](https://github.com/matrix-org/synapse/issues/13162))
+- Add and populate the `event_stream_ordering` column on the `receipts` table for future optimisation of push action processing. Contributed by Nick @ Beeper (@fizzadar). ([\#13703](https://github.com/matrix-org/synapse/issues/13703))
+- Rename the `EventFormatVersions` enum values so that they line up with room version numbers. ([\#13706](https://github.com/matrix-org/synapse/issues/13706))
+- Update trial old deps CI to use Poetry 1.2.0. ([\#13707](https://github.com/matrix-org/synapse/issues/13707), [\#13725](https://github.com/matrix-org/synapse/issues/13725))
+- Add experimental configuration option to allow disabling legacy Prometheus metric names. ([\#13714](https://github.com/matrix-org/synapse/issues/13714), [\#13717](https://github.com/matrix-org/synapse/issues/13717), [\#13718](https://github.com/matrix-org/synapse/issues/13718))
+- Fix typechecking with latest types-jsonschema. ([\#13724](https://github.com/matrix-org/synapse/issues/13724))
+- Strip number suffix from instance name to consolidate services that traces are spread over. ([\#13729](https://github.com/matrix-org/synapse/issues/13729))
+- Instrument `get_metadata_for_events` for understandable traces in Jaeger. ([\#13730](https://github.com/matrix-org/synapse/issues/13730))
+- Remove old queries to join room memberships to current state events. Contributed by Nick @ Beeper (@fizzadar). ([\#13745](https://github.com/matrix-org/synapse/issues/13745))
+- Avoid raising an error due to malformed user IDs in `get_current_hosts_in_room`. 
Malformed user IDs cannot currently join a room, so this error would not be hit. ([\#13748](https://github.com/matrix-org/synapse/issues/13748))
+- Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state. ([\#13750](https://github.com/matrix-org/synapse/issues/13750))
+- Use an additional database query when persisting receipts. ([\#13752](https://github.com/matrix-org/synapse/issues/13752))
+- Preparatory work for storing thread IDs for notifications and receipts. ([\#13753](https://github.com/matrix-org/synapse/issues/13753))
+- Re-type hint some collections as read-only. ([\#13754](https://github.com/matrix-org/synapse/issues/13754))
+- Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used. ([\#13756](https://github.com/matrix-org/synapse/issues/13756))
+- Add a check for editable installs if the Rust library needs rebuilding. ([\#13759](https://github.com/matrix-org/synapse/issues/13759))
+- Tag traces with the instance name to be able to easily jump into the right logs and filter traces by instance. ([\#13761](https://github.com/matrix-org/synapse/issues/13761))
+- Concurrently fetch room push actions when calculating badge counts. Contributed by Nick @ Beeper (@fizzadar). ([\#13765](https://github.com/matrix-org/synapse/issues/13765))
+- Update the script which makes full schema dumps. ([\#13770](https://github.com/matrix-org/synapse/issues/13770))
+- Deduplicate `is_server_notices_room`. ([\#13780](https://github.com/matrix-org/synapse/issues/13780))
+- Simplify the dependency DAG in the tests workflow. ([\#13784](https://github.com/matrix-org/synapse/issues/13784))
+- Remove an old, incorrect migration file. ([\#13788](https://github.com/matrix-org/synapse/issues/13788))
+- Remove unused method in `synapse.api.auth.Auth`. ([\#13795](https://github.com/matrix-org/synapse/issues/13795))
+- Fix a memory leak when running the unit tests. ([\#13798](https://github.com/matrix-org/synapse/issues/13798))
+- Use partial indices on SQLite. ([\#13802](https://github.com/matrix-org/synapse/issues/13802))
+- Check that portdb generates the same postgres schema as that in the source tree. ([\#13808](https://github.com/matrix-org/synapse/issues/13808))
+- Fix Docker build when Rust .so has been built locally first. ([\#13811](https://github.com/matrix-org/synapse/issues/13811))
+- Complement: Initialise the Postgres database directly inside the target image instead of the base Postgres image to fix building using Buildah. ([\#13819](https://github.com/matrix-org/synapse/issues/13819))
+- Support providing an index predicate clause when doing upserts. ([\#13822](https://github.com/matrix-org/synapse/issues/13822))
+- Minor speedups to linting in CI. ([\#13827](https://github.com/matrix-org/synapse/issues/13827))
+
+
+Synapse 1.67.0 (2022-09-13)
+===========================
+
+This release removes support for the deprecated direct TCP replication
+configuration for workers. Server admins should use Redis instead. See the [upgrade
+notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670).
+
+The minimum version of `poetry` supported for managing source checkouts is now
+1.2.0.
+
+**Notice:** from the next major release (1.68.0) installing Synapse from a source
+checkout will require a recent Rust compiler. Those using packages or
+`pip install matrix-synapse` will not be affected. 
See the [upgrade
+notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670).
+
+**Notice:** from the next major release (1.68.0), running Synapse with a SQLite
+database will require SQLite version 3.27.0 or higher. (The [current minimum
+ version is SQLite 3.22.0](https://github.com/matrix-org/synapse/blob/release-v1.67/synapse/storage/engines/sqlite.py#L69-L78).)
+See [#12983](https://github.com/matrix-org/synapse/issues/12983) and the [upgrade notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670) for more details.
+
+
+No significant changes since 1.67.0rc1.
+
+
+Synapse 1.67.0rc1 (2022-09-06)
+==============================
+
+Features
+--------
+
+- Support setting the registration shared secret in a file, via a new `registration_shared_secret_path` configuration option. ([\#13614](https://github.com/matrix-org/synapse/issues/13614))
+- Change the default startup behaviour so that any missing "additional" configuration files (signing key, etc) are generated automatically. ([\#13615](https://github.com/matrix-org/synapse/issues/13615))
+- Improve performance of sending messages in rooms with thousands of local users. ([\#13634](https://github.com/matrix-org/synapse/issues/13634))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.13 where the [List Rooms admin API](https://matrix-org.github.io/synapse/develop/admin_api/rooms.html#list-room-api) would return integers instead of booleans for the `federatable` and `public` fields when using a Sqlite database. ([\#13509](https://github.com/matrix-org/synapse/issues/13509))
+- Fix a bug where a user could not `/forget` a room after its last member had left. ([\#13546](https://github.com/matrix-org/synapse/issues/13546))
+- Faster Room Joins: fix `/make_knock` blocking indefinitely when the room in question is a partial-stated room. ([\#13583](https://github.com/matrix-org/synapse/issues/13583))
+- Fix the current stream position being loaded behind the actual position. ([\#13585](https://github.com/matrix-org/synapse/issues/13585))
+- Fix a long-standing bug in `register_new_matrix_user` which meant it was always necessary to explicitly give a server URL. ([\#13616](https://github.com/matrix-org/synapse/issues/13616))
+- Fix the running of [MSC1763](https://github.com/matrix-org/matrix-spec-proposals/pull/1763) retention purge_jobs in deployments with background jobs running on a worker by forcing them back onto the main worker. Contributed by Brad @ Beeper. ([\#13632](https://github.com/matrix-org/synapse/issues/13632))
+- Fix a long-standing bug where media downloaded for URL previews was not deleted while database background updates were running. ([\#13657](https://github.com/matrix-org/synapse/issues/13657))
+- Fix [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to return the correct next event when the events have the same timestamp. ([\#13658](https://github.com/matrix-org/synapse/issues/13658))
+- Fix a bug, introduced in v1.22.0, where media plugins could wedge if clients disconnected early. ([\#13660](https://github.com/matrix-org/synapse/issues/13660))
+- Fix a long-standing bug which meant that keys for unwhitelisted servers were not returned by `/_matrix/key/v2/query`. 
([\#13683](https://github.com/matrix-org/synapse/issues/13683)) +- Fix a bug introduced in Synapse 1.20.0 that would cause the unstable unread counts from [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) to be calculated even if the feature is disabled. ([\#13694](https://github.com/matrix-org/synapse/issues/13694)) + + +Updates to the Docker image +--------------------------- + +- Update docker image to use a stable version of poetry. ([\#13688](https://github.com/matrix-org/synapse/issues/13688)) + + +Improved Documentation +---------------------- + +- Improve the description of the ["chain cover index"](https://matrix-org.github.io/synapse/latest/auth_chain_difference_algorithm.html) used internally by Synapse. ([\#13602](https://github.com/matrix-org/synapse/issues/13602)) +- Document how ["monthly active users"](https://matrix-org.github.io/synapse/latest/usage/administration/monthly_active_users.html) is calculated and used. ([\#13617](https://github.com/matrix-org/synapse/issues/13617)) +- Improve documentation around user registration. ([\#13640](https://github.com/matrix-org/synapse/issues/13640)) +- Remove documentation of legacy `frontend_proxy` worker app. ([\#13645](https://github.com/matrix-org/synapse/issues/13645)) +- Clarify documentation that HTTP replication traffic can be protected with a shared secret. ([\#13656](https://github.com/matrix-org/synapse/issues/13656)) +- Remove unintentional colons from [config manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html) headers. ([\#13665](https://github.com/matrix-org/synapse/issues/13665)) +- Update docs to make enabling metrics more clear. ([\#13678](https://github.com/matrix-org/synapse/issues/13678)) +- Clarify `(room_id, event_id)` global uniqueness and how we should scope our database schemas. ([\#13701](https://github.com/matrix-org/synapse/issues/13701)) + + +Deprecations and Removals +------------------------- + +- Drop support for calling `/_matrix/client/v3/rooms/{roomId}/invite` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13241](https://github.com/matrix-org/synapse/issues/13241)) +- Remove redundant `_get_joined_users_from_context` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13569](https://github.com/matrix-org/synapse/issues/13569)) +- Remove the ability to use direct TCP replication with workers. Direct TCP replication was deprecated in Synapse 1.18.0. Workers now require using Redis. ([\#13647](https://github.com/matrix-org/synapse/issues/13647)) +- Remove support for unstable [private read receipts](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13653](https://github.com/matrix-org/synapse/issues/13653), [\#13692](https://github.com/matrix-org/synapse/issues/13692)) + + +Internal Changes +---------------- + +- Extend the release script to wait for GitHub Actions to finish and to be usable as a guide for the whole process. ([\#13483](https://github.com/matrix-org/synapse/issues/13483)) +- Add experimental configuration option to allow disabling legacy Prometheus metric names. ([\#13540](https://github.com/matrix-org/synapse/issues/13540)) +- Cache user IDs instead of profiles to reduce cache memory usage. Contributed by Nick @ Beeper (@fizzadar). ([\#13573](https://github.com/matrix-org/synapse/issues/13573), [\#13600](https://github.com/matrix-org/synapse/issues/13600)) +- Optimize how Synapse calculates domains to fetch from during backfill. 
([\#13575](https://github.com/matrix-org/synapse/issues/13575)) +- Comment about a better future where we can get the state diff between two events. ([\#13586](https://github.com/matrix-org/synapse/issues/13586)) +- Instrument `_check_sigs_and_hash_and_fetch` to trace time spent in child concurrent calls for understandable traces in Jaeger. ([\#13588](https://github.com/matrix-org/synapse/issues/13588)) +- Improve performance of `@cachedList`. ([\#13591](https://github.com/matrix-org/synapse/issues/13591)) +- Minor speed up of fetching large numbers of push rules. ([\#13592](https://github.com/matrix-org/synapse/issues/13592)) +- Optimise push action fetching queries. Contributed by Nick @ Beeper (@fizzadar). ([\#13597](https://github.com/matrix-org/synapse/issues/13597)) +- Rename `event_map` to `unpersisted_events` when computing the auth differences. ([\#13603](https://github.com/matrix-org/synapse/issues/13603)) +- Refactor `get_users_in_room(room_id)` mis-use with dedicated `get_current_hosts_in_room(room_id)` function. ([\#13605](https://github.com/matrix-org/synapse/issues/13605)) +- Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating `join_authorised_via_users_server` of a `/make_join` request. ([\#13606](https://github.com/matrix-org/synapse/issues/13606)) +- Refactor `get_users_in_room(room_id)` mis-use to lookup single local user with dedicated `check_local_user_in_room(...)` function. ([\#13608](https://github.com/matrix-org/synapse/issues/13608)) +- Drop unused column `application_services_state.last_txn`. ([\#13627](https://github.com/matrix-org/synapse/issues/13627)) +- Improve readability of Complement CI logs by printing failure results last. ([\#13639](https://github.com/matrix-org/synapse/issues/13639)) +- Generalise the `@cancellable` annotation so it can be used on functions other than just servlet methods. ([\#13662](https://github.com/matrix-org/synapse/issues/13662)) +- Introduce a `CommonUsageMetrics` class to share some usage metrics between the Prometheus exporter and the phone home stats. ([\#13671](https://github.com/matrix-org/synapse/issues/13671)) +- Add some logging to help track down #13444. ([\#13679](https://github.com/matrix-org/synapse/issues/13679)) +- Update poetry lock file for v1.2.0. ([\#13689](https://github.com/matrix-org/synapse/issues/13689)) +- Add cache to `is_partial_state_room`. ([\#13693](https://github.com/matrix-org/synapse/issues/13693)) +- Update the Grafana dashboard that is included with Synapse in the `contrib` directory. ([\#13697](https://github.com/matrix-org/synapse/issues/13697)) +- Only run trial CI on all python versions on non-PRs. ([\#13698](https://github.com/matrix-org/synapse/issues/13698)) +- Fix typechecking with latest types-jsonschema. ([\#13712](https://github.com/matrix-org/synapse/issues/13712)) +- Reduce number of CI checks we run for PRs. ([\#13713](https://github.com/matrix-org/synapse/issues/13713)) + + +Synapse 1.66.0 (2022-08-31) +=========================== + +No significant changes since 1.66.0rc2. + +This release removes the ability for homeservers to delegate email ownership +verification and password reset confirmation to identity servers. This removal +was originally planned for Synapse 1.64, but was later deferred until now. See +the [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details. 
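+
+For illustration only (a hedged sketch, not part of the original release notes; the upgrade notes linked above are authoritative), the removed delegation was the kind configured through the `email` sub-option of `account_threepid_delegates`, which now has to be deleted from the homeserver config in favour of Synapse's own email-sending settings:
+
+```yaml
+# No longer supported as of Synapse 1.66.0: delegating email ownership
+# verification and password reset confirmation to an identity server.
+# Remove the `email` sub-option and configure Synapse's own SMTP
+# (`email`) settings instead. The URL below is a hypothetical example.
+account_threepid_delegates:
+  email: https://id.example.com
+```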
+
+Deployments with multiple workers should note that the direct TCP replication
+configuration was deprecated in Synapse 1.18.0 and will be removed in Synapse
+v1.67.0. In particular, the TCP `replication` [listener](https://matrix-org.github.io/synapse/v1.66/usage/configuration/config_documentation.html#listeners)
+type (not to be confused with the `replication` resource on the `http` listener
+type) and the `worker_replication_port` config option will be removed.
+
+To migrate to Redis, add the [`redis` config](https://matrix-org.github.io/synapse/v1.66/workers.html#shared-configuration),
+then remove the TCP `replication` listener from the config of the master and
+`worker_replication_port` from worker config. Note that an HTTP listener with a
+`replication` resource is still required. See the
+[worker documentation](https://matrix-org.github.io/synapse/v1.66/workers.html)
+for more details.
+
+
+Synapse 1.66.0rc2 (2022-08-30)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.66.0rc1 where the new rate limit metrics were misreported (`synapse_rate_limit_sleep_affected_hosts`, `synapse_rate_limit_reject_affected_hosts`). ([\#13649](https://github.com/matrix-org/synapse/issues/13649))
+
+
+Synapse 1.66.0rc1 (2022-08-23)
+==============================
+
+Features
+--------
+
+- Improve validation of request bodies for the following client-server API endpoints: [`/account/password`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpassword), [`/account/password/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpasswordemailrequesttoken), [`/account/deactivate`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountdeactivate) and [`/account/3pid/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidemailrequesttoken). ([\#13188](https://github.com/matrix-org/synapse/issues/13188), [\#13563](https://github.com/matrix-org/synapse/issues/13563))
+- Add forgotten status to [Room Details Admin API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#room-details-api). ([\#13503](https://github.com/matrix-org/synapse/issues/13503))
+- Add an experimental implementation for [MSC3852 (Expose user agents on `Device`)](https://github.com/matrix-org/matrix-spec-proposals/pull/3852). ([\#13549](https://github.com/matrix-org/synapse/issues/13549))
+- Add `org.matrix.msc2716v4` experimental room version with updated content fields. Part of [MSC2716 (Importing history)](https://github.com/matrix-org/matrix-spec-proposals/pull/2716). ([\#13551](https://github.com/matrix-org/synapse/issues/13551))
+- Add support for compression to federation responses. ([\#13537](https://github.com/matrix-org/synapse/issues/13537))
+- Improve performance of sending messages in rooms with thousands of local users. ([\#13522](https://github.com/matrix-org/synapse/issues/13522), [\#13547](https://github.com/matrix-org/synapse/issues/13547))
+
+
+Bugfixes
+--------
+
+- Faster room joins: make `/joined_members` block whilst the room is partial stated. ([\#13514](https://github.com/matrix-org/synapse/issues/13514))
+- Fix a bug introduced in Synapse 1.21.0 where the [`/event_reports` Admin API](https://matrix-org.github.io/synapse/develop/admin_api/event_reports.html) could return a total count which was larger than the number of results you can actually query for. 
([\#13525](https://github.com/matrix-org/synapse/issues/13525)) +- Fix a bug introduced in Synapse 1.52.0 where sending server notices fails if `max_avatar_size` or `allowed_avatar_mimetypes` is set and not `system_mxid_avatar_url`. ([\#13566](https://github.com/matrix-org/synapse/issues/13566)) +- Fix a bug where the `opentracing.force_tracing_for_users` config option would not apply to [`/sendToDevice`](https://spec.matrix.org/v1.3/client-server-api/#put_matrixclientv3sendtodeviceeventtypetxnid) and [`/keys/upload`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3keysupload) requests. ([\#13574](https://github.com/matrix-org/synapse/issues/13574)) + + +Improved Documentation +---------------------- + +- Add `openssl` example for generating registration HMAC digest. ([\#13472](https://github.com/matrix-org/synapse/issues/13472)) +- Tidy up Synapse's README. ([\#13491](https://github.com/matrix-org/synapse/issues/13491)) +- Document that event purging related to the `redaction_retention_period` config option is executed only every 5 minutes. ([\#13492](https://github.com/matrix-org/synapse/issues/13492)) +- Add a warning to retention documentation regarding the possibility of database corruption. ([\#13497](https://github.com/matrix-org/synapse/issues/13497)) +- Document that the `DOCKER_BUILDKIT=1` flag is needed to build the docker image. ([\#13515](https://github.com/matrix-org/synapse/issues/13515)) +- Add missing links in `user_consent` section of configuration manual. ([\#13536](https://github.com/matrix-org/synapse/issues/13536)) +- Fix the doc and some warnings that were referring to the nonexistent `custom_templates_directory` setting (instead of `custom_template_directory`). ([\#13538](https://github.com/matrix-org/synapse/issues/13538)) + + +Deprecations and Removals +------------------------- + +- Remove the ability for homeservers to delegate email ownership verification + and password reset confirmation to identity servers. See [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details. + +Internal Changes +---------------- + +### Faster room joins + +- Update the rejected state of events during de-partial-stating. ([\#13459](https://github.com/matrix-org/synapse/issues/13459)) +- Avoid blocking lazy-loading `/sync`s during partial joins due to remote memberships. Pull remote memberships from auth events instead of the room state. ([\#13477](https://github.com/matrix-org/synapse/issues/13477)) +- Refuse to start when faster joins is enabled on a deployment with workers, since worker configurations are not currently supported. ([\#13531](https://github.com/matrix-org/synapse/issues/13531)) + +### Metrics and tracing + +- Allow use of both `@trace` and `@tag_args` stacked on the same function. ([\#13453](https://github.com/matrix-org/synapse/issues/13453)) +- Instrument the federation/backfill part of `/messages` for understandable traces in Jaeger. ([\#13489](https://github.com/matrix-org/synapse/issues/13489)) +- Instrument `FederationStateIdsServlet` (`/state_ids`) for understandable traces in Jaeger. ([\#13499](https://github.com/matrix-org/synapse/issues/13499), [\#13554](https://github.com/matrix-org/synapse/issues/13554)) +- Track HTTP response times over 10 seconds from `/messages` (`synapse_room_message_list_rest_servlet_response_time_seconds`). ([\#13533](https://github.com/matrix-org/synapse/issues/13533)) +- Add metrics to track how the rate limiter is affecting requests (sleep/reject). 
([\#13534](https://github.com/matrix-org/synapse/issues/13534), [\#13541](https://github.com/matrix-org/synapse/issues/13541)) +- Add metrics to time how long it takes us to do backfill processing (`synapse_federation_backfill_processing_before_time_seconds`, `synapse_federation_backfill_processing_after_time_seconds`). ([\#13535](https://github.com/matrix-org/synapse/issues/13535), [\#13584](https://github.com/matrix-org/synapse/issues/13584)) +- Add metrics to track rate limiter queue timing (`synapse_rate_limit_queue_wait_time_seconds`). ([\#13544](https://github.com/matrix-org/synapse/issues/13544)) +- Update metrics to track `/messages` response time by room size. ([\#13545](https://github.com/matrix-org/synapse/issues/13545)) + +### Everything else + +- Refactor methods in `synapse.api.auth.Auth` to use `Requester` objects everywhere instead of user IDs. ([\#13024](https://github.com/matrix-org/synapse/issues/13024)) +- Clean-up tests for notifications. ([\#13471](https://github.com/matrix-org/synapse/issues/13471)) +- Add some miscellaneous comments to document sync, especially around `compute_state_delta`. ([\#13474](https://github.com/matrix-org/synapse/issues/13474)) +- Use literals in place of `HTTPStatus` constants in tests. ([\#13479](https://github.com/matrix-org/synapse/issues/13479), [\#13488](https://github.com/matrix-org/synapse/issues/13488)) +- Add comments about how event push actions are rotated. ([\#13485](https://github.com/matrix-org/synapse/issues/13485)) +- Modify HTML template content to better support mobile devices' screen sizes. ([\#13493](https://github.com/matrix-org/synapse/issues/13493)) +- Add a linter script which will reject non-strict types in Pydantic models. ([\#13502](https://github.com/matrix-org/synapse/issues/13502)) +- Reduce the number of tests using legacy TCP replication. ([\#13543](https://github.com/matrix-org/synapse/issues/13543)) +- Allow specifying additional request fields when using the `HomeServerTestCase.login` helper method. ([\#13549](https://github.com/matrix-org/synapse/issues/13549)) +- Make `HomeServerTestCase` load any configured homeserver modules automatically. ([\#13558](https://github.com/matrix-org/synapse/issues/13558)) + + +Synapse 1.65.0 (2022-08-16) +=========================== + +No significant changes since 1.65.0rc2. + + +Synapse 1.65.0rc2 (2022-08-11) +============================== + +Internal Changes +---------------- + +- Revert 'Remove the unspecced `room_id` field in the `/hierarchy` response. ([\#13365](https://github.com/matrix-org/synapse/issues/13365))' to give more time for clients to update. ([\#13501](https://github.com/matrix-org/synapse/issues/13501)) + + +Synapse 1.65.0rc1 (2022-08-09) +============================== + +Features +-------- + +- Add support for stable prefixes for [MSC2285 (private read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13273](https://github.com/matrix-org/synapse/issues/13273)) +- Add new unstable error codes `ORG.MATRIX.MSC3848.ALREADY_JOINED`, `ORG.MATRIX.MSC3848.NOT_JOINED`, and `ORG.MATRIX.MSC3848.INSUFFICIENT_POWER` described in [MSC3848](https://github.com/matrix-org/matrix-spec-proposals/pull/3848). ([\#13343](https://github.com/matrix-org/synapse/issues/13343)) +- Use stable prefixes for [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827). ([\#13370](https://github.com/matrix-org/synapse/issues/13370)) +- Add a new module API method to translate a room alias into a room ID. 
([\#13428](https://github.com/matrix-org/synapse/issues/13428))
+- Add a new module API method to create a room. ([\#13429](https://github.com/matrix-org/synapse/issues/13429))
+- Add remote join capability to the module API's `update_room_membership` method (in a backwards compatible manner). ([\#13441](https://github.com/matrix-org/synapse/issues/13441))
+
+
+Bugfixes
+--------
+
+- Update the version of the LDAP3 auth provider module included in the `matrixdotorg/synapse` DockerHub images and the Debian packages hosted on packages.matrix.org to 0.2.2. This version fixes a regression in the module. ([\#13470](https://github.com/matrix-org/synapse/issues/13470))
+- Fix a bug introduced in Synapse 1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`) (this was reverted in v1.65.0rc2, see changelog notes above). ([\#13365](https://github.com/matrix-org/synapse/issues/13365))
+- Fix a bug introduced in Synapse 0.24.0 that would respond with the wrong error status code to `/joined_members` requests when the requester is not a current member of the room. Contributed by @andrewdoh. ([\#13374](https://github.com/matrix-org/synapse/issues/13374))
+- Fix a bug in the handling of typing events for appservices. Contributed by Nick @ Beeper (@fizzadar). ([\#13392](https://github.com/matrix-org/synapse/issues/13392))
+- Fix a bug introduced in Synapse 1.57.0 where rooms listed in `exclude_rooms_from_sync` in the configuration file would not be properly excluded from incremental syncs. ([\#13408](https://github.com/matrix-org/synapse/issues/13408))
+- Fix a bug in the experimental faster-room-joins support which could cause it to get stuck in an infinite loop. ([\#13353](https://github.com/matrix-org/synapse/issues/13353))
+- Faster room joins: fix a bug which caused rejected events to become un-rejected during state syncing. ([\#13413](https://github.com/matrix-org/synapse/issues/13413))
+- Faster room joins: fix error when running out of servers to sync partial state with, so that Synapse raises the intended error instead. ([\#13432](https://github.com/matrix-org/synapse/issues/13432))
+
+
+Updates to the Docker image
+---------------------------
+
+- Make Docker images build on armv7 by installing cryptography dependencies in the 'requirements' stage. Contributed by Jasper Spaans. ([\#13372](https://github.com/matrix-org/synapse/issues/13372))
+
+
+Improved Documentation
+----------------------
+
+- Update the 'registration tokens' page to acknowledge that the relevant MSC was merged into version 1.2 of the Matrix specification. Contributed by @moan0s. ([\#11897](https://github.com/matrix-org/synapse/issues/11897))
+- Document which HTTP resources support gzip compression. ([\#13221](https://github.com/matrix-org/synapse/issues/13221))
+- Add steps describing how to elevate an existing user to administrator by manipulating the database. ([\#13230](https://github.com/matrix-org/synapse/issues/13230))
+- Fix wrong headline for `url_preview_accept_language` in documentation. ([\#13437](https://github.com/matrix-org/synapse/issues/13437))
+- Remove redundant 'Contents' section from the Configuration Manual. Contributed by @dklimpel. ([\#13438](https://github.com/matrix-org/synapse/issues/13438))
+- Update documentation for config setting `macaroon_secret_key`. ([\#13443](https://github.com/matrix-org/synapse/issues/13443))
+- Update outdated information on `sso_mapping_providers` documentation. ([\#13449](https://github.com/matrix-org/synapse/issues/13449))
+- Fix example code in module documentation of `password_auth_provider_callbacks`. ([\#13450](https://github.com/matrix-org/synapse/issues/13450))
+- Make the configuration for the cache clearer. ([\#13481](https://github.com/matrix-org/synapse/issues/13481))
+
+
+Internal Changes
+----------------
+
+- Extend the release script to automatically push a new SyTest branch, rather than having that be a manual process. ([\#12978](https://github.com/matrix-org/synapse/issues/12978))
+- Make minor clarifications to the error messages given when we fail to join a room via any server. ([\#13160](https://github.com/matrix-org/synapse/issues/13160))
+- Enable Complement CI tests in the 'latest deps' test run. ([\#13213](https://github.com/matrix-org/synapse/issues/13213))
+- Fix long-standing (and never actually hit) bugged logic in `get_pdu` which would keep asking every remote destination even after it finds an event. ([\#13346](https://github.com/matrix-org/synapse/issues/13346))
+- Faster room joins: avoid blocking when pulling events with partially missing prev events. ([\#13355](https://github.com/matrix-org/synapse/issues/13355))
+- Instrument `/messages` for understandable traces in Jaeger. ([\#13368](https://github.com/matrix-org/synapse/issues/13368))
+- Remove an unused argument to `get_relations_for_event`. ([\#13383](https://github.com/matrix-org/synapse/issues/13383))
+- Add a `merge-back` command to the release script, which automates merging the correct branches after a release. ([\#13393](https://github.com/matrix-org/synapse/issues/13393))
+- Add missing type hints to tests. ([\#13397](https://github.com/matrix-org/synapse/issues/13397))
+- Faster room joins: don't leave a stuck room partial state flag if the join fails. ([\#13403](https://github.com/matrix-org/synapse/issues/13403))
+- Refactor `_resolve_state_at_missing_prevs` to compute an `EventContext` instead. ([\#13404](https://github.com/matrix-org/synapse/issues/13404), [\#13431](https://github.com/matrix-org/synapse/issues/13431))
+- Faster room joins: prevent Synapse from answering federated join requests for a room which it has not fully joined yet. ([\#13416](https://github.com/matrix-org/synapse/issues/13416))
+- Re-enable running Complement tests against Synapse with workers. ([\#13420](https://github.com/matrix-org/synapse/issues/13420))
+- Prevent unnecessary lookups to any external `get_event` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13435](https://github.com/matrix-org/synapse/issues/13435))
+- Add some tracing to give more insight into local room joins. ([\#13439](https://github.com/matrix-org/synapse/issues/13439))
+- Rename class `RateLimitConfig` to `RatelimitSettings` and `FederationRateLimitConfig` to `FederationRatelimitSettings`. ([\#13442](https://github.com/matrix-org/synapse/issues/13442))
+- Add some comments about how event push actions are stored. ([\#13445](https://github.com/matrix-org/synapse/issues/13445), [\#13455](https://github.com/matrix-org/synapse/issues/13455))
+- Improve rebuild speed for the "synapse-workers" docker image. ([\#13447](https://github.com/matrix-org/synapse/issues/13447))
+- Fix `@tag_args` being off-by-one with the arguments when tagging a span (tracing). ([\#13452](https://github.com/matrix-org/synapse/issues/13452))
+- Update type of `EventContext.rejected`. ([\#13460](https://github.com/matrix-org/synapse/issues/13460))
+- Use literals in place of `HTTPStatus` constants in tests. ([\#13463](https://github.com/matrix-org/synapse/issues/13463), [\#13469](https://github.com/matrix-org/synapse/issues/13469))
+- Correct a misnamed argument in state res v2 internals. ([\#13467](https://github.com/matrix-org/synapse/issues/13467))
+
+
+Synapse 1.64.0 (2022-08-02)
+===========================
+
+No significant changes since 1.64.0rc2.
+
+
+Deprecation Warning
+-------------------
+
+Synapse 1.66.0 will remove the ability to delegate the tasks of verifying email address ownership and password reset confirmation to an identity server.
+
+If you require your homeserver to verify e-mail addresses or to support password resets via e-mail, please configure your homeserver with SMTP access so that it can send e-mails on its own behalf.
+[Consult the configuration documentation for more information.](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#email)
+
+
+Synapse 1.64.0rc2 (2022-07-29)
+==============================
+
+This RC reintroduces support for `account_threepid_delegates.email`, which was removed in 1.64.0rc1. It remains deprecated and will be removed altogether in Synapse 1.66.0. ([\#13406](https://github.com/matrix-org/synapse/issues/13406))
+
+
+Synapse 1.64.0rc1 (2022-07-26)
+==============================
+
+This RC removed the ability to delegate the tasks of verifying email address ownership and password reset confirmation to an identity server.
+
+We have also stopped building `.deb` packages for Ubuntu 21.10 as it is no longer an active version of Ubuntu.
+
+
+Features
+--------
+
+- Improve error messages when media thumbnails cannot be served. ([\#13038](https://github.com/matrix-org/synapse/issues/13038))
+- Allow pagination from remote event after discovering it from [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event`. ([\#13205](https://github.com/matrix-org/synapse/issues/13205))
+- Add a `room_type` field in the responses for the list room and room details admin APIs. Contributed by @andrewdoh. ([\#13208](https://github.com/matrix-org/synapse/issues/13208))
+- Add support for room version 10. ([\#13220](https://github.com/matrix-org/synapse/issues/13220))
+- Add per-room rate limiting for room joins. For each room, Synapse now monitors the rate of join events in that room, and throttles additional joins if that rate grows too large. ([\#13253](https://github.com/matrix-org/synapse/issues/13253), [\#13254](https://github.com/matrix-org/synapse/issues/13254), [\#13255](https://github.com/matrix-org/synapse/issues/13255), [\#13276](https://github.com/matrix-org/synapse/issues/13276))
+- Support Implicit TLS (TLS without using a STARTTLS upgrade, typically on port 465) for sending emails, enabled by the new option `force_tls` (see the sketch after this list). Contributed by Jan Schär. ([\#13317](https://github.com/matrix-org/synapse/issues/13317))
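+
+Purely as an illustration of the new option (the host and sender address are
+placeholder values, not taken from these release notes), Implicit TLS might be
+enabled like so:
+
+```yaml
+# homeserver.yaml (sketch): send email over Implicit TLS on port 465.
+email:
+  smtp_host: smtp.example.com
+  smtp_port: 465
+  force_tls: true   # TLS from the start of the connection; no STARTTLS upgrade
+  notif_from: "Your server <noreply@example.com>"
+```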
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.15.0 where adding a user through the Synapse Admin API with a phone number would fail if the `enable_email_notifs` and `email_notifs_for_new_users` options were enabled. Contributed by @thomasweston12. ([\#13263](https://github.com/matrix-org/synapse/issues/13263))
+- Fix a bug introduced in Synapse 1.40.0 where a user invited to a restricted room would be briefly unable to join. ([\#13270](https://github.com/matrix-org/synapse/issues/13270))
+- Fix a long-standing bug where, in rare instances, Synapse could store the incorrect state for a room after a state resolution. ([\#13278](https://github.com/matrix-org/synapse/issues/13278))
+- Fix a bug introduced in Synapse 1.18.0 where the `synapse_pushers` metric would overcount pushers when they are replaced. ([\#13296](https://github.com/matrix-org/synapse/issues/13296))
+- Disable autocorrection and autocapitalisation on the username text field shown during registration when using SSO. ([\#13350](https://github.com/matrix-org/synapse/issues/13350))
+- Update locked version of `frozendict` to 2.3.3, which has fixes for memory leaks affecting `/sync`. ([\#13284](https://github.com/matrix-org/synapse/issues/13284), [\#13352](https://github.com/matrix-org/synapse/issues/13352))
+
+
+Improved Documentation
+----------------------
+
+- Provide an example of using the Admin API. Contributed by @jejo86. ([\#13231](https://github.com/matrix-org/synapse/issues/13231))
+- Move the documentation for how URL previews work to the URL preview module. ([\#13233](https://github.com/matrix-org/synapse/issues/13233), [\#13261](https://github.com/matrix-org/synapse/issues/13261))
+- Add another `contrib` script to help set up worker processes. Contributed by @villepeh. ([\#13271](https://github.com/matrix-org/synapse/issues/13271))
+- Document that certain config options were added or changed in Synapse 1.62. Contributed by @behrmann. ([\#13314](https://github.com/matrix-org/synapse/issues/13314))
+- Document the new `rc_invites.per_issuer` throttling option added in Synapse 1.63. ([\#13333](https://github.com/matrix-org/synapse/issues/13333))
+- Mention that BuildKit is needed when building Docker images for tests. ([\#13338](https://github.com/matrix-org/synapse/issues/13338))
+- Improve Caddy reverse proxy documentation. ([\#13344](https://github.com/matrix-org/synapse/issues/13344))
+
+
+Deprecations and Removals
+-------------------------
+
+- Drop tables that were formerly used for groups/communities. ([\#12967](https://github.com/matrix-org/synapse/issues/12967))
+- Drop support for delegating email verification to an external server. ([\#13192](https://github.com/matrix-org/synapse/issues/13192))
+- Drop support for calling `/_matrix/client/v3/account/3pid/bind` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13239](https://github.com/matrix-org/synapse/issues/13239))
+- Stop building `.deb` packages for Ubuntu 21.10 (Impish Indri), which has reached end of life. ([\#13326](https://github.com/matrix-org/synapse/issues/13326))
+
+
+Internal Changes
+----------------
+
+- Use lower transaction isolation level when purging rooms to avoid serialization errors. Contributed by Nick @ Beeper. ([\#12942](https://github.com/matrix-org/synapse/issues/12942))
+- Remove code which incorrectly attempted to reconcile state with remote servers when processing incoming events. ([\#12943](https://github.com/matrix-org/synapse/issues/12943))
+- Make the AS login method call `Auth.get_user_by_req` for checking the AS token. ([\#13094](https://github.com/matrix-org/synapse/issues/13094))
+- Always use a version of canonicaljson that supports the C implementation of frozendict. ([\#13172](https://github.com/matrix-org/synapse/issues/13172))
+- Add prometheus counters for ephemeral events and to-device messages pushed to app services. Contributed by Brad @ Beeper. ([\#13175](https://github.com/matrix-org/synapse/issues/13175))
+- Refactor receipts servlet logic to avoid duplicated code. ([\#13198](https://github.com/matrix-org/synapse/issues/13198))
+- Preparation for database schema simplifications: populate `state_key` and `rejection_reason` for existing rows in the `events` table. ([\#13215](https://github.com/matrix-org/synapse/issues/13215))
+- Remove unused database table `event_reference_hashes`. ([\#13218](https://github.com/matrix-org/synapse/issues/13218))
+- Further reduce the number of queries used when sending events while creating new rooms. Contributed by Nick @ Beeper (@fizzadar). ([\#13224](https://github.com/matrix-org/synapse/issues/13224))
+- Call the v2 identity service `/3pid/unbind` endpoint, rather than v1. Contributed by @Vetchu. ([\#13240](https://github.com/matrix-org/synapse/issues/13240))
+- Use an asynchronous cache wrapper for the get event cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13242](https://github.com/matrix-org/synapse/issues/13242), [\#13308](https://github.com/matrix-org/synapse/issues/13308))
+- Optimise federation sender and appservice pusher event stream processing queries. Contributed by Nick @ Beeper (@fizzadar). ([\#13251](https://github.com/matrix-org/synapse/issues/13251))
+- Log the stack when waiting for an entire room to be un-partial stated. ([\#13257](https://github.com/matrix-org/synapse/issues/13257))
+- Fix spurious warning when fetching state after a missing prev event. ([\#13258](https://github.com/matrix-org/synapse/issues/13258))
+- Clean-up tests for notifications. ([\#13260](https://github.com/matrix-org/synapse/issues/13260))
+- Do not fail the build if Complement with workers fails. ([\#13266](https://github.com/matrix-org/synapse/issues/13266))
+- Don't pull out state in `compute_event_context` for unconflicted state. ([\#13267](https://github.com/matrix-org/synapse/issues/13267), [\#13274](https://github.com/matrix-org/synapse/issues/13274))
+- Reduce the rebuild time for the complement-synapse docker image. ([\#13279](https://github.com/matrix-org/synapse/issues/13279))
+- Don't pull out the full state when creating an event. ([\#13281](https://github.com/matrix-org/synapse/issues/13281), [\#13307](https://github.com/matrix-org/synapse/issues/13307))
+- Upgrade from Poetry 1.1.12 to 1.1.14, to fix bugs when locking packages. ([\#13285](https://github.com/matrix-org/synapse/issues/13285))
+- Make `DictionaryCache` expire full entries if they haven't been queried in a while, even if specific keys have been queried recently. ([\#13292](https://github.com/matrix-org/synapse/issues/13292))
+- Use `HTTPStatus` constants in place of literals in tests. ([\#13297](https://github.com/matrix-org/synapse/issues/13297))
+- Improve performance of query `_get_subset_users_in_room_with_profiles`. ([\#13299](https://github.com/matrix-org/synapse/issues/13299))
+- Increase the batch size of `bulk_get_push_rules` and `_get_joined_profiles_from_event_ids`. ([\#13300](https://github.com/matrix-org/synapse/issues/13300))
+- Remove unnecessary `json.dumps` from tests. ([\#13303](https://github.com/matrix-org/synapse/issues/13303))
+- Reduce memory usage of sending dummy events. ([\#13310](https://github.com/matrix-org/synapse/issues/13310))
+- Prevent formatting changes of [#3679](https://github.com/matrix-org/synapse/pull/3679) from appearing in `git blame`. ([\#13311](https://github.com/matrix-org/synapse/issues/13311))
+- Change `get_users_in_room` and `get_rooms_for_user` caches to enable pruning of old entries. 
([\#13313](https://github.com/matrix-org/synapse/issues/13313)) +- Validate federation destinations and log an error if a destination is invalid. ([\#13318](https://github.com/matrix-org/synapse/issues/13318)) +- Fix `FederationClient.get_pdu()` returning events from the cache as `outliers` instead of original events we saw over federation. ([\#13320](https://github.com/matrix-org/synapse/issues/13320)) +- Reduce memory usage of state caches. ([\#13323](https://github.com/matrix-org/synapse/issues/13323)) +- Reduce the amount of state we store in the `state_cache`. ([\#13324](https://github.com/matrix-org/synapse/issues/13324)) +- Add missing type hints to open tracing module. ([\#13328](https://github.com/matrix-org/synapse/issues/13328), [\#13345](https://github.com/matrix-org/synapse/issues/13345), [\#13362](https://github.com/matrix-org/synapse/issues/13362)) +- Remove old base slaved store and de-duplicate cache ID generators. Contributed by Nick @ Beeper (@fizzadar). ([\#13329](https://github.com/matrix-org/synapse/issues/13329), [\#13349](https://github.com/matrix-org/synapse/issues/13349)) +- When reporting metrics is enabled, use ~8x less data to describe DB transaction metrics. ([\#13342](https://github.com/matrix-org/synapse/issues/13342)) +- Faster room joins: skip soft fail checks while Synapse only has partial room state, since the current membership of event senders may not be accurately known. ([\#13354](https://github.com/matrix-org/synapse/issues/13354)) + + +Synapse 1.63.1 (2022-07-20) +=========================== + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.63.0 where push actions were incorrectly calculated for appservice users. This caused performance issues on servers with large numbers of appservices. ([\#13332](https://github.com/matrix-org/synapse/issues/13332)) + + +Synapse 1.63.0 (2022-07-19) +=========================== + +Improved Documentation +---------------------- + +- Clarify that homeserver server names are included in the reported data when the `report_stats` config option is enabled. ([\#13321](https://github.com/matrix-org/synapse/issues/13321)) + + +Synapse 1.63.0rc1 (2022-07-12) +============================== + +Features +-------- + +- Add a rate limit for local users sending invites. ([\#13125](https://github.com/matrix-org/synapse/issues/13125)) +- Implement [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827): Filtering of `/publicRooms` by room type. ([\#13031](https://github.com/matrix-org/synapse/issues/13031)) +- Improve validation logic in the account data REST endpoints. ([\#13148](https://github.com/matrix-org/synapse/issues/13148)) + + +Bugfixes +-------- + +- Fix a long-standing bug where application services were not able to join remote federated rooms without a profile. ([\#13131](https://github.com/matrix-org/synapse/issues/13131)) +- Fix a long-standing bug where `_get_state_map_for_room` might raise errors when third party event rules callbacks are present. ([\#13174](https://github.com/matrix-org/synapse/issues/13174)) +- Fix a long-standing bug where the `synapse_port_db` script could fail to copy rows with negative row ids. ([\#13226](https://github.com/matrix-org/synapse/issues/13226)) +- Fix a bug introduced in 1.54.0 where appservices would not receive room-less EDUs, like presence, when both [MSC2409](https://github.com/matrix-org/matrix-spec-proposals/pull/2409) and [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202) are enabled. 
([\#13236](https://github.com/matrix-org/synapse/issues/13236))
+- Fix a bug introduced in Synapse 1.62.0 where rows were not deleted from the `event_push_actions` table on large servers. ([\#13194](https://github.com/matrix-org/synapse/issues/13194))
+- Fix a bug introduced in Synapse 1.62.0 where notification counts would get stuck after a highlighted message. ([\#13223](https://github.com/matrix-org/synapse/issues/13223))
+- Fix an exception when using the experimental [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to look for remote federated imported events before room creation. ([\#13197](https://github.com/matrix-org/synapse/issues/13197))
+- Fix [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202)-enabled appservices not receiving to-device messages, preventing messages from being decrypted. ([\#13235](https://github.com/matrix-org/synapse/issues/13235))
+
+
+Updates to the Docker image
+---------------------------
+
+- Bump the version of `lxml` in the matrix.org Docker images and Debian packages from 4.8.0 to 4.9.1. ([\#13207](https://github.com/matrix-org/synapse/issues/13207))
+
+
+Improved Documentation
+----------------------
+
+- Add an explanation of the `--report-stats` argument to the docs. ([\#13029](https://github.com/matrix-org/synapse/issues/13029))
+- Add a helpful example bash script to the contrib directory for creating multiple worker configuration files of the same type. Contributed by @villepeh. ([\#13032](https://github.com/matrix-org/synapse/issues/13032))
+- Add missing links to config options. ([\#13166](https://github.com/matrix-org/synapse/issues/13166))
+- Add documentation for homeserver usage statistics collection. ([\#13086](https://github.com/matrix-org/synapse/issues/13086))
+- Add documentation for the existing `databases` option in the homeserver configuration manual. ([\#13212](https://github.com/matrix-org/synapse/issues/13212))
+- Clean up references to sample configuration and redirect users to the configuration manual instead. ([\#13077](https://github.com/matrix-org/synapse/issues/13077), [\#13139](https://github.com/matrix-org/synapse/issues/13139))
+- Document how the Synapse team does reviews. ([\#13132](https://github.com/matrix-org/synapse/issues/13132))
+- Fix wrong section header for `allow_public_rooms_over_federation` in the homeserver config documentation. ([\#13116](https://github.com/matrix-org/synapse/issues/13116))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the obsolete `RoomEventsStoreTestCase`, which had been unused for 8 years. Contributed by @arkamar. ([\#13200](https://github.com/matrix-org/synapse/issues/13200))
+
+
+Internal Changes
+----------------
+
+- Add type annotations to `synapse.logging`, `tests.server` and `tests.utils`. ([\#13028](https://github.com/matrix-org/synapse/issues/13028), [\#13103](https://github.com/matrix-org/synapse/issues/13103), [\#13159](https://github.com/matrix-org/synapse/issues/13159), [\#13136](https://github.com/matrix-org/synapse/issues/13136))
+- Enforce type annotations for `tests.test_server`. ([\#13135](https://github.com/matrix-org/synapse/issues/13135))
+- Support temporary experimental return values for spam checker module callbacks. ([\#13044](https://github.com/matrix-org/synapse/issues/13044))
+- Add support to `complement.sh` for skipping the docker build. 
([\#13143](https://github.com/matrix-org/synapse/issues/13143), [\#13158](https://github.com/matrix-org/synapse/issues/13158)) +- Add support to `complement.sh` for setting the log level using the `SYNAPSE_TEST_LOG_LEVEL` environment variable. ([\#13152](https://github.com/matrix-org/synapse/issues/13152)) +- Enable Complement testing in the 'Twisted Trunk' CI runs. ([\#13079](https://github.com/matrix-org/synapse/issues/13079), [\#13157](https://github.com/matrix-org/synapse/issues/13157)) +- Improve startup times in Complement test runs against workers, particularly in CPU-constrained environments. ([\#13127](https://github.com/matrix-org/synapse/issues/13127)) +- Update config used by Complement to allow device name lookup over federation. ([\#13167](https://github.com/matrix-org/synapse/issues/13167)) +- Faster room joins: handle race between persisting an event and un-partial stating a room. ([\#13100](https://github.com/matrix-org/synapse/issues/13100)) +- Faster room joins: fix race in recalculation of current room state. ([\#13151](https://github.com/matrix-org/synapse/issues/13151)) +- Faster room joins: skip waiting for full state when processing incoming events over federation. ([\#13144](https://github.com/matrix-org/synapse/issues/13144)) +- Raise a `DependencyError` on missing dependencies instead of a `ConfigError`. ([\#13113](https://github.com/matrix-org/synapse/issues/13113)) +- Avoid stripping line breaks from SQL sent to the database. ([\#13129](https://github.com/matrix-org/synapse/issues/13129)) +- Apply ratelimiting earlier in processing of `/send` requests. ([\#13134](https://github.com/matrix-org/synapse/issues/13134)) +- Improve exception handling when processing events received over federation. ([\#13145](https://github.com/matrix-org/synapse/issues/13145)) +- Check that `auto_vacuum` is disabled when porting a SQLite database to Postgres, as `VACUUM`s must not be performed between runs of the script. ([\#13195](https://github.com/matrix-org/synapse/issues/13195)) +- Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. ([\#13119](https://github.com/matrix-org/synapse/issues/13119), [\#13153](https://github.com/matrix-org/synapse/issues/13153)) +- Reduce memory consumption when processing incoming events in large rooms. ([\#13078](https://github.com/matrix-org/synapse/issues/13078), [\#13222](https://github.com/matrix-org/synapse/issues/13222)) +- Reduce number of queries used to get profile information. Contributed by Nick @ Beeper (@fizzadar). ([\#13209](https://github.com/matrix-org/synapse/issues/13209)) +- Reduce number of events queried during room creation. Contributed by Nick @ Beeper (@fizzadar). ([\#13210](https://github.com/matrix-org/synapse/issues/13210)) +- More aggressively rotate push actions. ([\#13211](https://github.com/matrix-org/synapse/issues/13211)) +- Add `max_line_length` setting for Python files to the `.editorconfig`. Contributed by @sumnerevans @ Beeper. ([\#13228](https://github.com/matrix-org/synapse/issues/13228)) + +Synapse 1.62.0 (2022-07-05) +=========================== + +No significant changes since 1.62.0rc3. + +Authors of spam-checker plugins should consult the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.62/docs/upgrade.md#upgrading-to-v1620) to learn about the enriched signatures for spam checker callbacks, which are supported with this release of Synapse. + +## Security advisory + +The following issue is fixed in 1.62.0. 
+ +* [GHSA-jhjh-776m-4765](https://github.com/matrix-org/synapse/security/advisories/GHSA-jhjh-776m-4765) / [CVE-2022-31152](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-31152) + + Synapse instances prior to 1.62.0 did not implement the Matrix [event authorization rules](https://spec.matrix.org/v1.3/rooms/v10/#authorization-rules) correctly. An attacker could craft events which would be accepted by Synapse but not a spec-conformant server, potentially causing divergence in the room state between servers. + + Homeservers with federation disabled via the [`federation_domain_whitelist`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#federation_domain_whitelist) config option are unaffected. + + Administrators of homeservers with federation enabled are advised to upgrade to v1.62.0 or higher. + + Fixed by [#13087](https://github.com/matrix-org/synapse/pull/13087) and [#13088](https://github.com/matrix-org/synapse/pull/13088). + +Synapse 1.62.0rc3 (2022-07-04) +============================== + +Bugfixes +-------- + +- Update the version of the [ldap3 plugin](https://github.com/matrix-org/matrix-synapse-ldap3/) included in the `matrixdotorg/synapse` DockerHub images and the Debian packages hosted on `packages.matrix.org` to 0.2.1. This fixes [a bug](https://github.com/matrix-org/matrix-synapse-ldap3/pull/163) with usernames containing uppercase characters. ([\#13156](https://github.com/matrix-org/synapse/issues/13156)) +- Fix a bug introduced in Synapse 1.62.0rc1 affecting unread counts for users on small servers. ([\#13168](https://github.com/matrix-org/synapse/issues/13168)) + + +Synapse 1.62.0rc2 (2022-07-01) +============================== + +Bugfixes +-------- + +- Fix unread counts for users on large servers. Introduced in v1.62.0rc1. ([\#13140](https://github.com/matrix-org/synapse/issues/13140)) +- Fix DB performance when deleting old push notifications. Introduced in v1.62.0rc1. ([\#13141](https://github.com/matrix-org/synapse/issues/13141)) + + +Synapse 1.62.0rc1 (2022-06-28) +============================== + +Features +-------- + +- Port the spam-checker API callbacks to a new, richer API. This is part of an ongoing change to let spam-checker modules inform users of the reason their event or operation is rejected. ([\#12857](https://github.com/matrix-org/synapse/issues/12857), [\#13047](https://github.com/matrix-org/synapse/issues/13047)) +- Allow server admins to customise the response of the `/.well-known/matrix/client` endpoint. ([\#13035](https://github.com/matrix-org/synapse/issues/13035)) +- Add metrics measuring the CPU and DB time spent in state resolution. ([\#13036](https://github.com/matrix-org/synapse/issues/13036)) +- Speed up fetching of device list changes in `/sync` and `/keys/changes`. ([\#13045](https://github.com/matrix-org/synapse/issues/13045), [\#13098](https://github.com/matrix-org/synapse/issues/13098)) +- Improve URL previews for sites which only provide Twitter Card metadata, e.g. LWN.net. ([\#13056](https://github.com/matrix-org/synapse/issues/13056)) + + +Bugfixes +-------- + +- Update [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786) implementation to check `state_key`. ([\#12939](https://github.com/matrix-org/synapse/issues/12939)) +- Fix a bug introduced in Synapse 1.58 where Synapse would not report full version information when installed from a git checkout. This is a best-effort affair and not guaranteed to be stable. 
([\#12973](https://github.com/matrix-org/synapse/issues/12973))
+- Fix a bug introduced in Synapse 1.60 where Synapse would fail to start if the `sqlite3` module was not available. ([\#12979](https://github.com/matrix-org/synapse/issues/12979))
+- Fix a bug introduced in Synapse 1.41.0 where non-standard information was required when requesting the `/hierarchy` API over federation. ([\#12991](https://github.com/matrix-org/synapse/issues/12991))
+- Fix a long-standing bug which meant that rate limiting was not restrictive enough in some cases. ([\#13018](https://github.com/matrix-org/synapse/issues/13018))
+- Fix a bug introduced in Synapse 1.58 where profile requests for a malformed user ID would cause an internal error. Synapse now returns 400 Bad Request in this situation. ([\#13041](https://github.com/matrix-org/synapse/issues/13041))
+- Fix some inconsistencies in the event authentication code. ([\#13087](https://github.com/matrix-org/synapse/issues/13087), [\#13088](https://github.com/matrix-org/synapse/issues/13088))
+- Fix a long-standing bug where room directory requests would cause an internal server error if given a malformed room alias. ([\#13106](https://github.com/matrix-org/synapse/issues/13106))
+
+
+Improved Documentation
+----------------------
+
+- Add documentation for how to configure Synapse with workers using Docker Compose. Includes example worker config and docker-compose.yaml. Contributed by @Thumbscrew. ([\#12737](https://github.com/matrix-org/synapse/issues/12737))
+- Ensure the [Poetry cheat sheet](https://matrix-org.github.io/synapse/develop/development/dependencies.html) is available in the online documentation. ([\#13022](https://github.com/matrix-org/synapse/issues/13022))
+- Mention removed community/group worker endpoints in upgrade.md. Contributed by @olmari. ([\#13023](https://github.com/matrix-org/synapse/issues/13023))
+- Add instructions for running Complement with `gotestfmt`-formatted output locally. ([\#13073](https://github.com/matrix-org/synapse/issues/13073))
+- Update OpenTracing docs to reference the configuration manual rather than the configuration file. ([\#13076](https://github.com/matrix-org/synapse/issues/13076))
+- Update information on downstream Debian packages. ([\#13095](https://github.com/matrix-org/synapse/issues/13095))
+- Remove documentation for the Delete Group Admin API which no longer exists. ([\#13112](https://github.com/matrix-org/synapse/issues/13112))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the unspecced `DELETE /directory/list/room/{roomId}` endpoint, which hid rooms from the [public room directory](https://spec.matrix.org/v1.3/client-server-api/#listing-rooms). Instead, `PUT` to the same URL with a visibility of `"private"`. ([\#13123](https://github.com/matrix-org/synapse/issues/13123))
+
+
+Internal Changes
+----------------
+
+- Add tests for cancellation of `GET /rooms/$room_id/members` and `GET /rooms/$room_id/state` requests. ([\#12674](https://github.com/matrix-org/synapse/issues/12674))
+- Report login failures due to unknown third party identifiers in the same way as failures due to invalid passwords. This prevents an attacker from using the error response to determine if the identifier exists. Contributed by Daniel Aloni. ([\#12738](https://github.com/matrix-org/synapse/issues/12738))
+- Merge the Complement testing Docker images into a single, multi-purpose image. 
([\#12881](https://github.com/matrix-org/synapse/issues/12881), [\#13075](https://github.com/matrix-org/synapse/issues/13075)) +- Simplify the database schema for `event_edges`. ([\#12893](https://github.com/matrix-org/synapse/issues/12893)) +- Clean up the test code for client disconnection. ([\#12929](https://github.com/matrix-org/synapse/issues/12929)) +- Remove code generating comments in configuration. ([\#12941](https://github.com/matrix-org/synapse/issues/12941)) +- Add `Cross-Origin-Resource-Policy: cross-origin` header to content repository's thumbnail and download endpoints. ([\#12944](https://github.com/matrix-org/synapse/issues/12944)) +- Replace noop background updates with `DELETE` delta. ([\#12954](https://github.com/matrix-org/synapse/issues/12954), [\#13050](https://github.com/matrix-org/synapse/issues/13050)) +- Use lower isolation level when inserting read receipts to avoid serialization errors. Contributed by Nick @ Beeper. ([\#12957](https://github.com/matrix-org/synapse/issues/12957)) +- Reduce the amount of state we pull from the DB. ([\#12963](https://github.com/matrix-org/synapse/issues/12963)) +- Enable testing against PostgreSQL databases in Complement CI. ([\#12965](https://github.com/matrix-org/synapse/issues/12965), [\#13034](https://github.com/matrix-org/synapse/issues/13034)) +- Fix an inaccurate comment. ([\#12969](https://github.com/matrix-org/synapse/issues/12969)) +- Remove the `delete_device` method and always call `delete_devices`. ([\#12970](https://github.com/matrix-org/synapse/issues/12970)) +- Use a GitHub form for issues rather than a hard-to-read, easy-to-ignore template. ([\#12982](https://github.com/matrix-org/synapse/issues/12982)) +- Move [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) behind an experimental config flag. ([\#12984](https://github.com/matrix-org/synapse/issues/12984)) +- Add type hints to tests. ([\#12985](https://github.com/matrix-org/synapse/issues/12985), [\#13099](https://github.com/matrix-org/synapse/issues/13099)) +- Refactor macaroon tokens generation and move the unsubscribe link in notification emails to `/_synapse/client/unsubscribe`. ([\#12986](https://github.com/matrix-org/synapse/issues/12986)) +- Fix documentation for running complement tests. ([\#12990](https://github.com/matrix-org/synapse/issues/12990)) +- Faster joins: add issue links to the TODO comments in the code. ([\#13004](https://github.com/matrix-org/synapse/issues/13004)) +- Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. ([\#13005](https://github.com/matrix-org/synapse/issues/13005), [\#13096](https://github.com/matrix-org/synapse/issues/13096), [\#13118](https://github.com/matrix-org/synapse/issues/13118)) +- Replaced usage of PyJWT with methods from Authlib in `org.matrix.login.jwt`. Contributed by Hannes Lerchl. ([\#13011](https://github.com/matrix-org/synapse/issues/13011)) +- Modernize the `contrib/graph/` scripts. ([\#13013](https://github.com/matrix-org/synapse/issues/13013)) +- Remove redundant `room_version` parameters from event auth functions. ([\#13017](https://github.com/matrix-org/synapse/issues/13017)) +- Decouple `synapse.api.auth_blocking.AuthBlocking` from `synapse.api.auth.Auth`. ([\#13021](https://github.com/matrix-org/synapse/issues/13021)) +- Add type annotations to `synapse.storage.databases.main.devices`. ([\#13025](https://github.com/matrix-org/synapse/issues/13025)) +- Set default `sync_response_cache_duration` to two minutes. 
([\#13042](https://github.com/matrix-org/synapse/issues/13042)) +- Rename CI test runs. ([\#13046](https://github.com/matrix-org/synapse/issues/13046)) +- Increase timeout of complement CI test runs. ([\#13048](https://github.com/matrix-org/synapse/issues/13048)) +- Refactor entry points so that they all have a `main` function. ([\#13052](https://github.com/matrix-org/synapse/issues/13052)) +- Refactor the Dockerfile-workers configuration script to use Jinja2 templates in Synapse workers' Supervisord blocks. ([\#13054](https://github.com/matrix-org/synapse/issues/13054)) +- Add headers to individual options in config documentation to allow for linking. ([\#13055](https://github.com/matrix-org/synapse/issues/13055)) +- Make Complement CI logs easier to read. ([\#13057](https://github.com/matrix-org/synapse/issues/13057), [\#13058](https://github.com/matrix-org/synapse/issues/13058), [\#13069](https://github.com/matrix-org/synapse/issues/13069)) +- Don't instantiate modules with keyword arguments. ([\#13060](https://github.com/matrix-org/synapse/issues/13060)) +- Fix type checking errors against Twisted trunk. ([\#13061](https://github.com/matrix-org/synapse/issues/13061)) +- Allow MSC3030 `timestamp_to_event` calls from anyone on world-readable rooms. ([\#13062](https://github.com/matrix-org/synapse/issues/13062)) +- Add a CI job to check that schema deltas are in the correct folder. ([\#13063](https://github.com/matrix-org/synapse/issues/13063)) +- Avoid rechecking event auth rules which are independent of room state. ([\#13065](https://github.com/matrix-org/synapse/issues/13065)) +- Reduce the duplication of code that invokes the rate limiter. ([\#13070](https://github.com/matrix-org/synapse/issues/13070)) +- Add a Subject Alternative Name to the certificate generated for Complement tests. ([\#13071](https://github.com/matrix-org/synapse/issues/13071)) +- Add more tests for room upgrades. ([\#13074](https://github.com/matrix-org/synapse/issues/13074)) +- Pin dependencies maintained by matrix.org to [semantic version](https://semver.org/) bounds. ([\#13082](https://github.com/matrix-org/synapse/issues/13082)) +- Correctly report prometheus DB stats for `get_earliest_token_for_stats`. ([\#13085](https://github.com/matrix-org/synapse/issues/13085)) +- Fix a long-standing bug where a finished logging context would be re-started when Synapse failed to persist an event from federation. ([\#13089](https://github.com/matrix-org/synapse/issues/13089)) +- Simplify the alias deletion logic as an application service. ([\#13093](https://github.com/matrix-org/synapse/issues/13093)) +- Add type annotations to `tests.test_server`. ([\#13124](https://github.com/matrix-org/synapse/issues/13124)) + + +Synapse 1.61.1 (2022-06-28) +=========================== + +This patch release fixes a security issue regarding URL previews, affecting all prior versions of Synapse. Server administrators are encouraged to update Synapse as soon as possible. We are not aware of these vulnerabilities being exploited in the wild. + +Server administrators who are unable to update Synapse may use the workarounds described in the linked GitHub Security Advisory below. + +## Security advisory + +The following issue is fixed in 1.61.1. 
+ +* [GHSA-22p3-qrh9-cx32](https://github.com/matrix-org/synapse/security/advisories/GHSA-22p3-qrh9-cx32) / [CVE-2022-31052](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-31052) + + Synapse instances with the [`url_preview_enabled`](https://matrix-org.github.io/synapse/v1.61/usage/configuration/config_documentation.html#media-store) homeserver config option set to `true` are affected. URL previews of some web pages can lead to unbounded recursion, causing the request to either fail, or in some cases crash the running Synapse process. + + Requesting URL previews requires authentication. Nevertheless, it is possible to exploit this maliciously, either by malicious users on the homeserver, or by remote users sending URLs that a local user's client may automatically request a URL preview for. + + Homeservers with the `url_preview_enabled` configuration option set to `false` (the default) are unaffected. Instances with the `enable_media_repo` configuration option set to `false` are also unaffected, as this also disables URL preview functionality. + + Fixed by [fa1308061802ac7b7d20e954ba7372c5ac292333](https://github.com/matrix-org/synapse/commit/fa1308061802ac7b7d20e954ba7372c5ac292333). + +Synapse 1.61.0 (2022-06-14) +=========================== + +This release removes support for the non-standard feature known both as 'groups' and as 'communities', which have been superseded by *Spaces*. + +See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1610) +for more details. + +Improved Documentation +---------------------- + +- Mention removed community/group worker endpoints in [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1610). Contributed by @olmari. ([\#13023](https://github.com/matrix-org/synapse/issues/13023)) + + +Synapse 1.61.0rc1 (2022-06-07) +============================== + +Features +-------- + +- Add new `media_retention` options to the homeserver config for routinely cleaning up non-recently accessed media. ([\#12732](https://github.com/matrix-org/synapse/issues/12732), [\#12972](https://github.com/matrix-org/synapse/issues/12972), [\#12977](https://github.com/matrix-org/synapse/issues/12977)) +- Experimental support for [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772): Push rule for mutually related events. ([\#12740](https://github.com/matrix-org/synapse/issues/12740), [\#12859](https://github.com/matrix-org/synapse/issues/12859)) +- Update to the `check_event_for_spam` module callback: Deprecate the current callback signature, replace it with a new signature that is both less ambiguous (replacing booleans with explicit allow/block) and more powerful (ability to return explicit error codes). ([\#12808](https://github.com/matrix-org/synapse/issues/12808)) +- Add storage and module API methods to get monthly active users (and their corresponding appservices) within an optionally specified time range. ([\#12838](https://github.com/matrix-org/synapse/issues/12838), [\#12917](https://github.com/matrix-org/synapse/issues/12917)) +- Support the new error code `ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED` from [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823). ([\#12845](https://github.com/matrix-org/synapse/issues/12845), [\#12923](https://github.com/matrix-org/synapse/issues/12923)) +- Add a configurable background job to delete stale devices. 
([\#12855](https://github.com/matrix-org/synapse/issues/12855)) +- Improve URL previews for pages with empty elements. ([\#12951](https://github.com/matrix-org/synapse/issues/12951)) +- Allow updating a user's password using the admin API without logging out their devices. Contributed by @jcgruenhage. ([\#12952](https://github.com/matrix-org/synapse/issues/12952)) + + +Bugfixes +-------- + +- Always send an `access_token` in `/thirdparty/` requests to appservices, as required by the [Application Service API specification](https://spec.matrix.org/v1.1/application-service-api/#third-party-networks). ([\#12746](https://github.com/matrix-org/synapse/issues/12746)) +- Implement [MSC3816](https://github.com/matrix-org/matrix-spec-proposals/pull/3816): sending the root event in a thread should count as having 'participated' in it. ([\#12766](https://github.com/matrix-org/synapse/issues/12766)) +- Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API. ([\#12784](https://github.com/matrix-org/synapse/issues/12784)) +- Fix a bug where we did not correctly handle invalid device list updates over federation. Contributed by Carl Bordum Hansen. ([\#12829](https://github.com/matrix-org/synapse/issues/12829)) +- Fix a bug which allowed multiple async operations to access database locks concurrently. Contributed by @sumnerevans @ Beeper. ([\#12832](https://github.com/matrix-org/synapse/issues/12832)) +- Fix an issue introduced in Synapse 0.34 where the `/notifications` endpoint would only return notifications if a user registered at least one pusher. Contributed by Famedly. ([\#12840](https://github.com/matrix-org/synapse/issues/12840)) +- Fix a bug where servers using a Postgres database would fail to backfill from an insertion event when MSC2716 is enabled (`experimental_features.msc2716_enabled`). ([\#12843](https://github.com/matrix-org/synapse/issues/12843)) +- Fix [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787) rooms being omitted from room directory, room summary and space hierarchy responses. ([\#12858](https://github.com/matrix-org/synapse/issues/12858)) +- Fix a bug introduced in Synapse 1.54.0 which could sometimes cause exceptions when handling federated traffic. ([\#12877](https://github.com/matrix-org/synapse/issues/12877)) +- Fix a bug introduced in Synapse 1.59.0 which caused room deletion to fail with a foreign key violation error. ([\#12889](https://github.com/matrix-org/synapse/issues/12889)) +- Fix a long-standing bug which caused the `/messages` endpoint to return an incorrect `end` attribute when there were no more events. Contributed by @Vetchu. ([\#12903](https://github.com/matrix-org/synapse/issues/12903)) +- Fix a bug introduced in Synapse 1.58.0 where `/sync` would fail if the most recent event in a room was a redaction of an event that has since been purged. ([\#12905](https://github.com/matrix-org/synapse/issues/12905)) +- Fix a potential memory leak when generating thumbnails. ([\#12932](https://github.com/matrix-org/synapse/issues/12932)) +- Fix a long-standing bug where a URL preview would break if the image failed to download. ([\#12950](https://github.com/matrix-org/synapse/issues/12950)) + + +Improved Documentation +---------------------- + +- Fix typographical errors in documentation. ([\#12863](https://github.com/matrix-org/synapse/issues/12863)) +- Fix documentation incorrectly stating the `sendToDevice` endpoint can be directed at generic workers. Contributed by Nick @ Beeper. 
([\#12867](https://github.com/matrix-org/synapse/issues/12867)) + + +Deprecations and Removals +------------------------- + +- Remove support for the non-standard groups/communities feature from Synapse. ([\#12553](https://github.com/matrix-org/synapse/issues/12553), [\#12558](https://github.com/matrix-org/synapse/issues/12558), [\#12563](https://github.com/matrix-org/synapse/issues/12563), [\#12895](https://github.com/matrix-org/synapse/issues/12895), [\#12897](https://github.com/matrix-org/synapse/issues/12897), [\#12899](https://github.com/matrix-org/synapse/issues/12899), [\#12900](https://github.com/matrix-org/synapse/issues/12900), [\#12936](https://github.com/matrix-org/synapse/issues/12936), [\#12966](https://github.com/matrix-org/synapse/issues/12966)) +- Remove contributed `kick_users.py` script. This is broken under Python 3, and is not added to the environment when `pip install`ing Synapse. ([\#12908](https://github.com/matrix-org/synapse/issues/12908)) +- Remove `contrib/jitsimeetbridge`. This was an unused experiment that hasn't been meaningfully changed since 2014. ([\#12909](https://github.com/matrix-org/synapse/issues/12909)) +- Remove unused `contrib/experiements/cursesio.py` script, which fails to run under Python 3. ([\#12910](https://github.com/matrix-org/synapse/issues/12910)) +- Remove unused `contrib/experiements/test_messaging.py` script. This fails to run on Python 3. ([\#12911](https://github.com/matrix-org/synapse/issues/12911)) + + +Internal Changes +---------------- + +- Test Synapse against Complement with workers. ([\#12810](https://github.com/matrix-org/synapse/issues/12810), [\#12933](https://github.com/matrix-org/synapse/issues/12933)) +- Reduce the amount of state we pull from the DB. ([\#12811](https://github.com/matrix-org/synapse/issues/12811), [\#12964](https://github.com/matrix-org/synapse/issues/12964)) +- Try other homeservers when re-syncing state for rooms with partial state. ([\#12812](https://github.com/matrix-org/synapse/issues/12812)) +- Resume state re-syncing for rooms with partial state after a Synapse restart. ([\#12813](https://github.com/matrix-org/synapse/issues/12813)) +- Remove Mutual Rooms' ([MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)) endpoint dependency on the User Directory. ([\#12836](https://github.com/matrix-org/synapse/issues/12836)) +- Experimental: expand `check_event_for_spam` with ability to return additional fields. This enables spam-checker implementations to experiment with mechanisms to give users more information about why they are blocked and whether any action is needed from them to be unblocked. ([\#12846](https://github.com/matrix-org/synapse/issues/12846)) +- Remove `dont_notify` from the `.m.rule.room.server_acl` rule. ([\#12849](https://github.com/matrix-org/synapse/issues/12849)) +- Remove the unstable `/hierarchy` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). ([\#12851](https://github.com/matrix-org/synapse/issues/12851)) +- Pull out less state when handling gaps in room DAG. ([\#12852](https://github.com/matrix-org/synapse/issues/12852), [\#12904](https://github.com/matrix-org/synapse/issues/12904)) +- Clean-up the push rules datastore. ([\#12856](https://github.com/matrix-org/synapse/issues/12856)) +- Correct a type annotation in the URL preview source code. ([\#12860](https://github.com/matrix-org/synapse/issues/12860)) +- Update `pyjwt` dependency to [2.4.0](https://github.com/jpadilla/pyjwt/releases/tag/2.4.0). 
([\#12865](https://github.com/matrix-org/synapse/issues/12865)) +- Enable the `/account/whoami` endpoint on synapse worker processes. Contributed by Nick @ Beeper. ([\#12866](https://github.com/matrix-org/synapse/issues/12866)) +- Enable the `batch_send` endpoint on synapse worker processes. Contributed by Nick @ Beeper. ([\#12868](https://github.com/matrix-org/synapse/issues/12868)) +- Don't generate empty AS transactions when the AS is flagged as down. Contributed by Nick @ Beeper. ([\#12869](https://github.com/matrix-org/synapse/issues/12869)) +- Fix up the variable `state_store` naming. ([\#12871](https://github.com/matrix-org/synapse/issues/12871)) +- Faster room joins: when querying the current state of the room, wait for state to be populated. ([\#12872](https://github.com/matrix-org/synapse/issues/12872)) +- Avoid running queries which will never result in deletions. ([\#12879](https://github.com/matrix-org/synapse/issues/12879)) +- Use constants for EDU types. ([\#12884](https://github.com/matrix-org/synapse/issues/12884)) +- Reduce database load of `/sync` when presence is enabled. ([\#12885](https://github.com/matrix-org/synapse/issues/12885)) +- Refactor `have_seen_events` to reduce memory consumed when processing federation traffic. ([\#12886](https://github.com/matrix-org/synapse/issues/12886)) +- Refactor receipt linearization code. ([\#12888](https://github.com/matrix-org/synapse/issues/12888)) +- Add type annotations to `synapse.logging.opentracing`. ([\#12894](https://github.com/matrix-org/synapse/issues/12894)) +- Remove PyNaCl occurrences directly used in Synapse code. ([\#12902](https://github.com/matrix-org/synapse/issues/12902)) +- Bump types-jsonschema from 4.4.1 to 4.4.6. ([\#12912](https://github.com/matrix-org/synapse/issues/12912)) +- Rename storage classes. ([\#12913](https://github.com/matrix-org/synapse/issues/12913)) +- Preparation for database schema simplifications: stop reading from `event_edges.room_id`. ([\#12914](https://github.com/matrix-org/synapse/issues/12914)) +- Check if we are in a virtual environment before overriding the `PYTHONPATH` environment variable in the demo script. ([\#12916](https://github.com/matrix-org/synapse/issues/12916)) +- Improve the logging when signature checks on events fail. ([\#12925](https://github.com/matrix-org/synapse/issues/12925)) + + +Synapse 1.60.0 (2022-05-31) +=========================== + +This release of Synapse adds a unique index to the `state_group_edges` table, in +order to prevent accidentally introducing duplicate information (for example, +because a database backup was restored multiple times). If your Synapse database +already has duplicate rows in this table, this could fail with an error and +require manual remediation. + +Additionally, the signature of the `check_event_for_spam` module callback has changed. +The previous signature has been deprecated and remains working for now. Module authors +should update their modules to use the new signature where possible. + +See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1600) +for more details. + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.60.0rc1 that would break some imports from `synapse.module_api`. 
Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.60.0rc1 that would break some imports from `synapse.module_api`. ([\#12918](https://github.com/matrix-org/synapse/issues/12918)) + + +Synapse 1.60.0rc2 (2022-05-27) +============================== + +Features +-------- + +- Add an option allowing users to use their password to reauthenticate for privileged actions even though password login is disabled. ([\#12883](https://github.com/matrix-org/synapse/issues/12883)) + + +Bugfixes +-------- + +- Explicitly close `ijson` coroutines once we are done with them, instead of leaving the garbage collector to close them. ([\#12875](https://github.com/matrix-org/synapse/issues/12875)) + + +Internal Changes +---------------- + +- Improve URL previews by not including the content of media tags in the generated description. ([\#12887](https://github.com/matrix-org/synapse/issues/12887)) + + +Synapse 1.60.0rc1 (2022-05-24) +============================== + +Features +-------- + +- Measure the time taken in spam-checking callbacks and expose those measurements as metrics. ([\#12513](https://github.com/matrix-org/synapse/issues/12513)) +- Add a `default_power_level_content_override` config option to set default room power levels per room preset. ([\#12618](https://github.com/matrix-org/synapse/issues/12618)) +- Add support for [MSC3787: Allowing knocks to restricted rooms](https://github.com/matrix-org/matrix-spec-proposals/pull/3787). ([\#12623](https://github.com/matrix-org/synapse/issues/12623)) +- Send `USER_IP` commands on a different Redis channel, in order to reduce traffic to workers that do not process these commands. ([\#12672](https://github.com/matrix-org/synapse/issues/12672), [\#12809](https://github.com/matrix-org/synapse/issues/12809)) +- Synapse will now reload [cache config](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#caching) when it receives a [SIGHUP](https://en.wikipedia.org/wiki/SIGHUP) signal. ([\#12673](https://github.com/matrix-org/synapse/issues/12673)) +- Add a config option to allow for auto-tuning of caches. ([\#12701](https://github.com/matrix-org/synapse/issues/12701)) +- Update the [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) implementation to process marker events from the current state, to avoid markers being lost in timeline gaps for federated servers, which would cause the imported history to go undiscovered. ([\#12718](https://github.com/matrix-org/synapse/issues/12718)) +- Add a `drop_federated_event` callback to `SpamChecker` to disregard inbound federated events before they take up much processing power, in an emergency. ([\#12744](https://github.com/matrix-org/synapse/issues/12744)) +- Implement [MSC3818: Copy room type on upgrade](https://github.com/matrix-org/matrix-spec-proposals/pull/3818). ([\#12786](https://github.com/matrix-org/synapse/issues/12786), [\#12792](https://github.com/matrix-org/synapse/issues/12792)) +- Update the `check_event_for_spam` module callback: deprecate the current callback signature and replace it with a new signature that is both less ambiguous (replacing booleans with explicit allow/block) and more powerful (ability to return explicit error codes). ([\#12808](https://github.com/matrix-org/synapse/issues/12808)) + + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.7.0 that would prevent events from being sent to clients if the room has a retention policy while support for retention policies is disabled.
([\#12611](https://github.com/matrix-org/synapse/issues/12611)) +- Fix a bug introduced in Synapse 1.57.0 where `/messages` would throw a 500 error when querying for a non-existent room. ([\#12683](https://github.com/matrix-org/synapse/issues/12683)) +- Add a unique index to `state_group_edges` to prevent duplicates being accidentally introduced and the consequent impact on performance. ([\#12687](https://github.com/matrix-org/synapse/issues/12687)) +- Fix a long-standing bug where an empty room would be created when a user with an insufficient power level tried to upgrade a room. ([\#12696](https://github.com/matrix-org/synapse/issues/12696)) +- Fix a bug introduced in Synapse 1.30.0 where empty rooms could be automatically created if a monthly active users limit is set. ([\#12713](https://github.com/matrix-org/synapse/issues/12713)) +- Fix push notifications so that they are dismissed when the message is read on another client. Contributed by @SpiritCroc @ Beeper. ([\#12721](https://github.com/matrix-org/synapse/issues/12721)) +- Fix poor database performance when reading the cache invalidation stream for large servers with lots of workers. ([\#12747](https://github.com/matrix-org/synapse/issues/12747)) +- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. ([\#12762](https://github.com/matrix-org/synapse/issues/12762)) +- Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API. ([\#12770](https://github.com/matrix-org/synapse/issues/12770)) +- Give a meaningful error message when a client tries to create a room with an invalid alias localpart. ([\#12779](https://github.com/matrix-org/synapse/issues/12779)) +- Fix a bug introduced in Synapse 1.43.0 where a file (`providers.json`) was never closed. Contributed by @arkamar. ([\#12794](https://github.com/matrix-org/synapse/issues/12794)) +- Fix a long-standing bug where finished log contexts would be re-started when failing to contact remote homeservers. ([\#12803](https://github.com/matrix-org/synapse/issues/12803)) +- Fix a bug, introduced in Synapse 1.21.0, that led to media thumbnails being unusable before the index has been added in the background. ([\#12823](https://github.com/matrix-org/synapse/issues/12823)) + + +Updates to the Docker image +--------------------------- + +- Fix the Dockerfile after a dependency update. ([\#12853](https://github.com/matrix-org/synapse/issues/12853)) + + +Improved Documentation +---------------------- + +- Fix a typo in the Media Admin API documentation. ([\#12715](https://github.com/matrix-org/synapse/issues/12715)) +- Update the OpenID Connect example for Keycloak to be compatible with newer versions of Keycloak. Contributed by @nhh. ([\#12727](https://github.com/matrix-org/synapse/issues/12727)) +- Fix typo in server listener documentation. ([\#12742](https://github.com/matrix-org/synapse/issues/12742)) +- Link to the configuration manual from the welcome page of the documentation. ([\#12748](https://github.com/matrix-org/synapse/issues/12748)) +- Fix typo in `run_background_tasks_on` option name in configuration manual documentation. ([\#12749](https://github.com/matrix-org/synapse/issues/12749)) +- Add information regarding the `rc_invites` ratelimiting option to the configuration docs. ([\#12759](https://github.com/matrix-org/synapse/issues/12759)) +- Add documentation for cancellation of request processing.
([\#12761](https://github.com/matrix-org/synapse/issues/12761)) +- Recommend using Docker to run tests against Postgres. ([\#12765](https://github.com/matrix-org/synapse/issues/12765)) +- Add missing user directory endpoint to the generic worker documentation. Contributed by @olmari. ([\#12773](https://github.com/matrix-org/synapse/issues/12773)) +- Add additional info to documentation of config option `cache_autotuning`. ([\#12776](https://github.com/matrix-org/synapse/issues/12776)) +- Update configuration manual documentation to document size-related suffixes. ([\#12777](https://github.com/matrix-org/synapse/issues/12777)) +- Fix invalid YAML syntax in the example documentation for the `url_preview_accept_language` config option. ([\#12785](https://github.com/matrix-org/synapse/issues/12785)) + + +Deprecations and Removals +------------------------- + +- Require a body in POST requests to `/rooms/{roomId}/receipt/{receiptType}/{eventId}`, as required by the [Matrix specification](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidreceiptreceipttypeeventid). This breaks compatibility with Element Android 1.2.0 and earlier: users of those clients will be unable to send read receipts. ([\#12709](https://github.com/matrix-org/synapse/issues/12709)) + + +Internal Changes +---------------- + +- Improve event caching mechanism to avoid having multiple copies of an event in memory at a time. ([\#10533](https://github.com/matrix-org/synapse/issues/10533)) +- Preparation for faster-room-join work: return subsets of room state which we already have, immediately. ([\#12498](https://github.com/matrix-org/synapse/issues/12498)) +- Add `@cancellable` decorator, for use on endpoint methods that can be cancelled when clients disconnect. ([\#12586](https://github.com/matrix-org/synapse/issues/12586), [\#12588](https://github.com/matrix-org/synapse/issues/12588), [\#12630](https://github.com/matrix-org/synapse/issues/12630), [\#12694](https://github.com/matrix-org/synapse/issues/12694), [\#12698](https://github.com/matrix-org/synapse/issues/12698), [\#12699](https://github.com/matrix-org/synapse/issues/12699), [\#12700](https://github.com/matrix-org/synapse/issues/12700), [\#12705](https://github.com/matrix-org/synapse/issues/12705)) +- Enable cancellation of `GET /rooms/$room_id/members`, `GET /rooms/$room_id/state` and `GET /rooms/$room_id/state/$event_type/*` requests. ([\#12708](https://github.com/matrix-org/synapse/issues/12708)) +- Improve documentation of the `synapse.push` module. ([\#12676](https://github.com/matrix-org/synapse/issues/12676)) +- Refactor functions on `PushRuleEvaluatorForEvent`. ([\#12677](https://github.com/matrix-org/synapse/issues/12677)) +- Preparation for database schema simplifications: stop writing to `event_reference_hashes`. ([\#12679](https://github.com/matrix-org/synapse/issues/12679)) +- Remove code which updates unused database column `application_services_state.last_txn`. ([\#12680](https://github.com/matrix-org/synapse/issues/12680)) +- Refactor `EventContext` class. ([\#12689](https://github.com/matrix-org/synapse/issues/12689)) +- Remove an unneeded class in the push code. ([\#12691](https://github.com/matrix-org/synapse/issues/12691)) +- Consolidate parsing of relation information from events. ([\#12693](https://github.com/matrix-org/synapse/issues/12693)) +- Convert namespace class `Codes` into a string enum. ([\#12703](https://github.com/matrix-org/synapse/issues/12703)) +- Optimize private read receipt filtering.
([\#12711](https://github.com/matrix-org/synapse/issues/12711)) +- Drop the logging level of status messages for the URL preview cache expiry job from INFO to DEBUG. ([\#12720](https://github.com/matrix-org/synapse/issues/12720)) +- Downgrade some OIDC errors to warnings in the logs, to reduce the noise of Sentry reports. ([\#12723](https://github.com/matrix-org/synapse/issues/12723)) +- Update configs used by Complement to allow more invites/3PID validations during tests. ([\#12731](https://github.com/matrix-org/synapse/issues/12731)) +- Tweak the mypy plugin so that `@cached` can accept `on_invalidate=None`. ([\#12769](https://github.com/matrix-org/synapse/issues/12769)) +- Move methods that call `add_push_rule` to the `PushRuleStore` class. ([\#12772](https://github.com/matrix-org/synapse/issues/12772)) +- Make handling of federation Authorization header (more) compliant with RFC7230. ([\#12774](https://github.com/matrix-org/synapse/issues/12774)) +- Refactor `resolve_state_groups_for_events` to not pull out full state when no state resolution happens. ([\#12775](https://github.com/matrix-org/synapse/issues/12775)) +- Stop retrying background updates after 5 consecutive failures. ([\#12781](https://github.com/matrix-org/synapse/issues/12781)) +- Fix federation when using the demo scripts. ([\#12783](https://github.com/matrix-org/synapse/issues/12783)) +- The `hash_password` script now fails when it is called without specifying a config file. Contributed by @jae1911. ([\#12789](https://github.com/matrix-org/synapse/issues/12789)) +- Improve and fix type hints. ([\#12567](https://github.com/matrix-org/synapse/issues/12567), [\#12477](https://github.com/matrix-org/synapse/issues/12477), [\#12717](https://github.com/matrix-org/synapse/issues/12717), [\#12753](https://github.com/matrix-org/synapse/issues/12753), [\#12695](https://github.com/matrix-org/synapse/issues/12695), [\#12734](https://github.com/matrix-org/synapse/issues/12734), [\#12716](https://github.com/matrix-org/synapse/issues/12716), [\#12726](https://github.com/matrix-org/synapse/issues/12726), [\#12790](https://github.com/matrix-org/synapse/issues/12790), [\#12833](https://github.com/matrix-org/synapse/issues/12833)) +- Update EventContext `get_current_event_ids` and `get_prev_event_ids` to accept state filters and update calls where possible. ([\#12791](https://github.com/matrix-org/synapse/issues/12791)) +- Remove Caddy from the Synapse workers image used in Complement. ([\#12818](https://github.com/matrix-org/synapse/issues/12818)) +- Add Complement's shared registration secret to the Complement worker image. This fixes tests that depend on it. ([\#12819](https://github.com/matrix-org/synapse/issues/12819)) +- Support registering Application Services when running with workers under Complement. ([\#12826](https://github.com/matrix-org/synapse/issues/12826)) +- Disable 'faster room join' Complement tests when testing against Synapse with workers. ([\#12842](https://github.com/matrix-org/synapse/issues/12842)) + + +Synapse 1.59.1 (2022-05-18) +=========================== + +This release fixes a long-standing issue which could prevent Synapse's user directory from updating properly. + +Bugfixes +-------- + +- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. Contributed by Nick @ Beeper.
([\#12762](https://github.com/matrix-org/synapse/issues/12762)) + + +Synapse 1.59.0 (2022-05-17) +=========================== + +Synapse 1.59 makes several changes that server administrators should be aware of: + +- Device name lookup over federation is now disabled by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616)) +- The `synapse.app.appservice` and `synapse.app.user_dir` worker application types are now deprecated. ([\#12452](https://github.com/matrix-org/synapse/issues/12452), [\#12654](https://github.com/matrix-org/synapse/issues/12654)) + +See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1590) for more details. + +Additionally, this release removes the non-standard `m.login.jwt` login type from Synapse. It can be replaced with `org.matrix.login.jwt` for identical behaviour. This is only used if `jwt_config.enabled` is set to `true` in the configuration. ([\#12597](https://github.com/matrix-org/synapse/issues/12597)) + + +Bugfixes +-------- + +- Fix DB performance regression introduced in Synapse 1.59.0rc2. ([\#12745](https://github.com/matrix-org/synapse/issues/12745)) + + +Synapse 1.59.0rc2 (2022-05-16) +============================== + +Note: this release candidate includes a performance regression which can cause database disruption. Other release candidates in the v1.59.0 series are not affected, and a fix will be included in the v1.59.0 final release. + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.58.0 where `/sync` would fail if the most recent event in a room was rejected. ([\#12729](https://github.com/matrix-org/synapse/issues/12729)) + + +Synapse 1.59.0rc1 (2022-05-10) +============================== + +Features +-------- + +- Support [MSC3266](https://github.com/matrix-org/matrix-doc/pull/3266) room summaries over federation. ([\#11507](https://github.com/matrix-org/synapse/issues/11507)) +- Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner. ([\#12168](https://github.com/matrix-org/synapse/issues/12168), [\#12635](https://github.com/matrix-org/synapse/issues/12635), [\#12636](https://github.com/matrix-org/synapse/issues/12636), [\#12670](https://github.com/matrix-org/synapse/issues/12670)) +- Extend the [module API](https://github.com/matrix-org/synapse/blob/release-v1.59/synapse/module_api/__init__.py) to allow modules to change actions for existing push rules of local users. ([\#12406](https://github.com/matrix-org/synapse/issues/12406)) +- Add the `notify_appservices_from_worker` configuration option (superseding `notify_appservices`) to allow a generic worker to be designated as the worker to send traffic to Application Services. ([\#12452](https://github.com/matrix-org/synapse/issues/12452)) +- Add the `update_user_directory_from_worker` configuration option (superseding `update_user_directory`) to allow a generic worker to be designated as the worker to update the user directory. ([\#12654](https://github.com/matrix-org/synapse/issues/12654)) +- Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid. 
([\#12526](https://github.com/matrix-org/synapse/issues/12526)) +- Implement [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786): Add a default push rule to ignore `m.room.server_acl` events. ([\#12601](https://github.com/matrix-org/synapse/issues/12601)) +- Add new `mau_appservice_trial_days` configuration option to specify a different trial period for users registered via an appservice. ([\#12619](https://github.com/matrix-org/synapse/issues/12619)) + + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.48.0 where the latest thread reply failed to include the proper bundled aggregations. ([\#12273](https://github.com/matrix-org/synapse/issues/12273)) +- Fix a bug introduced in Synapse 1.22.0 where attempting to send a large amount of read receipts to an application service all at once would result in duplicate content and abnormally high memory usage. Contributed by Brad & Nick @ Beeper. ([\#12544](https://github.com/matrix-org/synapse/issues/12544)) +- Fix a bug introduced in Synapse 1.57.0 which could cause `Failed to calculate hosts in room` errors to be logged for outbound federation. ([\#12570](https://github.com/matrix-org/synapse/issues/12570)) +- Fix a long-standing bug where status codes would almost always get logged as `200!`, irrespective of the actual status code, when clients disconnect before a request has finished processing. ([\#12580](https://github.com/matrix-org/synapse/issues/12580)) +- Fix race when persisting an event and deleting a room that could lead to outbound federation breaking. ([\#12594](https://github.com/matrix-org/synapse/issues/12594)) +- Fix a bug introduced in Synapse 1.53.0 where bundled aggregations for annotations/edits were incorrectly calculated. ([\#12633](https://github.com/matrix-org/synapse/issues/12633)) +- Fix a long-standing bug where rooms containing power levels with string values could not be upgraded. ([\#12657](https://github.com/matrix-org/synapse/issues/12657)) +- Prevent a memory leak from recurring when presence is disabled. ([\#12656](https://github.com/matrix-org/synapse/issues/12656)) + + +Updates to the Docker image +--------------------------- + +- Explicitly opt in to using [BuildKit-specific features](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md) in the Dockerfile. This fixes issues with building images in some GitLab CI environments. ([\#12541](https://github.com/matrix-org/synapse/issues/12541)) +- Update the "Build docker images" GitHub Actions workflow to use `docker/metadata-action` to generate docker image tags, instead of a custom shell script. Contributed by @henryclw. ([\#12573](https://github.com/matrix-org/synapse/issues/12573)) + + +Improved Documentation +---------------------- + +- Update SQL statements and replace use of old table `user_stats_historical` in docs for Synapse Admins. ([\#12536](https://github.com/matrix-org/synapse/issues/12536)) +- Add missing linebreak to `pipx` install instructions. ([\#12579](https://github.com/matrix-org/synapse/issues/12579)) +- Add information about the TCP replication module to docs. ([\#12621](https://github.com/matrix-org/synapse/issues/12621)) +- Fixes to the formatting of `README.rst`. ([\#12627](https://github.com/matrix-org/synapse/issues/12627)) +- Fix docs on how to run specific Complement tests using the `complement.sh` test runner.
([\#12664](https://github.com/matrix-org/synapse/issues/12664)) + + +Deprecations and Removals +------------------------- + +- Remove unstable identifiers from [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069). ([\#12596](https://github.com/matrix-org/synapse/issues/12596)) +- Remove the unspecified `m.login.jwt` login type and the unstable `uk.half-shot.msc2778.login.application_service` from + [MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778). ([\#12597](https://github.com/matrix-org/synapse/issues/12597)) +- Synapse now requires at least Python 3.7.1 (up from 3.7.0), for compatibility with the latest Twisted trunk. ([\#12613](https://github.com/matrix-org/synapse/issues/12613)) + + +Internal Changes +---------------- + +- Use supervisord to supervise Postgres and Caddy in the Complement image to reduce restart time. ([\#12480](https://github.com/matrix-org/synapse/issues/12480)) +- Immediately retry any requests that have backed off when a server comes back online. ([\#12500](https://github.com/matrix-org/synapse/issues/12500)) +- Use `make_awaitable` instead of `defer.succeed` for return values of mocks in tests. ([\#12505](https://github.com/matrix-org/synapse/issues/12505)) +- Consistently check if an object is a `frozendict`. ([\#12564](https://github.com/matrix-org/synapse/issues/12564)) +- Protect module callbacks with read semantics against cancellation. ([\#12568](https://github.com/matrix-org/synapse/issues/12568)) +- Improve comments and error messages around access tokens. ([\#12577](https://github.com/matrix-org/synapse/issues/12577)) +- Improve docstrings for the receipts store. ([\#12581](https://github.com/matrix-org/synapse/issues/12581)) +- Use constants for read-receipts in tests. ([\#12582](https://github.com/matrix-org/synapse/issues/12582)) +- Log status code of cancelled requests as 499 and avoid logging stack traces for them. ([\#12587](https://github.com/matrix-org/synapse/issues/12587), [\#12663](https://github.com/matrix-org/synapse/issues/12663)) +- Remove special-case for `twisted` logger from default log config. ([\#12589](https://github.com/matrix-org/synapse/issues/12589)) +- Use `getClientAddress` instead of the deprecated `getClientIP`. ([\#12599](https://github.com/matrix-org/synapse/issues/12599)) +- Add link to documentation in Grafana Dashboard. ([\#12602](https://github.com/matrix-org/synapse/issues/12602)) +- Reduce log spam when running multiple event persisters. ([\#12610](https://github.com/matrix-org/synapse/issues/12610)) +- Add extra debug logging to federation sender. ([\#12614](https://github.com/matrix-org/synapse/issues/12614)) +- Prevent remote homeservers from requesting local user device names by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616)) +- Add a consistency check on events which we read from the database. ([\#12620](https://github.com/matrix-org/synapse/issues/12620)) +- Remove use of the `constantly` library and switch to enums for `EventRedactBehaviour`. Contributed by @andrewdoh. ([\#12624](https://github.com/matrix-org/synapse/issues/12624)) +- Remove unused code related to receipts. ([\#12632](https://github.com/matrix-org/synapse/issues/12632)) +- Minor improvements to the scripts for running Synapse in worker mode under Complement. ([\#12637](https://github.com/matrix-org/synapse/issues/12637)) +- Move `pympler` back in to the `all` extras. ([\#12652](https://github.com/matrix-org/synapse/issues/12652)) +- Fix spelling of `M_UNRECOGNIZED` in comments. 
([\#12665](https://github.com/matrix-org/synapse/issues/12665)) +- Release script: confirm the commit to be tagged before tagging. ([\#12556](https://github.com/matrix-org/synapse/issues/12556)) +- Fix a typo in the announcement text generated by the Synapse release development script. ([\#12612](https://github.com/matrix-org/synapse/issues/12612)) + +### Typechecking + +- Fix scripts-dev to pass typechecking. ([\#12356](https://github.com/matrix-org/synapse/issues/12356)) +- Add some type hints to datastore. ([\#12485](https://github.com/matrix-org/synapse/issues/12485)) +- Remove unused `# type: ignore`s. ([\#12531](https://github.com/matrix-org/synapse/issues/12531)) +- Allow unused `# type: ignore` comments in bleeding edge CI jobs. ([\#12576](https://github.com/matrix-org/synapse/issues/12576)) +- Remove redundant lines of config from `mypy.ini`. ([\#12608](https://github.com/matrix-org/synapse/issues/12608)) +- Update to mypy 0.950. ([\#12650](https://github.com/matrix-org/synapse/issues/12650)) +- Use `Concatenate` to better annotate `_do_execute`. ([\#12666](https://github.com/matrix-org/synapse/issues/12666)) +- Use `ParamSpec` to refine type hints. ([\#12667](https://github.com/matrix-org/synapse/issues/12667)) +- Fix mypy against latest pillow stubs. ([\#12671](https://github.com/matrix-org/synapse/issues/12671)) + +Synapse 1.58.1 (2022-05-05) +=========================== + +This patch release includes a fix to the Debian packages, installing the +`systemd` and `cache_memory` extra package groups, which were incorrectly +omitted in v1.58.0. This primarily prevented Synapse from starting +when the `systemd.journal.JournalHandler` log handler was configured. +See [#12631](https://github.com/matrix-org/synapse/issues/12631) for further information. + +Otherwise, no significant changes since 1.58.0. + + +Synapse 1.58.0 (2022-05-03) +=========================== + +As of this release, the groups/communities feature in Synapse is now disabled by default. See [\#11584](https://github.com/matrix-org/synapse/issues/11584) for details. As mentioned in [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1580), this feature will be removed in Synapse 1.61. + +No significant changes since 1.58.0rc2. + + +Synapse 1.58.0rc2 (2022-04-26) +============================== + +This release candidate fixes bugs related to Synapse 1.58.0rc1's logic for handling device list updates. + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.58.0rc1 where the main process could consume excessive amounts of CPU and memory while handling sentry logging failures. ([\#12554](https://github.com/matrix-org/synapse/issues/12554)) +- Fix a bug introduced in Synapse 1.58.0rc1 where opentracing contexts were not correctly sent to whitelisted remote servers with device lists updates. ([\#12555](https://github.com/matrix-org/synapse/issues/12555)) + + +Internal Changes +---------------- + +- Reduce unnecessary work when handling remote device list updates. ([\#12557](https://github.com/matrix-org/synapse/issues/12557)) + + +Synapse 1.58.0rc1 (2022-04-26) +============================== + +Features +-------- + +- Implement [MSC3383](https://github.com/matrix-org/matrix-spec-proposals/pull/3383) for including the destination in server-to-server authentication headers. Contributed by @Bubu and @jcgruenhage for Famedly. 
([\#11398](https://github.com/matrix-org/synapse/issues/11398)) +- Docker images and Debian packages from matrix.org now contain a locked set of Python dependencies, greatly improving build reproducibility. ([Board](https://github.com/orgs/matrix-org/projects/54), [\#11537](https://github.com/matrix-org/synapse/issues/11537)) +- Enable processing of device list updates asynchronously. ([\#12365](https://github.com/matrix-org/synapse/issues/12365), [\#12465](https://github.com/matrix-org/synapse/issues/12465)) +- Implement [MSC2815](https://github.com/matrix-org/matrix-spec-proposals/pull/2815) to allow room moderators to view redacted event content. Contributed by @tulir @ Beeper. ([\#12427](https://github.com/matrix-org/synapse/issues/12427)) +- Build Debian packages for Ubuntu 22.04 "Jammy Jellyfish". ([\#12543](https://github.com/matrix-org/synapse/issues/12543)) + + +Bugfixes +-------- + +- Prevent a sync request from removing a user's busy presence status. ([\#12213](https://github.com/matrix-org/synapse/issues/12213)) +- Fix bug with incremental sync missing events when rejoining/backfilling. Contributed by Nick @ Beeper. ([\#12319](https://github.com/matrix-org/synapse/issues/12319)) +- Fix a long-standing bug which incorrectly caused `GET /_matrix/client/v3/rooms/{roomId}/event/{eventId}` to return edited events rather than the original. ([\#12476](https://github.com/matrix-org/synapse/issues/12476)) +- Fix a bug introduced in Synapse 1.27.0 where the admin API for [deleting forward extremities](https://github.com/matrix-org/synapse/blob/erikj/fix_delete_event_response_count/docs/admin_api/rooms.md#deleting-forward-extremities) would always return a count of 1, no matter how many extremities were deleted. ([\#12496](https://github.com/matrix-org/synapse/issues/12496)) +- Fix a long-standing bug where the image thumbnails embedded into email notifications were broken. ([\#12510](https://github.com/matrix-org/synapse/issues/12510)) +- Fix a bug in the implementation of [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202) where Synapse would use the field name `device_unused_fallback_keys`, rather than `device_unused_fallback_key_types`. ([\#12520](https://github.com/matrix-org/synapse/issues/12520)) +- Fix a bug introduced in Synapse 0.99.3 which could cause Synapse to consume large amounts of RAM when back-paginating in a large room. ([\#12522](https://github.com/matrix-org/synapse/issues/12522)) + + +Improved Documentation +---------------------- + +- Fix rendering of the documentation site when using the 'print' feature. ([\#12340](https://github.com/matrix-org/synapse/issues/12340)) +- Add a manual documenting config file options. ([\#12368](https://github.com/matrix-org/synapse/issues/12368), [\#12527](https://github.com/matrix-org/synapse/issues/12527)) +- Update documentation to reflect that both the `run_background_tasks_on` option and the options for moving stream writers off of the main process are no longer experimental. ([\#12451](https://github.com/matrix-org/synapse/issues/12451)) +- Update worker documentation and replace old `federation_reader` with `generic_worker`. ([\#12457](https://github.com/matrix-org/synapse/issues/12457)) +- Strongly recommend [Poetry](https://python-poetry.org/) for development. ([\#12475](https://github.com/matrix-org/synapse/issues/12475)) +- Add some example configurations for workers and update architectural diagram. ([\#12492](https://github.com/matrix-org/synapse/issues/12492)) +- Fix a broken link in `README.rst`. 
([\#12495](https://github.com/matrix-org/synapse/issues/12495)) +- Add HAProxy delegation example with CORS headers to docs. ([\#12501](https://github.com/matrix-org/synapse/issues/12501)) +- Remove extraneous comma in User Admin API's device deletion section so that the example JSON is actually valid and works. Contributed by @olmari. ([\#12533](https://github.com/matrix-org/synapse/issues/12533)) + + +Deprecations and Removals +------------------------- + +- The groups/communities feature in Synapse is now disabled by default. ([\#12344](https://github.com/matrix-org/synapse/issues/12344)) +- Remove unstable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440). ([\#12382](https://github.com/matrix-org/synapse/issues/12382)) + + +Internal Changes +---------------- + +- Preparation for faster-room-join work: start a background process to resynchronise the room state after a room join. ([\#12394](https://github.com/matrix-org/synapse/issues/12394)) +- Preparation for faster-room-join work: Implement a tracking mechanism to allow functions to wait for full room state to arrive. ([\#12399](https://github.com/matrix-org/synapse/issues/12399)) +- Remove an unstable identifier from [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083). ([\#12395](https://github.com/matrix-org/synapse/issues/12395)) +- Run CI in the locked [Poetry](https://python-poetry.org/) environment, and remove corresponding `tox` jobs. ([\#12425](https://github.com/matrix-org/synapse/issues/12425), [\#12434](https://github.com/matrix-org/synapse/issues/12434), [\#12438](https://github.com/matrix-org/synapse/issues/12438), [\#12441](https://github.com/matrix-org/synapse/issues/12441), [\#12449](https://github.com/matrix-org/synapse/issues/12449), [\#12478](https://github.com/matrix-org/synapse/issues/12478), [\#12514](https://github.com/matrix-org/synapse/issues/12514), [\#12472](https://github.com/matrix-org/synapse/issues/12472)) +- Change Mutual Rooms' `unstable_features` flag to `uk.half-shot.msc2666.mutual_rooms` which matches the current iteration of [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666). ([\#12445](https://github.com/matrix-org/synapse/issues/12445)) +- Fix typo in the release script help string. ([\#12450](https://github.com/matrix-org/synapse/issues/12450)) +- Fix a minor typo in the Debian changelogs generated by the release script. ([\#12497](https://github.com/matrix-org/synapse/issues/12497)) +- Reintroduce the list of targets to the linter script, to avoid linting unwanted local-only directories during development. ([\#12455](https://github.com/matrix-org/synapse/issues/12455)) +- Limit length of `device_id` to less than 512 characters. ([\#12454](https://github.com/matrix-org/synapse/issues/12454)) +- Dockerfile-workers: reduce the amount we install in the image. ([\#12464](https://github.com/matrix-org/synapse/issues/12464)) +- Dockerfile-workers: give the master its own log config. ([\#12466](https://github.com/matrix-org/synapse/issues/12466)) +- complement-synapse-workers: factor out separate entry point script. ([\#12467](https://github.com/matrix-org/synapse/issues/12467)) +- Back out experimental implementation of [MSC2314](https://github.com/matrix-org/matrix-spec-proposals/pull/2314). ([\#12474](https://github.com/matrix-org/synapse/issues/12474)) +- Fix grammatical error in federation error response when the room version of a room is unknown. 
([\#12483](https://github.com/matrix-org/synapse/issues/12483)) +- Remove unnecessary configuration overrides in tests. ([\#12511](https://github.com/matrix-org/synapse/issues/12511)) +- Refactor the relations code for clarity. ([\#12519](https://github.com/matrix-org/synapse/issues/12519)) +- Add type hints so `docker` and `stubs` directories pass `mypy --disallow-untyped-defs`. ([\#12528](https://github.com/matrix-org/synapse/issues/12528)) +- Update `delay_cancellation` to accept any awaitable, rather than just `Deferred`s. ([\#12468](https://github.com/matrix-org/synapse/issues/12468)) +- Handle cancellation in `EventsWorkerStore._get_events_from_cache_or_db`. ([\#12529](https://github.com/matrix-org/synapse/issues/12529)) + + +Synapse 1.57.1 (2022-04-20) +=========================== + +This is a patch release that only affects the Docker image. It is only of interest to administrators using [the LDAP module][LDAPModule] to authenticate their users. +If you have already upgraded to Synapse 1.57.0 without problem, then you have no need to upgrade to this patch release. + +[LDAPModule]: https://github.com/matrix-org/matrix-synapse-ldap3 + + +Updates to the Docker image +--------------------------- + +- Include version 0.2.0 of the Synapse LDAP Auth Provider module in the Docker image. This matches the version that was present in the Docker image for Synapse 1.56.0. ([\#12512](https://github.com/matrix-org/synapse/issues/12512)) + + +Synapse 1.57.0 (2022-04-19) +=========================== + +This version includes a [change](https://github.com/matrix-org/synapse/pull/12209) to the way transaction IDs are managed for application services. If your deployment uses a dedicated worker for application service traffic, **it must be stopped** when the database is upgraded (which normally happens when the main process is upgraded), to ensure the change is made safely without any risk of reusing transaction IDs. + +See the [upgrade notes](https://github.com/matrix-org/synapse/blob/v1.57.0rc1/docs/upgrade.md#upgrading-to-v1570) for more details. + +No significant changes since 1.57.0rc1. + + +Synapse 1.57.0rc1 (2022-04-12) +============================== + +Features +-------- + +- Send device list changes to application services as specified by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202), using unstable prefixes. The `msc3202_transaction_extensions` experimental homeserver config option must be enabled and `org.matrix.msc3202: true` must be present in the application service registration file for device list changes to be sent. The "left" field is currently always empty. ([\#11881](https://github.com/matrix-org/synapse/issues/11881)) +- Optimise fetching large quantities of missing room state over federation. ([\#12040](https://github.com/matrix-org/synapse/issues/12040)) +- Offload the `update_client_ip` background job from the main process to the background worker, when using Redis-based replication. ([\#12251](https://github.com/matrix-org/synapse/issues/12251)) +- Move `update_client_ip` background job from the main process to the background worker. ([\#12252](https://github.com/matrix-org/synapse/issues/12252)) +- Add a module callback to react to new 3PID (email address, phone number) associations. ([\#12302](https://github.com/matrix-org/synapse/issues/12302)) +- Add a configuration option to remove a specific set of rooms from sync responses. ([\#12310](https://github.com/matrix-org/synapse/issues/12310)) +- Add a module callback to react to account data changes. 
([\#12327](https://github.com/matrix-org/synapse/issues/12327)) +- Allow setting user admin status using the module API. Contributed by Famedly. ([\#12341](https://github.com/matrix-org/synapse/issues/12341)) +- Reduce overhead of restarting synchrotrons. ([\#12367](https://github.com/matrix-org/synapse/issues/12367), [\#12372](https://github.com/matrix-org/synapse/issues/12372)) +- Update `/messages` to use historic pagination tokens if no `from` query parameter is given. ([\#12370](https://github.com/matrix-org/synapse/issues/12370)) +- Add a module API for reading and writing global account data. ([\#12391](https://github.com/matrix-org/synapse/issues/12391)) +- Support the stable `v1` endpoint for `/relations`, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#12403](https://github.com/matrix-org/synapse/issues/12403)) +- Include bundled aggregations in search results + ([MSC3666](https://github.com/matrix-org/matrix-spec-proposals/pull/3666)). ([\#12436](https://github.com/matrix-org/synapse/issues/12436)) + + +Bugfixes +-------- + +- Fix a long-standing bug where updates to the server notices user profile (display name/avatar URL) in the configuration would not be applied to pre-existing rooms. Contributed by Jorge Florian. ([\#12115](https://github.com/matrix-org/synapse/issues/12115)) +- Fix a long-standing bug where events from ignored users were still considered for bundled aggregations. ([\#12235](https://github.com/matrix-org/synapse/issues/12235), [\#12338](https://github.com/matrix-org/synapse/issues/12338)) +- Fix non-member state events not resolving for historical events when used in [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) `/batch_send` `state_events_at_start`. ([\#12329](https://github.com/matrix-org/synapse/issues/12329)) +- Fix a long-standing bug affecting URL previews that would generate a 500 response instead of a 403 if the previewed URL includes a port that isn't allowed by the relevant blacklist. ([\#12333](https://github.com/matrix-org/synapse/issues/12333)) +- Default to `private` room visibility rather than `public` when a client does not specify one, according to spec. ([\#12350](https://github.com/matrix-org/synapse/issues/12350)) +- Fix a spec compliance issue where requests to the `/publicRooms` federation API would specify `limit` as a string. ([\#12364](https://github.com/matrix-org/synapse/issues/12364), [\#12410](https://github.com/matrix-org/synapse/issues/12410)) +- Fix a bug introduced in Synapse 1.49.0 which caused the `synapse_event_persisted_position` metric to have invalid values. ([\#12390](https://github.com/matrix-org/synapse/issues/12390)) + + +Updates to the Docker image +--------------------------- + +- Bundle locked versions of dependencies into the Docker image. ([\#12385](https://github.com/matrix-org/synapse/issues/12385), [\#12439](https://github.com/matrix-org/synapse/issues/12439)) +- Fix up healthcheck generation for workers docker image. ([\#12405](https://github.com/matrix-org/synapse/issues/12405)) + + +Improved Documentation +---------------------- + +- Clarify documentation for running SyTest against Synapse, including use of Postgres and worker mode. ([\#12271](https://github.com/matrix-org/synapse/issues/12271)) +- Document the behaviour of `LoggingTransaction.call_after` and `LoggingTransaction.call_on_exception` methods when transactions are retried. 
([\#12315](https://github.com/matrix-org/synapse/issues/12315)) +- Update dead links in `check-newsfragment.sh` to point to the correct documentation URL. ([\#12331](https://github.com/matrix-org/synapse/issues/12331)) +- Upgrade the version of `mdbook` in CI to 0.4.17. ([\#12339](https://github.com/matrix-org/synapse/issues/12339)) +- Updates to the Room DAG concepts development document to clarify that we mark events as outliers because we don't have any state for them. ([\#12345](https://github.com/matrix-org/synapse/issues/12345)) +- Update the link to Redis pub/sub documentation in the workers documentation. ([\#12369](https://github.com/matrix-org/synapse/issues/12369)) +- Remove documentation for converting a legacy structured logging configuration to the new format. ([\#12392](https://github.com/matrix-org/synapse/issues/12392)) + + +Deprecations and Removals +------------------------- + +- Remove the unused and unstable `/aggregations` endpoint which was removed from [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#12293](https://github.com/matrix-org/synapse/issues/12293)) + + +Internal Changes +---------------- + +- Remove lingering unstable references to MSC2403 (knocking). ([\#12165](https://github.com/matrix-org/synapse/issues/12165)) +- Avoid trying to calculate the state at outlier events. ([\#12191](https://github.com/matrix-org/synapse/issues/12191), [\#12316](https://github.com/matrix-org/synapse/issues/12316), [\#12330](https://github.com/matrix-org/synapse/issues/12330), [\#12332](https://github.com/matrix-org/synapse/issues/12332), [\#12409](https://github.com/matrix-org/synapse/issues/12409)) +- Omit sending "offline" presence updates to application services after they are initially configured. ([\#12193](https://github.com/matrix-org/synapse/issues/12193)) +- Switch to using a sequence to generate AS transaction IDs. Contributed by Nick @ Beeper. If running synapse with a dedicated appservice worker, this MUST be stopped before upgrading the main process and database. ([\#12209](https://github.com/matrix-org/synapse/issues/12209)) +- Add missing type hints for storage. ([\#12267](https://github.com/matrix-org/synapse/issues/12267)) +- Add missing type definitions for scripts in docker folder. Contributed by Jorge Florian. ([\#12280](https://github.com/matrix-org/synapse/issues/12280)) +- Move [MSC2654](https://github.com/matrix-org/matrix-doc/pull/2654) support behind an experimental configuration flag. ([\#12295](https://github.com/matrix-org/synapse/issues/12295)) +- Update docstrings to explain how to decipher live and historic pagination tokens. ([\#12317](https://github.com/matrix-org/synapse/issues/12317)) +- Add ground work for speeding up device list updates for users in large numbers of rooms. ([\#12321](https://github.com/matrix-org/synapse/issues/12321)) +- Fix typechecker problems exposed by signedjson 1.1.2. ([\#12326](https://github.com/matrix-org/synapse/issues/12326)) +- Remove the `tox` packaging job: it will be redundant once #11537 lands. ([\#12334](https://github.com/matrix-org/synapse/issues/12334)) +- Ignore `.envrc` for `direnv` users. ([\#12335](https://github.com/matrix-org/synapse/issues/12335)) +- Remove the (broadly unused, dev-only) dockerfile for pg tests. ([\#12336](https://github.com/matrix-org/synapse/issues/12336)) +- Remove redundant `get_success` calls in test code. ([\#12346](https://github.com/matrix-org/synapse/issues/12346)) +- Add type annotations for `tests/unittest.py`. 
([\#12347](https://github.com/matrix-org/synapse/issues/12347)) +- Move single-use methods out of `TestCase`. ([\#12348](https://github.com/matrix-org/synapse/issues/12348)) +- Remove broken and unused development scripts. ([\#12349](https://github.com/matrix-org/synapse/issues/12349), [\#12351](https://github.com/matrix-org/synapse/issues/12351), [\#12355](https://github.com/matrix-org/synapse/issues/12355)) +- Convert `Linearizer` tests from `inlineCallbacks` to async. ([\#12353](https://github.com/matrix-org/synapse/issues/12353)) +- Update docstrings for `ReadWriteLock` tests. ([\#12354](https://github.com/matrix-org/synapse/issues/12354)) +- Refactor `Linearizer`, convert methods to async and use an async context manager. ([\#12357](https://github.com/matrix-org/synapse/issues/12357)) +- Fix a long-standing bug where `Linearizer`s could get stuck if a cancellation were to happen at the wrong time. ([\#12358](https://github.com/matrix-org/synapse/issues/12358)) +- Make `StreamToken.from_string` and `RoomStreamToken.parse` propagate cancellations instead of replacing them with `SynapseError`s. ([\#12366](https://github.com/matrix-org/synapse/issues/12366)) +- Add type hints to tests files. ([\#12371](https://github.com/matrix-org/synapse/issues/12371)) +- Allow specifying the Postgres database's port when running unit tests with Postgres. ([\#12376](https://github.com/matrix-org/synapse/issues/12376)) +- Remove temporary pin of signedjson<=1.1.1 that was added in Synapse 1.56.0. ([\#12379](https://github.com/matrix-org/synapse/issues/12379)) +- Add opentracing spans to calls to external cache. ([\#12380](https://github.com/matrix-org/synapse/issues/12380)) +- Lay groundwork for using `poetry` to manage Synapse's dependencies. ([\#12381](https://github.com/matrix-org/synapse/issues/12381), [\#12407](https://github.com/matrix-org/synapse/issues/12407), [\#12412](https://github.com/matrix-org/synapse/issues/12412), [\#12418](https://github.com/matrix-org/synapse/issues/12418)) +- Make missing `importlib_metadata` dependency explicit. ([\#12384](https://github.com/matrix-org/synapse/issues/12384), [\#12400](https://github.com/matrix-org/synapse/issues/12400)) +- Update type annotations for compatibility with prometheus_client 0.14. ([\#12389](https://github.com/matrix-org/synapse/issues/12389)) +- Remove support for the unstable identifiers specified in [MSC3288](https://github.com/matrix-org/matrix-doc/pull/3288). ([\#12398](https://github.com/matrix-org/synapse/issues/12398)) +- Add missing type hints to configuration classes. ([\#12402](https://github.com/matrix-org/synapse/issues/12402)) +- Add files used to build the Docker image used for Complement testing into the Synapse repository. ([\#12404](https://github.com/matrix-org/synapse/issues/12404)) +- Do not include groups in the sync response when disabled. ([\#12408](https://github.com/matrix-org/synapse/issues/12408)) +- Improve type hints related to HTTP query parameters. ([\#12415](https://github.com/matrix-org/synapse/issues/12415)) +- Stop maintaining a list of lint targets. ([\#12420](https://github.com/matrix-org/synapse/issues/12420)) +- Make `synapse._scripts` pass type checks. ([\#12421](https://github.com/matrix-org/synapse/issues/12421), [\#12422](https://github.com/matrix-org/synapse/issues/12422)) +- Add some type hints to datastore. ([\#12423](https://github.com/matrix-org/synapse/issues/12423)) +- Enable certificate checking during Complement tests.
([\#12435](https://github.com/matrix-org/synapse/issues/12435)) +- Explicitly specify the `tls` extra for Twisted dependency. ([\#12444](https://github.com/matrix-org/synapse/issues/12444)) + + +Synapse 1.56.0 (2022-04-05) +=========================== + +Synapse will now refuse to start up if open registration is enabled, in order to help mitigate +abuse across the federation. If you would like +to keep registration open to anyone, consider adding [email](https://github.com/matrix-org/synapse/blob/8a519f8abc6de772167c2cca101d22ee2052fafc/docs/sample_config.yaml#L1285), +[recaptcha](https://matrix-org.github.io/synapse/v1.56/CAPTCHA_SETUP.html) +or [token-based](https://matrix-org.github.io/synapse/v1.56/usage/administration/admin_api/registration_tokens.html) verification +in order to prevent automated registration from bad actors. +This check can be disabled by setting the `enable_registration_without_verification` option in your +homeserver configuration file to `true`. More details are available in the +[upgrade notes](https://matrix-org.github.io/synapse/v1.56/upgrade.html#open-registration-without-verification-is-now-disabled-by-default). + +Synapse will additionally now refuse to start when using PostgreSQL with non-`C` values for `COLLATE` and `CTYPE`, unless +the config flag `allow_unsafe_locale`, found in the database section of the configuration file, is set to `true`. See the +[upgrade notes](https://matrix-org.github.io/synapse/v1.56/upgrade#change-in-behaviour-for-postgresql-databases-with-unsafe-locale) +for details.
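As a rough illustration of the locale requirement above (an unofficial sketch, not a check shipped with Synapse; it assumes `psycopg2` is installed, and the connection string is a placeholder to adjust for your deployment), the database's collation settings can be inspected directly:

```python
# Unofficial sketch: inspect the database's COLLATE and CTYPE, which Synapse
# 1.56 requires to be "C" unless allow_unsafe_locale is set in the database
# section of the homeserver config. The DSN below is a placeholder.
import psycopg2

conn = psycopg2.connect("dbname=synapse user=synapse_user")  # placeholder DSN
with conn, conn.cursor() as cur:
    cur.execute(
        "SELECT datcollate, datctype FROM pg_database"
        " WHERE datname = current_database()"
    )
    collate, ctype = cur.fetchone()

if (collate, ctype) != ("C", "C"):
    print(f"locale is {collate}/{ctype}: Synapse 1.56 will refuse to start"
          " unless allow_unsafe_locale is set")
else:
    print("locale is C/C: no action needed")
```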
Internal Changes +---------------- + +- Bump the version of `black` for compatibility with the latest `click` release. ([\#12320](https://github.com/matrix-org/synapse/issues/12320)) + + +Synapse 1.56.0rc1 (2022-03-29) +============================== + +Features +-------- + +- Allow modules to store already existing 3PID associations. ([\#12195](https://github.com/matrix-org/synapse/issues/12195)) +- Allow registering server administrators using the module API. Contributed by Famedly. ([\#12250](https://github.com/matrix-org/synapse/issues/12250)) + + +Bugfixes +-------- + +- Fix a long-standing bug which caused the `/_matrix/federation/v1/state` and `/_matrix/federation/v1/state_ids` endpoints to return incorrect or invalid data when called for an event which we have stored as an "outlier". ([\#12087](https://github.com/matrix-org/synapse/issues/12087)) +- Fix a long-standing bug where events from ignored users would still be considered for relations. ([\#12227](https://github.com/matrix-org/synapse/issues/12227), [\#12232](https://github.com/matrix-org/synapse/issues/12232), [\#12285](https://github.com/matrix-org/synapse/issues/12285)) +- Fix a bug introduced in Synapse 1.53.0 where an unnecessary query could be performed when fetching bundled aggregations for threads. ([\#12228](https://github.com/matrix-org/synapse/issues/12228)) +- Fix a bug introduced in Synapse 1.52.0 where admins could not deactivate and GDPR-erase a user if Synapse was configured with limits on avatars. ([\#12261](https://github.com/matrix-org/synapse/issues/12261)) + + +Improved Documentation +---------------------- + +- Fix the link to the module documentation in the legacy spam checker warning message. ([\#12231](https://github.com/matrix-org/synapse/issues/12231)) +- Remove incorrect prefixes in the worker documentation for some endpoints. ([\#12243](https://github.com/matrix-org/synapse/issues/12243)) +- Correct `check_username_for_spam` annotations and docs. ([\#12246](https://github.com/matrix-org/synapse/issues/12246)) +- Correct Authentik OpenID typo, and add notes on troubleshooting. Contributed by @IronTooch. ([\#12275](https://github.com/matrix-org/synapse/issues/12275)) +- Update the HAProxy reverse proxy guide to stop sending IPv4-mapped addresses to the homeserver. Contributed by @villepeh. ([\#12279](https://github.com/matrix-org/synapse/issues/12279)) + + +Internal Changes +---------------- + +- Rename `shared_rooms` to `mutual_rooms` ([MSC2666](https://github.com/matrix-org/matrix-doc/pull/2666)), as per proposal changes. ([\#12036](https://github.com/matrix-org/synapse/issues/12036)) +- Remove check on `update_user_directory` for shared rooms handler ([MSC2666](https://github.com/matrix-org/matrix-doc/pull/2666)), and update/expand documentation. ([\#12038](https://github.com/matrix-org/synapse/issues/12038)) +- Refactor `create_new_client_event` to use a new parameter, `state_event_ids`, which accurately describes the usage with [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) instead of abusing `auth_event_ids`. ([\#12083](https://github.com/matrix-org/synapse/issues/12083), [\#12304](https://github.com/matrix-org/synapse/issues/12304)) +- Refuse to start if registration is enabled without email, captcha, or token-based verification unless the new config flag `enable_registration_without_verification` is set to `true`. ([\#12091](https://github.com/matrix-org/synapse/issues/12091), [\#12322](https://github.com/matrix-org/synapse/issues/12322)) +- Add tests for database transaction callbacks. ([\#12198](https://github.com/matrix-org/synapse/issues/12198)) +- Handle cancellation in `DatabasePool.runInteraction`. ([\#12199](https://github.com/matrix-org/synapse/issues/12199)) +- Add missing type hints for cache storage. ([\#12216](https://github.com/matrix-org/synapse/issues/12216)) +- Add missing type hints for storage. ([\#12248](https://github.com/matrix-org/synapse/issues/12248), [\#12255](https://github.com/matrix-org/synapse/issues/12255)) +- Add type hints to tests files. ([\#12224](https://github.com/matrix-org/synapse/issues/12224), [\#12240](https://github.com/matrix-org/synapse/issues/12240), [\#12256](https://github.com/matrix-org/synapse/issues/12256)) +- Use type stubs for `psycopg2`. ([\#12269](https://github.com/matrix-org/synapse/issues/12269)) +- Improve type annotations for `execute_values`. ([\#12311](https://github.com/matrix-org/synapse/issues/12311)) +- Clean up logic around rebasing URLs for URL image previews. ([\#12219](https://github.com/matrix-org/synapse/issues/12219)) +- Use the `ignored_users` table in additional places instead of re-parsing the account data. ([\#12225](https://github.com/matrix-org/synapse/issues/12225)) +- Refactor the relations endpoints to add a `RelationsHandler`. ([\#12237](https://github.com/matrix-org/synapse/issues/12237)) +- Generate announcement links in the release script. ([\#12242](https://github.com/matrix-org/synapse/issues/12242)) +- Improve error message when dependencies check finds a broken installation. ([\#12244](https://github.com/matrix-org/synapse/issues/12244)) +- Compress metrics HTTP resource when enabled. Contributed by Nick @ Beeper. ([\#12258](https://github.com/matrix-org/synapse/issues/12258)) +- Refuse to start if the PostgreSQL database has a non-`C` locale, unless the config flag `allow_unsafe_locale` is set to `true`.
([\#12262](https://github.com/matrix-org/synapse/issues/12262), [\#12288](https://github.com/matrix-org/synapse/issues/12288)) +- Optionally include account validity expiration information in experimental [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) account status responses. ([\#12266](https://github.com/matrix-org/synapse/issues/12266)) +- Add a new cache `_get_membership_from_event_id` to speed up push rule calculations in large rooms. ([\#12272](https://github.com/matrix-org/synapse/issues/12272)) +- Re-enable Complement concurrency in CI. ([\#12283](https://github.com/matrix-org/synapse/issues/12283)) +- Remove unused test utilities. ([\#12291](https://github.com/matrix-org/synapse/issues/12291)) +- Enhance logging for inbound federation events. ([\#12301](https://github.com/matrix-org/synapse/issues/12301)) +- Fix compatibility with the recently-released Jinja 3.1. ([\#12313](https://github.com/matrix-org/synapse/issues/12313)) +- Avoid trying to calculate the state at outlier events. ([\#12314](https://github.com/matrix-org/synapse/issues/12314)) + + +Synapse 1.55.2 (2022-03-24) +=========================== + +This patch version reverts the earlier fixes from Synapse 1.55.1, which could cause problems in certain deployments, and instead adds a cap to the version of Jinja to be installed. Again, this is to fix an incompatibility with version 3.1.0 of the [Jinja](https://pypi.org/project/Jinja2/) library, and again, deployments of Synapse using the `matrixdotorg/synapse` Docker image or Debian packages from packages.matrix.org are not affected. + +Internal Changes +---------------- + +- Pin Jinja to <3.1.0, as Synapse fails to start with Jinja 3.1.0. ([\#12297](https://github.com/matrix-org/synapse/issues/12297)) +- Revert the changes from Synapse 1.55.1, as they caused problems with older versions of Jinja. ([\#12296](https://github.com/matrix-org/synapse/issues/12296)) + + +Synapse 1.55.1 (2022-03-24) +=========================== + +This is a patch release that fixes an incompatibility with version 3.1.0 of the [Jinja](https://pypi.org/project/Jinja2/) library, released on March 24th, 2022. Deployments of Synapse using the `matrixdotorg/synapse` Docker image or Debian packages from packages.matrix.org are not affected. + +Internal Changes +---------------- + +- Remove uses of the long-deprecated `jinja2.Markup` which would prevent Synapse from starting with Jinja 3.1.0 or above installed. ([\#12289](https://github.com/matrix-org/synapse/issues/12289)) + + +Synapse 1.55.0 (2022-03-22) +=========================== + +This release removes a workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. **This breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**; Mjolnir users should upgrade Mjolnir before upgrading Synapse to this version. + +This release also moves the location of the `synctl` script; see the [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved) for more details. + + +Internal Changes +---------------- + +- Tweak copy for default Single Sign-On account details template to better adhere to mobile app store guidelines. ([\#12265](https://github.com/matrix-org/synapse/issues/12265), [\#12260](https://github.com/matrix-org/synapse/issues/12260)) + + +Synapse 1.55.0rc1 (2022-03-15) +============================== + +Features +-------- + +- Add third-party rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`.
+- Improve performance of logging in for large accounts. ([\#12132](https://github.com/matrix-org/synapse/issues/12132)) +- Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. ([\#12135](https://github.com/matrix-org/synapse/issues/12135)) +- Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. ([\#12151](https://github.com/matrix-org/synapse/issues/12151)) +- Add a new Jinja2 template filter to extract the local part of an email address (see the sketch below). ([\#12212](https://github.com/matrix-org/synapse/issues/12212)) + + +Bugfixes +-------- + +- Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse 1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090)) +- Fix a long-standing bug when redacting events with relations. ([\#12113](https://github.com/matrix-org/synapse/issues/12113), [\#12121](https://github.com/matrix-org/synapse/issues/12121), [\#12130](https://github.com/matrix-org/synapse/issues/12130), [\#12189](https://github.com/matrix-org/synapse/issues/12189)) +- Fix a bug introduced in Synapse 1.7.2 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157)) +- Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse 1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175)) +- Fix a bug introduced in Synapse 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215)) +- Fix a long-standing bug where a `filter` argument with `event_fields` that did not include the `unsigned` field could result in a 500 error on `/sync`. ([\#12234](https://github.com/matrix-org/synapse/issues/12234)) + + +Improved Documentation +---------------------- + +- Fix complexity checking config example in [Resource Constrained Devices](https://matrix-org.github.io/synapse/v1.54/other/running_synapse_on_single_board_computers.html) docs page. ([\#11998](https://github.com/matrix-org/synapse/issues/11998)) +- Improve documentation for demo scripts. ([\#12143](https://github.com/matrix-org/synapse/issues/12143)) +- Updates to the Room DAG concepts development document. ([\#12179](https://github.com/matrix-org/synapse/issues/12179)) +- Document that the `typing`, `to_device`, `account_data`, `receipts`, and `presence` stream writers can only be used on a single worker. ([\#12196](https://github.com/matrix-org/synapse/issues/12196)) +- Document that contributors can sign off privately by email. ([\#12204](https://github.com/matrix-org/synapse/issues/12204)) + + +Deprecations and Removals +------------------------- + +- **Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))** +- **`synctl` has been moved into `synapse._scripts` and is exposed as an entry point; see [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved). ([\#12140](https://github.com/matrix-org/synapse/issues/12140))** +- Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138)) +- The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200))
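The Jinja2 template filter mentioned in the Features list above ([\#12212]) extracts the local part of an email address. A minimal sketch of such a filter and its registration, with illustrative names rather than Synapse's actual ones:

```python
import jinja2

def localpart_from_email(address: str) -> str:
    """Return everything before the final '@' of an email address."""
    return address.rsplit("@", 1)[0]

env = jinja2.Environment()
env.filters["localpart_from_email"] = localpart_from_email

template = env.from_string("Hello {{ user_email | localpart_from_email }}!")
print(template.render(user_email="alice@example.com"))  # Hello alice!
```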
+ + +Internal Changes +---------------- + +- Simplify the `ApplicationService` class' set of public methods related to interest checking. ([\#11915](https://github.com/matrix-org/synapse/issues/11915)) +- Add config settings for background update parameters. ([\#11980](https://github.com/matrix-org/synapse/issues/11980)) +- Correct type hints for txredis. ([\#12042](https://github.com/matrix-org/synapse/issues/12042)) +- Limit the size of `aggregation_key` on annotations. ([\#12101](https://github.com/matrix-org/synapse/issues/12101)) +- Add type hints to tests files. ([\#12108](https://github.com/matrix-org/synapse/issues/12108), [\#12146](https://github.com/matrix-org/synapse/issues/12146), [\#12207](https://github.com/matrix-org/synapse/issues/12207), [\#12208](https://github.com/matrix-org/synapse/issues/12208)) +- Move scripts to Synapse package and expose as setuptools entry points. ([\#12118](https://github.com/matrix-org/synapse/issues/12118)) +- Add support for cancellation to `ReadWriteLock`. ([\#12120](https://github.com/matrix-org/synapse/issues/12120)) +- Fix data validation to compare to lists, not sequences. ([\#12128](https://github.com/matrix-org/synapse/issues/12128)) +- Fix CI not attaching source distributions and wheels to the GitHub releases. ([\#12131](https://github.com/matrix-org/synapse/issues/12131)) +- Remove unused mocks from `test_typing`. ([\#12136](https://github.com/matrix-org/synapse/issues/12136)) +- Give `scripts-dev` scripts suffixes for neater CI config. ([\#12137](https://github.com/matrix-org/synapse/issues/12137)) +- Move the snapcraft configuration file to `contrib`. ([\#12142](https://github.com/matrix-org/synapse/issues/12142)) +- Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI. ([\#12144](https://github.com/matrix-org/synapse/issues/12144)) +- Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI. ([\#12145](https://github.com/matrix-org/synapse/issues/12145)) +- Add test for `ObservableDeferred`'s cancellation behaviour. ([\#12149](https://github.com/matrix-org/synapse/issues/12149)) +- Use `ParamSpec` in type hints for `synapse.logging.context`. ([\#12150](https://github.com/matrix-org/synapse/issues/12150)) +- Prune unused jobs from `tox` config. ([\#12152](https://github.com/matrix-org/synapse/issues/12152)) +- Move CI checks out of tox, to facilitate a move to using poetry. ([\#12153](https://github.com/matrix-org/synapse/issues/12153)) +- Avoid generating state groups for local out-of-band leaves. ([\#12154](https://github.com/matrix-org/synapse/issues/12154)) +- Avoid trying to calculate the state at outlier events. ([\#12155](https://github.com/matrix-org/synapse/issues/12155), [\#12173](https://github.com/matrix-org/synapse/issues/12173), [\#12202](https://github.com/matrix-org/synapse/issues/12202)) +- Fix some type annotations. ([\#12156](https://github.com/matrix-org/synapse/issues/12156)) +- Add type hints for `ObservableDeferred` attributes. ([\#12159](https://github.com/matrix-org/synapse/issues/12159)) +- Use a prebuilt Action for the `tests-done` CI job. ([\#12161](https://github.com/matrix-org/synapse/issues/12161)) +- Reduce number of DB queries made during processing of `/sync`.
([\#12163](https://github.com/matrix-org/synapse/issues/12163)) +- Add `delay_cancellation` utility function, which behaves like `stop_cancellation` but waits until the original `Deferred` resolves before raising a `CancelledError`. ([\#12180](https://github.com/matrix-org/synapse/issues/12180)) +- Retry HTTP replication failures; this should prevent 502 errors when restarting stateful workers (main, event persisters, stream writers). Contributed by Nick @ Beeper. ([\#12182](https://github.com/matrix-org/synapse/issues/12182)) +- Add cancellation support to `@cached` and `@cachedList` decorators. ([\#12183](https://github.com/matrix-org/synapse/issues/12183)) +- Remove unused variables. ([\#12187](https://github.com/matrix-org/synapse/issues/12187)) +- Add combined test for HTTP pusher and push rule. Contributed by Nick @ Beeper. ([\#12188](https://github.com/matrix-org/synapse/issues/12188)) +- Rename `HomeServer.get_tcp_replication` to `get_replication_command_handler`. ([\#12192](https://github.com/matrix-org/synapse/issues/12192)) +- Remove some dead code. ([\#12197](https://github.com/matrix-org/synapse/issues/12197)) +- Fix a misleading comment in the function `check_event_for_spam`. ([\#12203](https://github.com/matrix-org/synapse/issues/12203)) +- Remove unnecessary `pass` statements. ([\#12206](https://github.com/matrix-org/synapse/issues/12206)) +- Update the SSO username picker template to comply with SIWA guidelines. ([\#12210](https://github.com/matrix-org/synapse/issues/12210)) +- Improve code documentation for the typing stream over replication. ([\#12211](https://github.com/matrix-org/synapse/issues/12211)) + + +Synapse 1.54.0 (2022-03-08) +=========================== + +Please note that this will be the last release of Synapse that is compatible with Mjolnir 1.3.1 and earlier. +Administrators of servers which have the Mjolnir module installed are advised to upgrade Mjolnir to version 1.3.2 or later. + + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.54.0rc1 preventing the new module callbacks introduced in this release from being registered by modules. ([\#12141](https://github.com/matrix-org/synapse/issues/12141)) +- Fix a bug introduced in Synapse 1.54.0rc1 where runtime dependency version checks would mistakenly check development dependencies if they were present and would not accept pre-release versions of dependencies. ([\#12129](https://github.com/matrix-org/synapse/issues/12129), [\#12177](https://github.com/matrix-org/synapse/issues/12177)) + + +Internal Changes +---------------- + +- Update release script to insert the previous version when writing "No significant changes" line in the changelog. ([\#12127](https://github.com/matrix-org/synapse/issues/12127)) +- Relax the version guard for "packaging" added in [\#12088](https://github.com/matrix-org/synapse/issues/12088). ([\#12166](https://github.com/matrix-org/synapse/issues/12166)) + + +Synapse 1.54.0rc1 (2022-03-02) +============================== + + +Features +-------- + +- Add support for [MSC3202](https://github.com/matrix-org/matrix-doc/pull/3202): sending one-time key counts and fallback key usage states to Application Services. ([\#11617](https://github.com/matrix-org/synapse/issues/11617)) +- Improve the generated URL previews for some web pages. Contributed by @AndrewRyanChama. ([\#11985](https://github.com/matrix-org/synapse/issues/11985)) +- Track cache invalidations in Prometheus metrics, as already happens for cache eviction based on size or time. ([\#12000](https://github.com/matrix-org/synapse/issues/12000))
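A hedged sketch of the kind of metric described in the entry above: a labelled Prometheus counter that a cache wrapper bumps on every explicit invalidation, alongside the existing eviction metrics. The metric and class names are illustrative, not Synapse's actual ones.

```python
from prometheus_client import Counter

# One counter, labelled by cache name, bumped on explicit invalidation.
cache_invalidations = Counter(
    "example_cache_invalidations_total",
    "Number of explicit cache invalidations",
    ["cache_name"],
)

class InstrumentedCache:
    def __init__(self, name: str):
        self.name = name
        self._data: dict = {}

    def invalidate(self, key) -> None:
        # Only count invalidations that actually removed an entry.
        if self._data.pop(key, None) is not None:
            cache_invalidations.labels(cache_name=self.name).inc()
```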
+- Implement experimental support for [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) (account status endpoints). ([\#12001](https://github.com/matrix-org/synapse/issues/12001), [\#12067](https://github.com/matrix-org/synapse/issues/12067)) +- Enable modules to set a custom display name when registering a user. ([\#12009](https://github.com/matrix-org/synapse/issues/12009)) +- Advertise Matrix 1.1 and 1.2 support on `/_matrix/client/versions`. ([\#12020](https://github.com/matrix-org/synapse/issues/12020), [\#12022](https://github.com/matrix-org/synapse/issues/12022)) +- Support only the stable identifier for [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069)'s `is_guest` on `/_matrix/client/v3/account/whoami`. ([\#12021](https://github.com/matrix-org/synapse/issues/12021)) +- Use room version 9 as the default room version (per [MSC3589](https://github.com/matrix-org/matrix-doc/pull/3589)). ([\#12058](https://github.com/matrix-org/synapse/issues/12058)) +- Add module callbacks to react to user deactivation status changes (i.e. deactivations and reactivations) and profile updates. ([\#12062](https://github.com/matrix-org/synapse/issues/12062)) + + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.48.0 where an edit of the latest event in a thread would not be properly applied to the thread summary. ([\#11992](https://github.com/matrix-org/synapse/issues/11992)) +- Fix a long-standing bug where the `get_rooms_for_user` cache was not correctly invalidated for remote users when the server left a room. ([\#11999](https://github.com/matrix-org/synapse/issues/11999)) +- Fix a 500 error with Postgres when looking backwards with the [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) `/timestamp_to_event?dir=b` endpoint. ([\#12024](https://github.com/matrix-org/synapse/issues/12024)) +- Properly fix a long-standing bug where wrong data could be inserted into the `event_search` table when using SQLite. This could block running `synapse_port_db` with an `argument of type 'int' is not iterable` error. This bug was partially fixed by a change in Synapse 1.44.0. ([\#12037](https://github.com/matrix-org/synapse/issues/12037)) +- Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens in version 1.38.0. ([\#12056](https://github.com/matrix-org/synapse/issues/12056)) +- Fix a long-standing bug where Synapse would make additional failing requests over federation for missing data. ([\#12077](https://github.com/matrix-org/synapse/issues/12077)) +- Fix occasional `Unhandled error in Deferred` error message. ([\#12089](https://github.com/matrix-org/synapse/issues/12089)) +- Fix a bug introduced in Synapse 1.51.0 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#12098](https://github.com/matrix-org/synapse/issues/12098)) +- Fix a long-standing bug which could cause push notifications to malfunction if `use_frozen_dicts` was set in the configuration. ([\#12100](https://github.com/matrix-org/synapse/issues/12100)) +- Fix an extremely rare, long-standing bug in `ReadWriteLock` that would cause an error when a newly unblocked writer completes instantly.
([\#12105](https://github.com/matrix-org/synapse/issues/12105)) +- Make a `POST` to `/rooms/<room_id>/receipt/m.read/<event_id>` only trigger a push notification if the count of unread messages is different to the one in the last successfully sent push. This reduces server load and load on the receiving device. ([\#11835](https://github.com/matrix-org/synapse/issues/11835)) + + +Updates to the Docker image +--------------------------- + +- The Docker image no longer automatically creates a temporary volume at `/data`. This is not expected to affect normal usage. ([\#11997](https://github.com/matrix-org/synapse/issues/11997)) +- Use Python 3.9 in Docker images by default. ([\#12112](https://github.com/matrix-org/synapse/issues/12112)) + + +Improved Documentation +---------------------- + +- Document support for the `to_device`, `account_data`, `receipts`, and `presence` stream writers for workers. ([\#11599](https://github.com/matrix-org/synapse/issues/11599)) +- Explain the meaning of spam checker callbacks' return values. ([\#12003](https://github.com/matrix-org/synapse/issues/12003)) +- Clarify information about external Identity Provider IDs. ([\#12004](https://github.com/matrix-org/synapse/issues/12004)) + + +Deprecations and Removals +------------------------- + +- Deprecate using `synctl` with the config option `synctl_cache_factor` and print a warning if a user still uses this option. ([\#11865](https://github.com/matrix-org/synapse/issues/11865)) +- Remove support for the legacy structured logging configuration (please see the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#legacy-structured-logging-configuration-removal) if you are using `structured: true` in the Synapse configuration). ([\#12008](https://github.com/matrix-org/synapse/issues/12008)) +- Drop support for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283) unstable flags now that the stable flags are supported. ([\#12018](https://github.com/matrix-org/synapse/issues/12018)) +- Remove the unstable `/spaces` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). ([\#12073](https://github.com/matrix-org/synapse/issues/12073)) + + +Internal Changes +---------------- + +- Make the `get_room_version` method use `get_room_version_id` to benefit from caching. ([\#11808](https://github.com/matrix-org/synapse/issues/11808)) +- Remove unnecessary condition on knock -> leave auth rule check. ([\#11900](https://github.com/matrix-org/synapse/issues/11900)) +- Add tests for device list changes between local users. ([\#11972](https://github.com/matrix-org/synapse/issues/11972)) +- Optimise calculating `device_list` changes in `/sync`. ([\#11974](https://github.com/matrix-org/synapse/issues/11974)) +- Add missing type hints to storage classes. ([\#11984](https://github.com/matrix-org/synapse/issues/11984)) +- Refactor the search code for improved readability. ([\#11991](https://github.com/matrix-org/synapse/issues/11991)) +- Move common deduplication code down into `_auth_and_persist_outliers`. ([\#11994](https://github.com/matrix-org/synapse/issues/11994)) +- Limit concurrent joins from application services. ([\#11996](https://github.com/matrix-org/synapse/issues/11996)) +- Preparation for faster-room-join work: when parsing the `send_join` response, get the `m.room.create` event from `state`, not `auth_chain`. ([\#12005](https://github.com/matrix-org/synapse/issues/12005), [\#12039](https://github.com/matrix-org/synapse/issues/12039))
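To illustrate the `send_join` parsing change above: the federation `send_join` response carries `state` and `auth_chain` lists of event dicts, so the room's create event can be picked out of `state` directly. A hedged sketch, not Synapse's actual code:

```python
def get_create_event(send_join_response: dict) -> dict:
    """Pick the m.room.create event out of a send_join response's `state`."""
    for event in send_join_response["state"]:
        if event["type"] == "m.room.create" and event.get("state_key") == "":
            return event
    raise ValueError("send_join response contained no m.room.create event")
```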
+- Preparation for faster-room-join work: parse [MSC3706](https://github.com/matrix-org/matrix-doc/pull/3706) fields in the `send_join` response. ([\#12011](https://github.com/matrix-org/synapse/issues/12011)) +- Preparation for faster-room-join work: persist information on which events and rooms have partial state to the database. ([\#12012](https://github.com/matrix-org/synapse/issues/12012)) +- Preparation for faster-room-join work: support for calling `/federation/v1/state` on a remote server. ([\#12013](https://github.com/matrix-org/synapse/issues/12013)) +- Configure `tox` to use `venv` rather than `virtualenv`. ([\#12015](https://github.com/matrix-org/synapse/issues/12015)) +- Fix bug in `StateFilter.return_expanded()` and add some tests. ([\#12016](https://github.com/matrix-org/synapse/issues/12016)) +- Use Matrix v1.1 endpoints (`/_matrix/client/v3/auth/...`) in fallback auth HTML forms. ([\#12019](https://github.com/matrix-org/synapse/issues/12019)) +- Update the `olddeps` CI job to use an old version of `markupsafe`. ([\#12025](https://github.com/matrix-org/synapse/issues/12025)) +- Upgrade Mypy to version 0.931. ([\#12030](https://github.com/matrix-org/synapse/issues/12030)) +- Remove legacy `HomeServer.get_datastore()`. ([\#12031](https://github.com/matrix-org/synapse/issues/12031), [\#12070](https://github.com/matrix-org/synapse/issues/12070)) +- Minor typing fixes. ([\#12034](https://github.com/matrix-org/synapse/issues/12034), [\#12069](https://github.com/matrix-org/synapse/issues/12069)) +- After joining a room, create a dedicated logcontext to process the queued events. ([\#12041](https://github.com/matrix-org/synapse/issues/12041)) +- Tidy up GitHub Actions config which builds distributions for PyPI. ([\#12051](https://github.com/matrix-org/synapse/issues/12051)) +- Move configuration out of `setup.cfg`. ([\#12052](https://github.com/matrix-org/synapse/issues/12052), [\#12059](https://github.com/matrix-org/synapse/issues/12059)) +- Fix error message when a worker process fails to talk to another worker process. ([\#12060](https://github.com/matrix-org/synapse/issues/12060)) +- Fix using the `complement.sh` script without specifying a directory or a branch. Contributed by Nico on behalf of Famedly. ([\#12063](https://github.com/matrix-org/synapse/issues/12063)) +- Add type hints to `tests/rest/client`. ([\#12066](https://github.com/matrix-org/synapse/issues/12066), [\#12072](https://github.com/matrix-org/synapse/issues/12072), [\#12084](https://github.com/matrix-org/synapse/issues/12084), [\#12094](https://github.com/matrix-org/synapse/issues/12094)) +- Add some logging to `/sync` to try and track down #11916. ([\#12068](https://github.com/matrix-org/synapse/issues/12068)) +- Inspect application dependencies using `importlib.metadata` or its backport. ([\#12088](https://github.com/matrix-org/synapse/issues/12088)) +- Use `assertEqual` instead of the deprecated `assertEquals` in test code. ([\#12092](https://github.com/matrix-org/synapse/issues/12092)) +- Move experimental support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) to `/versions`. ([\#12099](https://github.com/matrix-org/synapse/issues/12099)) +- Add `stop_cancellation` utility function to stop `Deferred`s from being cancelled. ([\#12106](https://github.com/matrix-org/synapse/issues/12106)) +- Improve exception handling for concurrent execution.
([\#12109](https://github.com/matrix-org/synapse/issues/12109)) +- Advertise support for Python 3.10 in packaging files. ([\#12111](https://github.com/matrix-org/synapse/issues/12111)) +- Move CI checks out of tox, to facilitate a move to using poetry. ([\#12119](https://github.com/matrix-org/synapse/issues/12119)) + + +Synapse 1.53.0 (2022-02-22) +=========================== + +No significant changes since 1.53.0rc1. + + +Synapse 1.53.0rc1 (2022-02-15) +============================== + +Features +-------- + +- Add experimental support for sending to-device messages to application services, as specified by [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409). ([\#11215](https://github.com/matrix-org/synapse/issues/11215), [\#11966](https://github.com/matrix-org/synapse/issues/11966)) +- Add a background database update to purge account data for deactivated users. ([\#11655](https://github.com/matrix-org/synapse/issues/11655)) +- Experimental support for [MSC3666](https://github.com/matrix-org/matrix-doc/pull/3666): including bundled aggregations in server-side search results. ([\#11837](https://github.com/matrix-org/synapse/issues/11837)) +- Enable cache time-based expiry by default. The `expiry_time` config flag has been superseded by `expire_caches` and `cache_entry_ttl`. ([\#11849](https://github.com/matrix-org/synapse/issues/11849)) +- Add a callback to allow modules to allow or forbid a 3PID (email address, phone number) from being associated with a local account. ([\#11854](https://github.com/matrix-org/synapse/issues/11854)) +- Stabilize support and remove unstable endpoints for [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). Clients must switch to the stable identifier and endpoint. See the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#stablisation-of-msc3231) for more information. ([\#11867](https://github.com/matrix-org/synapse/issues/11867)) +- Allow modules to retrieve the current instance's server name and worker name. ([\#11868](https://github.com/matrix-org/synapse/issues/11868)) +- Use a dedicated configurable rate limiter for 3PID invites. ([\#11892](https://github.com/matrix-org/synapse/issues/11892)) +- Support the stable API endpoint for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283): new settings in `/capabilities` endpoint. ([\#11933](https://github.com/matrix-org/synapse/issues/11933), [\#11989](https://github.com/matrix-org/synapse/issues/11989)) +- Support the `dir` parameter on the `/relations` endpoint, per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#11941](https://github.com/matrix-org/synapse/issues/11941)) +- Experimental implementation of [MSC3706](https://github.com/matrix-org/matrix-doc/pull/3706): extensions to `/send_join` to support reduced response size. ([\#11967](https://github.com/matrix-org/synapse/issues/11967)) + + +Bugfixes +-------- + +- Fix [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) historical messages backfilling in random order on remote homeservers. ([\#11114](https://github.com/matrix-org/synapse/issues/11114)) +- Fix a bug introduced in Synapse 1.51.0 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#11890](https://github.com/matrix-org/synapse/issues/11890)) +- Fix a long-standing bug where some unknown endpoints would return HTML error pages instead of JSON `M_UNRECOGNIZED` errors.
([\#11930](https://github.com/matrix-org/synapse/issues/11930)) +- Implement an allow list of content types for which we will attempt to preview a URL (see the sketch below). This prevents Synapse from making useless longer-lived connections to streaming media servers. ([\#11936](https://github.com/matrix-org/synapse/issues/11936)) +- Fix a long-standing bug where pagination tokens from `/sync` and `/messages` could not be provided to the `/relations` API. ([\#11952](https://github.com/matrix-org/synapse/issues/11952)) +- Require that modules register their callbacks using keyword arguments. ([\#11975](https://github.com/matrix-org/synapse/issues/11975)) +- Fix a long-standing bug where `M_WRONG_ROOM_KEYS_VERSION` errors would not include the specced `current_version` field. ([\#11988](https://github.com/matrix-org/synapse/issues/11988)) + + +Improved Documentation +---------------------- + +- Fix typo in User Admin API: unpind -> unbind. ([\#11859](https://github.com/matrix-org/synapse/issues/11859)) +- Document that images returned by the User List Media Admin API can include those generated by URL previews. ([\#11862](https://github.com/matrix-org/synapse/issues/11862)) +- Remove outdated MSC1711 FAQ document. ([\#11907](https://github.com/matrix-org/synapse/issues/11907)) +- Correct the structured logging configuration example. Contributed by Brad Jones. ([\#11946](https://github.com/matrix-org/synapse/issues/11946)) +- Add information on the Synapse release cycle. ([\#11954](https://github.com/matrix-org/synapse/issues/11954)) +- Fix broken link in the README to the admin API for password reset. ([\#11955](https://github.com/matrix-org/synapse/issues/11955)) + + +Deprecations and Removals +------------------------- + +- Drop support for `webclient` listeners and configuring `web_client_location` to a non-HTTP(S) URL. Such deprecated configurations now cause a configuration error. ([\#11895](https://github.com/matrix-org/synapse/issues/11895)) +- Remove deprecated `user_may_create_room_with_invites` spam checker callback. See the [upgrade notes](https://matrix-org.github.io/synapse/latest/upgrade.html#removal-of-user_may_create_room_with_invites) for more information. ([\#11950](https://github.com/matrix-org/synapse/issues/11950)) +- No longer build `.deb` packages for Ubuntu 21.04 Hirsute Hippo, which has now reached end of life. ([\#11961](https://github.com/matrix-org/synapse/issues/11961)) + + +Internal Changes +---------------- + +- Enhance user registration test helpers to make them more useful for tests involving application services and devices. ([\#11615](https://github.com/matrix-org/synapse/issues/11615), [\#11616](https://github.com/matrix-org/synapse/issues/11616)) +- Improve performance when fetching bundled aggregations for multiple events. ([\#11660](https://github.com/matrix-org/synapse/issues/11660), [\#11752](https://github.com/matrix-org/synapse/issues/11752)) +- Fix type errors introduced by new annotations in the Prometheus Client library. ([\#11832](https://github.com/matrix-org/synapse/issues/11832)) +- Add missing type hints to replication code. ([\#11856](https://github.com/matrix-org/synapse/issues/11856), [\#11938](https://github.com/matrix-org/synapse/issues/11938)) +- Ensure that `opentracing` scopes are activated and closed at the right time. ([\#11869](https://github.com/matrix-org/synapse/issues/11869)) +- Improve opentracing for incoming federation requests. ([\#11870](https://github.com/matrix-org/synapse/issues/11870)) +- Improve internal docstrings in `synapse.util.caches`. ([\#11876](https://github.com/matrix-org/synapse/issues/11876))
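A hedged sketch of the URL-preview content-type allow list referenced above ([\#11936]): before downloading a URL, the `Content-Type` response header is checked against a small set of previewable types, so connections to streaming media can be abandoned early. The list and helper names here are illustrative, not Synapse's actual values.

```python
# Content types we are willing to fetch and preview (illustrative values).
PREVIEWABLE_CONTENT_TYPES = {"text/html", "application/xhtml+xml"}
PREVIEWABLE_PREFIXES = ("image/",)

def may_preview(content_type: str) -> bool:
    """Decide from a Content-Type header whether a URL is worth previewing."""
    media_type = content_type.split(";", 1)[0].strip().lower()
    return media_type in PREVIEWABLE_CONTENT_TYPES or media_type.startswith(
        PREVIEWABLE_PREFIXES
    )

assert may_preview("text/html; charset=utf-8")
assert not may_preview("video/mp4")  # streaming media: skip and close early
```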
+- Do not needlessly clear the `get_users_in_room` and `get_users_in_room_with_profiles` caches when any room state changes. ([\#11878](https://github.com/matrix-org/synapse/issues/11878)) +- Convert `ApplicationServiceTestCase` to use `simple_async_mock`. ([\#11880](https://github.com/matrix-org/synapse/issues/11880)) +- Remove experimental changes to the default push rules which were introduced in Synapse 1.19.0 but never enabled. ([\#11884](https://github.com/matrix-org/synapse/issues/11884)) +- Disable coverage calculation for olddeps build. ([\#11888](https://github.com/matrix-org/synapse/issues/11888)) +- Preparation to support sending device list updates to application services. ([\#11905](https://github.com/matrix-org/synapse/issues/11905)) +- Add a test that checks users receive their own device list updates down `/sync`. ([\#11909](https://github.com/matrix-org/synapse/issues/11909)) +- Run Complement tests sequentially. ([\#11910](https://github.com/matrix-org/synapse/issues/11910)) +- Various refactors to the application service notifier code. ([\#11911](https://github.com/matrix-org/synapse/issues/11911), [\#11912](https://github.com/matrix-org/synapse/issues/11912)) +- Tests: replace mocked `Authenticator` with the real thing. ([\#11913](https://github.com/matrix-org/synapse/issues/11913)) +- Various refactors to the typing notifications code. ([\#11914](https://github.com/matrix-org/synapse/issues/11914)) +- Use the proper type for the `Content-Length` header in the `UploadResource`. ([\#11927](https://github.com/matrix-org/synapse/issues/11927)) +- Remove an unnecessary ignoring of type hints due to fixes in upstream packages. ([\#11939](https://github.com/matrix-org/synapse/issues/11939)) +- Add missing type hints. ([\#11953](https://github.com/matrix-org/synapse/issues/11953)) +- Fix an import cycle in `synapse.event_auth`. ([\#11965](https://github.com/matrix-org/synapse/issues/11965)) +- Unpin `frozendict` but exclude the known bad version 2.1.2. ([\#11969](https://github.com/matrix-org/synapse/issues/11969)) +- Prepare for rename of default Complement branch. ([\#11971](https://github.com/matrix-org/synapse/issues/11971)) +- Fetch Synapse's version using a helper from `matrix-common`. ([\#11979](https://github.com/matrix-org/synapse/issues/11979)) + + +Synapse 1.52.0 (2022-02-08) +=========================== + +No significant changes since 1.52.0rc1. + +Note that [Twisted 22.1.0](https://github.com/twisted/twisted/releases/tag/twisted-22.1.0) +has recently been released, which fixes a [security issue](https://github.com/twisted/twisted/security/advisories/GHSA-92x2-jw7w-xvvx) +within the Twisted library. We do not believe Synapse is affected by this vulnerability, +though we advise server administrators who installed Synapse via pip to upgrade Twisted +with `pip install --upgrade Twisted treq` as a matter of good practice. The Docker image +`matrixdotorg/synapse` and the Debian packages from `packages.matrix.org` are using the +updated library. + + +Synapse 1.52.0rc1 (2022-02-01) +============================== + +Features +-------- + +- Remove account data (including client config, push rules and ignored users) upon user deactivation. ([\#11621](https://github.com/matrix-org/synapse/issues/11621), [\#11788](https://github.com/matrix-org/synapse/issues/11788), [\#11789](https://github.com/matrix-org/synapse/issues/11789)) +- Add an admin API to reset connection timeouts for a remote server.
([\#11639](https://github.com/matrix-org/synapse/issues/11639)) +- Add an admin API to get a list of rooms that federate with a given remote homeserver. ([\#11658](https://github.com/matrix-org/synapse/issues/11658)) +- Add a config flag to inhibit `M_USER_IN_USE` during registration. ([\#11743](https://github.com/matrix-org/synapse/issues/11743)) +- Add a module callback to set username at registration. ([\#11790](https://github.com/matrix-org/synapse/issues/11790)) +- Allow configuring a maximum file size as well as a list of allowed content types for avatars. ([\#11846](https://github.com/matrix-org/synapse/issues/11846)) + + +Bugfixes +-------- + +- Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11612](https://github.com/matrix-org/synapse/issues/11612)) +- Fix a long-standing bug when previewing Reddit URLs which do not contain an image. ([\#11767](https://github.com/matrix-org/synapse/issues/11767)) +- Fix a long-standing bug that media streams could cause long-lived connections when generating URL previews. ([\#11784](https://github.com/matrix-org/synapse/issues/11784)) +- Include a `prev_content` field in state events sent to Application Services. Contributed by @totallynotvaishnav. ([\#11798](https://github.com/matrix-org/synapse/issues/11798)) +- Fix a bug introduced in Synapse 0.33.3 causing requests to sometimes log strings such as `HTTPStatus.OK` instead of integer status codes. ([\#11827](https://github.com/matrix-org/synapse/issues/11827)) + + +Improved Documentation +---------------------- + +- Update PyPI installation docs to indicate that we now support Python 3.10. ([\#11820](https://github.com/matrix-org/synapse/issues/11820)) +- Add missing steps to the contribution submission process in the documentation. Contributed by @sequentialread. ([\#11821](https://github.com/matrix-org/synapse/issues/11821)) +- Remove an unneeded old table of contents from the documentation. ([\#11860](https://github.com/matrix-org/synapse/issues/11860)) +- Consolidate the `access_token` information at the top of each relevant page in the Admin API documentation. ([\#11861](https://github.com/matrix-org/synapse/issues/11861)) + + +Deprecations and Removals +------------------------- + +- Drop support for Python 3.6, which is EOL. ([\#11683](https://github.com/matrix-org/synapse/issues/11683)) +- Remove the `experimental_msc1849_support_enabled` flag as the features are now stable. ([\#11843](https://github.com/matrix-org/synapse/issues/11843)) + + +Internal Changes +---------------- + +- Preparation for database schema simplifications: add `state_key` and `rejection_reason` columns to `events` table. ([\#11792](https://github.com/matrix-org/synapse/issues/11792)) +- Add `FrozenEvent.get_state_key` and use it in a couple of places. ([\#11793](https://github.com/matrix-org/synapse/issues/11793)) +- Preparation for database schema simplifications: stop reading from `event_reference_hashes`. ([\#11794](https://github.com/matrix-org/synapse/issues/11794)) +- Drop unused table `public_room_list_stream`. ([\#11795](https://github.com/matrix-org/synapse/issues/11795)) +- Preparation for reducing Postgres serialization errors: allow setting transaction isolation level (see the sketch below). Contributed by Nick @ Beeper. ([\#11799](https://github.com/matrix-org/synapse/issues/11799), [\#11847](https://github.com/matrix-org/synapse/issues/11847)) +- Docker: skip the initial amd64-only build and go straight to multiarch. ([\#11810](https://github.com/matrix-org/synapse/issues/11810))
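A hedged sketch of why the configurable transaction isolation level referenced above ([\#11799]) matters: under `REPEATABLE READ` or `SERIALIZABLE` isolation, Postgres may abort a transaction with a serialization failure that the caller is expected to retry. The retry helper is illustrative, not Synapse's actual loop.

```python
import psycopg2
import psycopg2.extensions
from psycopg2 import errors

conn = psycopg2.connect("dbname=synapse")  # hypothetical DSN
conn.set_session(
    isolation_level=psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ
)

def run_with_retries(conn, fn, max_attempts: int = 5):
    """Run fn(cursor) in a transaction, retrying on serialization failures."""
    for attempt in range(max_attempts):
        try:
            with conn:  # commits on success, rolls back on exception
                with conn.cursor() as cur:
                    return fn(cur)
        except errors.SerializationFailure:
            if attempt == max_attempts - 1:
                raise  # give up after the final attempt
```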
+- Run Complement on the GitHub Actions VM and not inside a Docker container. ([\#11811](https://github.com/matrix-org/synapse/issues/11811)) +- Log module names at startup. ([\#11813](https://github.com/matrix-org/synapse/issues/11813)) +- Improve type safety of bundled aggregations code. ([\#11815](https://github.com/matrix-org/synapse/issues/11815)) +- Correct a type annotation in the event validation logic. ([\#11817](https://github.com/matrix-org/synapse/issues/11817), [\#11830](https://github.com/matrix-org/synapse/issues/11830)) +- Minor updates and documentation for database schema delta files. ([\#11823](https://github.com/matrix-org/synapse/issues/11823)) +- Work around a type annotation problem in `prometheus_client` 0.13.0. ([\#11834](https://github.com/matrix-org/synapse/issues/11834)) +- Minor performance improvement in room state lookup. ([\#11836](https://github.com/matrix-org/synapse/issues/11836)) +- Fix some indentation inconsistencies in the sample config. ([\#11838](https://github.com/matrix-org/synapse/issues/11838)) +- Add type hints to `tests/rest/admin`. ([\#11851](https://github.com/matrix-org/synapse/issues/11851)) + + +Synapse 1.51.0 (2022-01-25) +=========================== + +No significant changes since 1.51.0rc2. + +Synapse 1.51.0 deprecates `webclient` listeners and non-HTTP(S) `web_client_location`s. Support for these will be removed in Synapse 1.53.0, at which point Synapse will not be capable of directly serving a web client for Matrix. See the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#upgrading-to-v1510). + +Synapse 1.51.0rc2 (2022-01-24) +============================== + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.40.0 that caused Synapse to fail to process incoming federation traffic after handling a large number of events in a v1 room. ([\#11806](https://github.com/matrix-org/synapse/issues/11806)) + + +Synapse 1.50.2 (2022-01-24) +=========================== + +This release includes the same bugfix as Synapse 1.51.0rc2. + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.40.0 that caused Synapse to fail to process incoming federation traffic after handling a large number of events in a v1 room. ([\#11806](https://github.com/matrix-org/synapse/issues/11806)) + + +Synapse 1.51.0rc1 (2022-01-21) +============================== + +Features +-------- + +- Add `track_puppeted_user_ips` config flag to record client IP addresses against puppeted users, and include the puppeted users in monthly active user counts. ([\#11561](https://github.com/matrix-org/synapse/issues/11561), [\#11749](https://github.com/matrix-org/synapse/issues/11749), [\#11757](https://github.com/matrix-org/synapse/issues/11757)) +- Include whether the requesting user has participated in a thread when generating a summary for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440). ([\#11577](https://github.com/matrix-org/synapse/issues/11577)) +- Return an `M_FORBIDDEN` error code instead of `M_UNKNOWN` when a spam checker module prevents a user from creating a room. ([\#11672](https://github.com/matrix-org/synapse/issues/11672)) +- Add a flag to the `synapse_review_recent_signups` script to ignore and filter appservice users.
([\#11675](https://github.com/matrix-org/synapse/issues/11675), [\#11770](https://github.com/matrix-org/synapse/issues/11770)) + + +Bugfixes +-------- + +- Fix a long-standing issue which could cause Synapse to incorrectly accept data in the unsigned field of events + received over federation. ([\#11530](https://github.com/matrix-org/synapse/issues/11530)) +- Fix a long-standing bug where Synapse wouldn't cache a response indicating that a remote user has no devices. ([\#11587](https://github.com/matrix-org/synapse/issues/11587)) +- Fix an error that occurs whilst trying to get the federation status of a destination server that was working normally. This admin API was newly introduced in Synapse 1.49.0. ([\#11593](https://github.com/matrix-org/synapse/issues/11593)) +- Fix bundled aggregations not being included in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11612](https://github.com/matrix-org/synapse/issues/11612), [\#11659](https://github.com/matrix-org/synapse/issues/11659), [\#11791](https://github.com/matrix-org/synapse/issues/11791)) +- Fix the `/_matrix/client/v1/room/{roomId}/hierarchy` endpoint returning incorrect fields which have been present since Synapse 1.49.0. ([\#11667](https://github.com/matrix-org/synapse/issues/11667)) +- Fix preview of some GIF URLs (like tenor.com). Contributed by Philippe Daouadi. ([\#11669](https://github.com/matrix-org/synapse/issues/11669)) +- Fix a bug where only the first 50 rooms from a space were returned from the `/hierarchy` API. This has existed since the introduction of the API in Synapse 1.41.0. ([\#11695](https://github.com/matrix-org/synapse/issues/11695)) +- Fix a bug introduced in Synapse 1.18.0 where password reset and address validation emails would not be sent if their subject was configured to use the 'app' template variable. Contributed by @br4nnigan. ([\#11710](https://github.com/matrix-org/synapse/issues/11710), [\#11745](https://github.com/matrix-org/synapse/issues/11745)) +- Make the 'List Rooms' Admin API sort stable. Contributed by Daniël Sonck. ([\#11737](https://github.com/matrix-org/synapse/issues/11737)) +- Fix a long-standing bug where space hierarchy over federation would only work correctly some of the time. ([\#11775](https://github.com/matrix-org/synapse/issues/11775)) +- Fix a bug introduced in Synapse 1.46.0 that prevented `on_logged_out` module callbacks from being correctly awaited by Synapse. ([\#11786](https://github.com/matrix-org/synapse/issues/11786)) + + +Improved Documentation +---------------------- + +- Warn against using a Let's Encrypt certificate for TLS/DTLS TURN server client connections, and suggest using a ZeroSSL certificate instead. This works around client-side connectivity errors caused by WebRTC libraries that reject Let's Encrypt certificates. Contributed by @AndrewFerr. ([\#11686](https://github.com/matrix-org/synapse/issues/11686)) +- Document the new `SYNAPSE_TEST_PERSIST_SQLITE_DB` environment variable in the contributing guide. ([\#11715](https://github.com/matrix-org/synapse/issues/11715)) +- Document that the minimum supported PostgreSQL version is now 10. ([\#11725](https://github.com/matrix-org/synapse/issues/11725)) +- Fix typo in demo docs: differnt. ([\#11735](https://github.com/matrix-org/synapse/issues/11735)) +- Update room spec URL in config files. ([\#11739](https://github.com/matrix-org/synapse/issues/11739)) +- Mention `python3-venv` and `libpq-dev` dependencies in the contribution guide.
([\#11740](https://github.com/matrix-org/synapse/issues/11740)) +- Update documentation for configuring login with Facebook. ([\#11755](https://github.com/matrix-org/synapse/issues/11755)) +- Update installation instructions to note that Python 3.6 is no longer supported. ([\#11781](https://github.com/matrix-org/synapse/issues/11781)) + + +Deprecations and Removals +------------------------- + +- Remove the unstable `/send_relation` endpoint. ([\#11682](https://github.com/matrix-org/synapse/issues/11682)) +- Remove `python_twisted_reactor_pending_calls` Prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724)) +- Remove the `password_hash` field from the response dictionaries of the [Users Admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html). ([\#11576](https://github.com/matrix-org/synapse/issues/11576)) +- **Deprecate support for `webclient` listeners and non-HTTP(S) `web_client_location` configuration. ([\#11774](https://github.com/matrix-org/synapse/issues/11774), [\#11783](https://github.com/matrix-org/synapse/issues/11783))** + + +Internal Changes +---------------- + +- Run `pyupgrade --py37-plus --keep-percent-format` on Synapse. ([\#11685](https://github.com/matrix-org/synapse/issues/11685)) +- Use buildkit's cache feature to speed up docker builds. ([\#11691](https://github.com/matrix-org/synapse/issues/11691)) +- Use `auto_attribs` and native type hints for attrs classes (see the sketch below). ([\#11692](https://github.com/matrix-org/synapse/issues/11692), [\#11768](https://github.com/matrix-org/synapse/issues/11768)) +- Remove debug logging for #4422, which has been closed since Synapse 0.99. ([\#11693](https://github.com/matrix-org/synapse/issues/11693)) +- Remove fallback code for Python 2. ([\#11699](https://github.com/matrix-org/synapse/issues/11699)) +- Add a test for [an edge case](https://github.com/matrix-org/synapse/pull/11532#discussion_r769104461) in the `/sync` logic. ([\#11701](https://github.com/matrix-org/synapse/issues/11701)) +- Add the option to write SQLite test dbs to disk when running tests. ([\#11702](https://github.com/matrix-org/synapse/issues/11702)) +- Improve Complement test output for GitHub Actions. ([\#11707](https://github.com/matrix-org/synapse/issues/11707)) +- Fix docstring on `add_account_data_for_user`. ([\#11716](https://github.com/matrix-org/synapse/issues/11716)) +- Complement environment variable name change and update `.gitignore`. ([\#11718](https://github.com/matrix-org/synapse/issues/11718)) +- Simplify calculation of Prometheus metrics for garbage collection. ([\#11723](https://github.com/matrix-org/synapse/issues/11723)) +- Improve accuracy of `python_twisted_reactor_tick_time` Prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724), [\#11771](https://github.com/matrix-org/synapse/issues/11771)) +- Minor efficiency improvements when inserting many values into the database. ([\#11742](https://github.com/matrix-org/synapse/issues/11742)) +- Invite PR authors to give themselves credit in the changelog. ([\#11744](https://github.com/matrix-org/synapse/issues/11744)) +- Add optional debugging to investigate [issue 8631](https://github.com/matrix-org/synapse/issues/8631). ([\#11760](https://github.com/matrix-org/synapse/issues/11760)) +- Remove `log_function` utility function and its uses. ([\#11761](https://github.com/matrix-org/synapse/issues/11761)) +- Add a unit test that checks both `client` and `webclient` resources will function when simultaneously enabled. ([\#11765](https://github.com/matrix-org/synapse/issues/11765))
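To illustrate the attrs change referenced above ([\#11692]): with `auto_attribs=True`, fields are declared with plain type annotations instead of repeated `attr.ib()` calls. A small before/after sketch with illustrative class names:

```python
import attr

# Before: explicit attr.ib() declarations, with types passed as arguments.
@attr.s(slots=True, frozen=True)
class RoomEntryOld:
    room_id = attr.ib(type=str)
    joined_members = attr.ib(type=int, default=0)

# After: auto_attribs reads native type hints directly.
@attr.s(slots=True, frozen=True, auto_attribs=True)
class RoomEntry:
    room_id: str
    joined_members: int = 0
```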
+- Allow overriding the Complement commit using `COMPLEMENT_REF`. ([\#11766](https://github.com/matrix-org/synapse/issues/11766)) +- Add some comments and type annotations for `_update_outliers_txn`. ([\#11776](https://github.com/matrix-org/synapse/issues/11776)) + + +Synapse 1.50.1 (2022-01-18) +=========================== + +This release fixes a bug in Synapse 1.50.0 that could prevent clients from being able to connect to Synapse if the `webclient` resource was enabled. Further details are available in [this issue](https://github.com/matrix-org/synapse/issues/11763). + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.50.0rc1 that could cause Matrix clients to be unable to connect to Synapse instances with the `webclient` resource enabled. ([\#11764](https://github.com/matrix-org/synapse/issues/11764)) + + +Synapse 1.50.0 (2022-01-18) +=========================== + +**This release contains a critical bug that may prevent clients from being able to connect. +As such, it is not recommended to upgrade to 1.50.0. Instead, please upgrade straight to +1.50.1. Further details are available in [this issue](https://github.com/matrix-org/synapse/issues/11763).** + +Please note that we now only support Python 3.7+ and PostgreSQL 10+ (if applicable), because Python 3.6 and PostgreSQL 9.6 have reached end-of-life. + +No significant changes since 1.50.0rc2. + + +Synapse 1.50.0rc2 (2022-01-14) +============================== + +This release candidate fixes a federation-breaking regression introduced in Synapse 1.50.0rc1. + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.0.0 whereby some device list updates would not be sent to remote homeservers if there were too many to send at once. ([\#11729](https://github.com/matrix-org/synapse/issues/11729)) +- Fix a bug introduced in Synapse 1.50.0rc1 whereby outbound federation could fail because too many EDUs were produced for device updates. ([\#11730](https://github.com/matrix-org/synapse/issues/11730)) + + +Improved Documentation +---------------------- + +- Document that the minimum supported PostgreSQL version is now 10. ([\#11725](https://github.com/matrix-org/synapse/issues/11725)) + + +Internal Changes +---------------- + +- Fix a typechecker problem related to our (ab)use of `nacl.signing.SigningKey`s. ([\#11714](https://github.com/matrix-org/synapse/issues/11714)) + + +Synapse 1.50.0rc1 (2022-01-05) +============================== + + +Features +-------- + +- Allow guests to send state events per [MSC3419](https://github.com/matrix-org/matrix-doc/pull/3419). ([\#11378](https://github.com/matrix-org/synapse/issues/11378)) +- Add experimental support for part of [MSC3202](https://github.com/matrix-org/matrix-doc/pull/3202): allowing application services to masquerade as specific devices. ([\#11538](https://github.com/matrix-org/synapse/issues/11538)) +- Add an admin API to get users' account data (see the sketch below). ([\#11664](https://github.com/matrix-org/synapse/issues/11664)) +- Include the room topic in the stripped state included with invites and knocking. ([\#11666](https://github.com/matrix-org/synapse/issues/11666)) +- Send and handle cross-signing messages using the stable prefix. ([\#10520](https://github.com/matrix-org/synapse/issues/10520)) +- Support unprefixed versions of fallback key property names. ([\#11541](https://github.com/matrix-org/synapse/issues/11541))
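A hedged sketch of calling the account-data admin API noted above ([\#11664]). The endpoint path shown is my reading of the linked PR and may differ; admin APIs in general require an admin access token in the `Authorization` header.

```python
import requests

BASE_URL = "https://homeserver.example.com"  # hypothetical homeserver
ADMIN_TOKEN = "syt_placeholder_admin_token"  # admin access token (placeholder)
USER_ID = "@alice:example.com"

resp = requests.get(
    f"{BASE_URL}/_synapse/admin/v1/users/{USER_ID}/accountdata",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    timeout=10,
)
resp.raise_for_status()
print(resp.json())  # the user's global and per-room account data
```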
+ + +Bugfixes +-------- + +- Fix a long-standing bug where relations from other rooms could be included in the bundled aggregations of an event. ([\#11516](https://github.com/matrix-org/synapse/issues/11516)) +- Fix a long-standing bug which could cause `AssertionError`s to be written to the log when Synapse was restarted after purging events from the database. ([\#11536](https://github.com/matrix-org/synapse/issues/11536), [\#11642](https://github.com/matrix-org/synapse/issues/11642)) +- Fix a bug introduced in Synapse 1.17.0 where a pusher created for an email with capital letters would fail to be created. ([\#11547](https://github.com/matrix-org/synapse/issues/11547)) +- Fix a long-standing bug where responses included bundled aggregations when they should not, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11592](https://github.com/matrix-org/synapse/issues/11592), [\#11623](https://github.com/matrix-org/synapse/issues/11623)) +- Fix a long-standing bug where some unknown endpoints would return HTML error pages instead of JSON `M_UNRECOGNIZED` errors. ([\#11602](https://github.com/matrix-org/synapse/issues/11602)) +- Fix a bug introduced in Synapse 1.19.3 which could sometimes cause `AssertionError`s when backfilling rooms over federation. ([\#11632](https://github.com/matrix-org/synapse/issues/11632)) + + +Improved Documentation +---------------------- + +- Update Synapse install command for FreeBSD as the package is now prefixed with `py38`. Contributed by @itchychips. ([\#11267](https://github.com/matrix-org/synapse/issues/11267)) +- Document the usage of refresh tokens. ([\#11427](https://github.com/matrix-org/synapse/issues/11427)) +- Add details for how to configure a TURN server when behind a NAT. Contributed by @AndrewFerr. ([\#11553](https://github.com/matrix-org/synapse/issues/11553)) +- Add references for using Postgres to the Docker documentation. ([\#11640](https://github.com/matrix-org/synapse/issues/11640)) +- Fix the documentation link in newly-generated configuration files. ([\#11678](https://github.com/matrix-org/synapse/issues/11678)) +- Correct the documentation for `nginx` to use a case-sensitive URL pattern. Fixes an error introduced in v1.21.0. ([\#11680](https://github.com/matrix-org/synapse/issues/11680)) +- Clarify SSO mapping provider documentation by writing `def` or `async def` before the names of methods, as appropriate. ([\#11681](https://github.com/matrix-org/synapse/issues/11681)) + + +Deprecations and Removals +------------------------- + +- Replace `mock` package by its standard library version. ([\#11588](https://github.com/matrix-org/synapse/issues/11588)) +- Drop support for Python 3.6 and Ubuntu 18.04. ([\#11633](https://github.com/matrix-org/synapse/issues/11633)) + + +Internal Changes +---------------- + +- Allow specific, experimental events to be created without `prev_events`. Used by [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716). ([\#11243](https://github.com/matrix-org/synapse/issues/11243)) +- A test helper (`wait_for_background_updates`) no longer depends on classes defining a `store` property. ([\#11331](https://github.com/matrix-org/synapse/issues/11331)) +- Add type hints to `synapse.appservice`. ([\#11360](https://github.com/matrix-org/synapse/issues/11360)) +- Add missing type hints to `synapse.config` module.
([\#11480](https://github.com/matrix-org/synapse/issues/11480)) +- Add test to ensure we share the same `state_group` across the whole historical batch when using the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint. ([\#11487](https://github.com/matrix-org/synapse/issues/11487)) +- Refactor `tests.util.setup_test_homeserver` and `tests.server.setup_test_homeserver`. ([\#11503](https://github.com/matrix-org/synapse/issues/11503)) +- Move `glob_to_regex` and `re_word_boundary` to `matrix-python-common`. ([\#11505](https://github.com/matrix-org/synapse/issues/11505), [\#11687](https://github.com/matrix-org/synapse/issues/11687)) +- Use `HTTPStatus` constants in place of literals in `tests.rest.client.test_auth`. ([\#11520](https://github.com/matrix-org/synapse/issues/11520)) +- Add a receipt types constant for `m.read`. ([\#11531](https://github.com/matrix-org/synapse/issues/11531)) +- Clean up `synapse.rest.admin`. ([\#11535](https://github.com/matrix-org/synapse/issues/11535)) +- Add missing `errcode` to `parse_string` and `parse_boolean`. ([\#11542](https://github.com/matrix-org/synapse/issues/11542)) +- Use `HTTPStatus` constants in place of literals in `synapse.http`. ([\#11543](https://github.com/matrix-org/synapse/issues/11543)) +- Add missing type hints to storage classes. ([\#11546](https://github.com/matrix-org/synapse/issues/11546), [\#11549](https://github.com/matrix-org/synapse/issues/11549), [\#11551](https://github.com/matrix-org/synapse/issues/11551), [\#11555](https://github.com/matrix-org/synapse/issues/11555), [\#11575](https://github.com/matrix-org/synapse/issues/11575), [\#11589](https://github.com/matrix-org/synapse/issues/11589), [\#11594](https://github.com/matrix-org/synapse/issues/11594), [\#11652](https://github.com/matrix-org/synapse/issues/11652), [\#11653](https://github.com/matrix-org/synapse/issues/11653), [\#11654](https://github.com/matrix-org/synapse/issues/11654), [\#11657](https://github.com/matrix-org/synapse/issues/11657)) +- Fix an inaccurate and misleading comment in the `/sync` code. ([\#11550](https://github.com/matrix-org/synapse/issues/11550)) +- Add missing type hints to `synapse.logging.context`. ([\#11556](https://github.com/matrix-org/synapse/issues/11556)) +- Stop populating unused database column `state_events.prev_state`. ([\#11558](https://github.com/matrix-org/synapse/issues/11558)) +- Minor efficiency improvements in event persistence. ([\#11560](https://github.com/matrix-org/synapse/issues/11560)) +- Add some safety checks that storage functions are used correctly. ([\#11564](https://github.com/matrix-org/synapse/issues/11564), [\#11580](https://github.com/matrix-org/synapse/issues/11580)) +- Make `get_device` return `None` if the device doesn't exist rather than raising an exception. ([\#11565](https://github.com/matrix-org/synapse/issues/11565)) +- Split the HTML parsing code from the URL preview resource code. ([\#11566](https://github.com/matrix-org/synapse/issues/11566)) +- Remove redundant `COALESCE()`s around `COUNT()`s in database queries. ([\#11570](https://github.com/matrix-org/synapse/issues/11570)) +- Add missing type hints to `synapse.http`. ([\#11571](https://github.com/matrix-org/synapse/issues/11571)) +- Add [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) and [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) to `/versions` -> `unstable_features` to detect server support. 
([\#11582](https://github.com/matrix-org/synapse/issues/11582)) +- Add type hints to `synapse/tests/rest/admin`. ([\#11590](https://github.com/matrix-org/synapse/issues/11590)) +- Drop end-of-life Python 3.6 and Postgres 9.6 from CI. ([\#11595](https://github.com/matrix-org/synapse/issues/11595)) +- Update black version and run it on all the files. ([\#11596](https://github.com/matrix-org/synapse/issues/11596)) +- Add opentracing type stubs and fix associated mypy errors. ([\#11603](https://github.com/matrix-org/synapse/issues/11603), [\#11622](https://github.com/matrix-org/synapse/issues/11622)) +- Improve OpenTracing support for requests which use a `ResponseCache`. ([\#11607](https://github.com/matrix-org/synapse/issues/11607)) +- Improve OpenTracing support for incoming HTTP requests. ([\#11618](https://github.com/matrix-org/synapse/issues/11618)) +- A number of improvements to opentracing support. ([\#11619](https://github.com/matrix-org/synapse/issues/11619)) +- Refactor the way that the `outlier` flag is set on events received over federation. ([\#11634](https://github.com/matrix-org/synapse/issues/11634)) +- Improve the error messages from `get_create_event_for_room`. ([\#11638](https://github.com/matrix-org/synapse/issues/11638)) +- Remove redundant `get_current_events_token` method. ([\#11643](https://github.com/matrix-org/synapse/issues/11643)) +- Convert `namedtuples` to `attrs`. ([\#11665](https://github.com/matrix-org/synapse/issues/11665), [\#11574](https://github.com/matrix-org/synapse/issues/11574)) +- Update the `/capabilities` response to include whether support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) is available. ([\#11690](https://github.com/matrix-org/synapse/issues/11690)) +- Send the `Accept` header in HTTP requests made using `SimpleHttpClient.get_json`. ([\#11677](https://github.com/matrix-org/synapse/issues/11677)) +- Work around Mjolnir compatibility issue by adding an import for `glob_to_regex` in `synapse.util`, where it moved from. ([\#11696](https://github.com/matrix-org/synapse/issues/11696)) + + +**Changelogs for older versions can be found [here](CHANGES-2021.md).** From 9345361c6beacff8cf68d3c9c5c55dd19b3f930c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Jul 2023 12:33:27 +0200 Subject: [PATCH 170/562] Bump authlib from 1.2.0 to 1.2.1 (#15864) Bumps [authlib](https://github.com/lepture/authlib) from 1.2.0 to 1.2.1. - [Release notes](https://github.com/lepture/authlib/releases) - [Changelog](https://github.com/lepture/authlib/blob/master/docs/changelog.rst) - [Commits](https://github.com/lepture/authlib/compare/v1.2.0...v1.2.1) --- updated-dependencies: - dependency-name: authlib dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index ee19c246f3..6fa15dfaaa 100644 --- a/poetry.lock +++ b/poetry.lock @@ -53,13 +53,13 @@ tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pyte [[package]] name = "authlib" -version = "1.2.0" +version = "1.2.1" description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." 
optional = true python-versions = "*" files = [ - {file = "Authlib-1.2.0-py2.py3-none-any.whl", hash = "sha256:4ddf4fd6cfa75c9a460b361d4bd9dac71ffda0be879dbe4292a02e92349ad55a"}, - {file = "Authlib-1.2.0.tar.gz", hash = "sha256:4fa3e80883a5915ef9f5bc28630564bc4ed5b5af39812a3ff130ec76bd631e9d"}, + {file = "Authlib-1.2.1-py2.py3-none-any.whl", hash = "sha256:c88984ea00149a90e3537c964327da930779afa4564e354edfd98410bea01911"}, + {file = "Authlib-1.2.1.tar.gz", hash = "sha256:421f7c6b468d907ca2d9afede256f068f87e34d23dd221c07d13d4c234726afb"}, ] [package.dependencies] From aea94ca8cd2ec04c115357e7395704a8cee195ae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Jul 2023 12:33:47 +0200 Subject: [PATCH 171/562] Bump importlib-metadata from 6.6.0 to 6.7.0 (#15865) Bumps [importlib-metadata](https://github.com/python/importlib_metadata) from 6.6.0 to 6.7.0. - [Release notes](https://github.com/python/importlib_metadata/releases) - [Changelog](https://github.com/python/importlib_metadata/blob/main/NEWS.rst) - [Commits](https://github.com/python/importlib_metadata/compare/v6.6.0...v6.7.0) --- updated-dependencies: - dependency-name: importlib-metadata dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 6fa15dfaaa..38913bad36 100644 --- a/poetry.lock +++ b/poetry.lock @@ -837,13 +837,13 @@ files = [ [[package]] name = "importlib-metadata" -version = "6.6.0" +version = "6.7.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.7" files = [ - {file = "importlib_metadata-6.6.0-py3-none-any.whl", hash = "sha256:43dd286a2cd8995d5eaef7fee2066340423b818ed3fd70adf0bad5f1fac53fed"}, - {file = "importlib_metadata-6.6.0.tar.gz", hash = "sha256:92501cdf9cc66ebd3e612f1b4f0c0765dfa42f0fa38ffb319b6bd84dd675d705"}, + {file = "importlib_metadata-6.7.0-py3-none-any.whl", hash = "sha256:cb52082e659e97afc5dac71e79de97d8681de3aa07ff18578330904a9d18e5b5"}, + {file = "importlib_metadata-6.7.0.tar.gz", hash = "sha256:1aaf550d4f73e5d6783e7acb77aec43d49da8017410afae93822cc9cca98c4d4"}, ] [package.dependencies] @@ -853,7 +853,7 @@ zipp = ">=0.5" [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] -testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] [[package]] name = "importlib-resources" From 411ba44790d98b2244dadd17281db2daa3ccfb7a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Jul 2023 12:34:20 +0200 Subject: [PATCH 172/562] Bump types-pyopenssl from 23.2.0.0 to 23.2.0.1 (#15866) Bumps [types-pyopenssl](https://github.com/python/typeshed) from 23.2.0.0 to 23.2.0.1. 
- [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-pyopenssl dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 38913bad36..87ed919e43 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2964,13 +2964,13 @@ files = [ [[package]] name = "types-pyopenssl" -version = "23.2.0.0" +version = "23.2.0.1" description = "Typing stubs for pyOpenSSL" optional = false python-versions = "*" files = [ - {file = "types-pyOpenSSL-23.2.0.0.tar.gz", hash = "sha256:43e307e8dfb3a7a8208a19874ca060305f460c529d4eaca8a2669ea89499f244"}, - {file = "types_pyOpenSSL-23.2.0.0-py3-none-any.whl", hash = "sha256:ba803a99440b0c2e9ab4e197084aeefc55bdfe8a580d367b2aa4210810a21240"}, + {file = "types-pyOpenSSL-23.2.0.1.tar.gz", hash = "sha256:beeb5d22704c625a1e4b6dc756355c5b4af0b980138b702a9d9f932acf020903"}, + {file = "types_pyOpenSSL-23.2.0.1-py3-none-any.whl", hash = "sha256:0568553f104466f1b8e0db3360fbe6770137d02e21a1a45c209bf2b1b03d90d4"}, ] [package.dependencies] From a587de96b81e945677622ff0e60549b00f2b1689 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Jul 2023 12:34:57 +0200 Subject: [PATCH 173/562] Bump sentry-sdk from 1.25.1 to 1.26.0 (#15867) Bumps [sentry-sdk](https://github.com/getsentry/sentry-python) from 1.25.1 to 1.26.0. - [Release notes](https://github.com/getsentry/sentry-python/releases) - [Changelog](https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md) - [Commits](https://github.com/getsentry/sentry-python/compare/1.25.1...1.26.0) --- updated-dependencies: - dependency-name: sentry-sdk dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 87ed919e43..2802e42cf4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2301,13 +2301,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "1.25.1" +version = "1.26.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = "*" files = [ - {file = "sentry-sdk-1.25.1.tar.gz", hash = "sha256:aa796423eb6a2f4a8cd7a5b02ba6558cb10aab4ccdc0537f63a47b038c520c38"}, - {file = "sentry_sdk-1.25.1-py2.py3-none-any.whl", hash = "sha256:79afb7c896014038e358401ad1d36889f97a129dfa8031c49b3f238cd1aa3935"}, + {file = "sentry-sdk-1.26.0.tar.gz", hash = "sha256:760e4fb6d01c994110507133e08ecd4bdf4d75ee4be77f296a3579796cf73134"}, + {file = "sentry_sdk-1.26.0-py2.py3-none-any.whl", hash = "sha256:0c9f858337ec3781cf4851972ef42bba8c9828aea116b0dbed8f38c5f9a1896c"}, ] [package.dependencies] From 53aa26eddc772a6719bf0da64b0684c333294d05 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 3 Jul 2023 10:38:57 +0000 Subject: [PATCH 174/562] Add a timeout that aborts any Postgres statement taking more than 1 hour. 
(#15853) * Add a timeout to Postgres statements * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/15853.misc | 1 + synapse/storage/engines/postgres.py | 13 +++++++++++++ 2 files changed, 14 insertions(+) create mode 100644 changelog.d/15853.misc diff --git a/changelog.d/15853.misc b/changelog.d/15853.misc new file mode 100644 index 0000000000..3e9516b1ad --- /dev/null +++ b/changelog.d/15853.misc @@ -0,0 +1 @@ +Add a timeout that aborts any Postgres statement taking more than 1 hour. \ No newline at end of file diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index b350f57ccb..05a72dc554 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -45,6 +45,15 @@ class PostgresEngine( psycopg2.extensions.register_adapter(bytes, _disable_bytes_adapter) self.synchronous_commit: bool = database_config.get("synchronous_commit", True) + # Set the statement timeout to 1 hour by default. + # Any query taking more than 1 hour should probably be considered a bug; + # most of the time this is a sign that work needs to be split up or that + # some degenerate query plan has been created and the client has probably + # timed out/walked off anyway. + # This is in milliseconds. + self.statement_timeout: Optional[int] = database_config.get( + "statement_timeout", 60 * 60 * 1000 + ) self._version: Optional[int] = None # unknown as yet self.isolation_level_map: Mapping[int, int] = { @@ -157,6 +166,10 @@ class PostgresEngine( if not self.synchronous_commit: cursor.execute("SET synchronous_commit TO OFF") + # Abort really long-running statements and turn them into errors. + if self.statement_timeout is not None: + cursor.execute("SET statement_timeout TO ?", (self.statement_timeout,)) + cursor.close() db_conn.commit() From cd8b73aa97cb6d06079f67aa73ff24679038d7c6 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 3 Jul 2023 10:39:52 +0000 Subject: [PATCH 175/562] Fix the `devenv up` configuration which was ignoring the config overrides. (#15854) * Fix use of config override directory in `devenv up` `--config-directory` is for the generate config script; `-c` is for usage * Add homeserver config override directory to gitignore * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- .gitignore | 1 + changelog.d/15854.misc | 1 + flake.nix | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15854.misc diff --git a/.gitignore b/.gitignore index 8cf504324b..a89f149ec1 100644 --- a/.gitignore +++ b/.gitignore @@ -34,6 +34,7 @@ __pycache__/ /logs /media_store/ /uploads +/homeserver-config-overrides.d # For direnv users /.envrc diff --git a/changelog.d/15854.misc b/changelog.d/15854.misc new file mode 100644 index 0000000000..8c940dd9c5 --- /dev/null +++ b/changelog.d/15854.misc @@ -0,0 +1 @@ +Fix the `devenv up` configuration which was ignoring the config overrides. \ No newline at end of file diff --git a/flake.nix b/flake.nix index 8b7c0e6a5b..bb42c9ff9b 100644 --- a/flake.nix +++ b/flake.nix @@ -178,7 +178,7 @@ EOF ''; # Start synapse when `devenv up` is run. 
- processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml --config-directory homeserver-config-overrides.d"; + processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml -c homeserver-config-overrides.d"; # Define the perl modules we require to run SyTest. # From 07d7cbfe69c239a7ffe5668c1166799370eef0d6 Mon Sep 17 00:00:00 2001 From: pacien Date: Mon, 3 Jul 2023 16:39:38 +0200 Subject: [PATCH 176/562] devices: use combined ANY clause for faster cleanup (#15861) Old device entries for the same user were being removed in individual SQL commands, making the batch take way longer than necessary. This combines the commands into a single one with an IN/ANY clause. Example of a log entry before the change, regularly observed with "log_min_duration_statement = 10000" in PostgreSQL's config: LOG: duration: 42538.282 ms statement: DELETE FROM device_lists_stream WHERE user_id = '@someone' AND device_id = 'someid1' AND stream_id < 123456789 ; DELETE FROM device_lists_stream WHERE user_id = '@someone' AND device_id = 'someid2' AND stream_id < 123456789 ; [repeated for each device ID of that user, potentially a lot...] With the patch applied on my instance for the past couple of days, I no longer notice overly long statements of that particular kind. Signed-off-by: pacien --- changelog.d/15861.misc | 1 + synapse/storage/databases/main/devices.py | 14 +++++++++----- 2 files changed, 10 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15861.misc diff --git a/changelog.d/15861.misc b/changelog.d/15861.misc new file mode 100644 index 0000000000..6f320eab81 --- /dev/null +++ b/changelog.d/15861.misc @@ -0,0 +1 @@ +Optimised cleanup of old entries in device_lists_stream. diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index f677d048aa..d9df437e51 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1950,12 +1950,16 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): # Delete older entries in the table, as we really only care about # when the latest change happened. - txn.execute_batch( - """ + cleanup_obsolete_stmt = """ DELETE FROM device_lists_stream - WHERE user_id = ? AND device_id = ? AND stream_id < ? - """, - [(user_id, device_id, min_stream_id) for device_id in device_ids], + WHERE user_id = ? AND stream_id < ? AND %s + """ + device_ids_clause, device_ids_args = make_in_list_sql_clause( + txn.database_engine, "device_id", device_ids + ) + txn.execute( + cleanup_obsolete_stmt % (device_ids_clause,), + [user_id, min_stream_id] + device_ids_args, ) self.db_pool.simple_insert_many_txn( From 670d590f8a167d909f8d76a5ecbd240d9609d2b0 Mon Sep 17 00:00:00 2001 From: Paarth Shah Date: Tue, 4 Jul 2023 00:33:24 -0700 Subject: [PATCH 177/562] Pin `pydantic` to <2.0.0 (#15862) Signed-off-by: Paarth Shah --- changelog.d/15862.bugfix | 3 ++ poetry.lock | 74 ++++++++++++++++++++-------------------- pyproject.toml | 3 +- 3 files changed, 42 insertions(+), 38 deletions(-) create mode 100644 changelog.d/15862.bugfix diff --git a/changelog.d/15862.bugfix b/changelog.d/15862.bugfix new file mode 100644 index 0000000000..8eb6aa9a7f --- /dev/null +++ b/changelog.d/15862.bugfix @@ -0,0 +1,3 @@ +Pin `pydantic` to ^1.7.4 to avoid backwards-incompatible API changes from the 2.0.0 release. +Resolves https://github.com/matrix-org/synapse/issues/15858. +Contributed by @PaarthShah.
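A rough sketch of the batching idea behind the `device_lists_stream` cleanup above (illustrative only, not part of any patch in this series): Synapse's `make_in_list_sql_clause` helper expands to `device_id = ANY(?)` on PostgreSQL and to a plain `device_id IN (?, ...)` list on SQLite, so a standalone equivalent of the combined statement could look like this, with the helper inlined:

    from typing import Any, List, Tuple

    def build_combined_cleanup(
        user_id: str, min_stream_id: int, device_ids: List[str]
    ) -> Tuple[str, List[Any]]:
        # One DELETE covering every stale device ID, instead of one
        # statement per device as in the old execute_batch() version.
        placeholders = ", ".join("?" for _ in device_ids)
        sql = (
            "DELETE FROM device_lists_stream"
            " WHERE user_id = ? AND stream_id < ?"
            f" AND device_id IN ({placeholders})"
        )
        return sql, [user_id, min_stream_id, *device_ids]

A single round trip then replaces the long run of per-device DELETEs seen in the slow-query log quoted in the commit message.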
diff --git a/poetry.lock b/poetry.lock index 2802e42cf4..9aaf5c7de7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1829,47 +1829,47 @@ files = [ [[package]] name = "pydantic" -version = "1.10.9" +version = "1.10.10" description = "Data validation and settings management using python type hints" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e692dec4a40bfb40ca530e07805b1208c1de071a18d26af4a2a0d79015b352ca"}, - {file = "pydantic-1.10.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c52eb595db83e189419bf337b59154bdcca642ee4b2a09e5d7797e41ace783f"}, - {file = "pydantic-1.10.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:939328fd539b8d0edf244327398a667b6b140afd3bf7e347cf9813c736211896"}, - {file = "pydantic-1.10.9-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b48d3d634bca23b172f47f2335c617d3fcb4b3ba18481c96b7943a4c634f5c8d"}, - {file = "pydantic-1.10.9-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:f0b7628fb8efe60fe66fd4adadd7ad2304014770cdc1f4934db41fe46cc8825f"}, - {file = "pydantic-1.10.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e1aa5c2410769ca28aa9a7841b80d9d9a1c5f223928ca8bec7e7c9a34d26b1d4"}, - {file = "pydantic-1.10.9-cp310-cp310-win_amd64.whl", hash = "sha256:eec39224b2b2e861259d6f3c8b6290d4e0fbdce147adb797484a42278a1a486f"}, - {file = "pydantic-1.10.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d111a21bbbfd85c17248130deac02bbd9b5e20b303338e0dbe0faa78330e37e0"}, - {file = "pydantic-1.10.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e9aec8627a1a6823fc62fb96480abe3eb10168fd0d859ee3d3b395105ae19a7"}, - {file = "pydantic-1.10.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07293ab08e7b4d3c9d7de4949a0ea571f11e4557d19ea24dd3ae0c524c0c334d"}, - {file = "pydantic-1.10.9-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ee829b86ce984261d99ff2fd6e88f2230068d96c2a582f29583ed602ef3fc2c"}, - {file = "pydantic-1.10.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4b466a23009ff5cdd7076eb56aca537c745ca491293cc38e72bf1e0e00de5b91"}, - {file = "pydantic-1.10.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7847ca62e581e6088d9000f3c497267868ca2fa89432714e21a4fb33a04d52e8"}, - {file = "pydantic-1.10.9-cp311-cp311-win_amd64.whl", hash = "sha256:7845b31959468bc5b78d7b95ec52fe5be32b55d0d09983a877cca6aedc51068f"}, - {file = "pydantic-1.10.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:517a681919bf880ce1dac7e5bc0c3af1e58ba118fd774da2ffcd93c5f96eaece"}, - {file = "pydantic-1.10.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67195274fd27780f15c4c372f4ba9a5c02dad6d50647b917b6a92bf00b3d301a"}, - {file = "pydantic-1.10.9-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2196c06484da2b3fded1ab6dbe182bdabeb09f6318b7fdc412609ee2b564c49a"}, - {file = "pydantic-1.10.9-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6257bb45ad78abacda13f15bde5886efd6bf549dd71085e64b8dcf9919c38b60"}, - {file = "pydantic-1.10.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3283b574b01e8dbc982080d8287c968489d25329a463b29a90d4157de4f2baaf"}, - {file = "pydantic-1.10.9-cp37-cp37m-win_amd64.whl", hash = "sha256:5f8bbaf4013b9a50e8100333cc4e3fa2f81214033e05ac5aa44fa24a98670a29"}, - {file = 
"pydantic-1.10.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9cd67fb763248cbe38f0593cd8611bfe4b8ad82acb3bdf2b0898c23415a1f82"}, - {file = "pydantic-1.10.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f50e1764ce9353be67267e7fd0da08349397c7db17a562ad036aa7c8f4adfdb6"}, - {file = "pydantic-1.10.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73ef93e5e1d3c8e83f1ff2e7fdd026d9e063c7e089394869a6e2985696693766"}, - {file = "pydantic-1.10.9-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:128d9453d92e6e81e881dd7e2484e08d8b164da5507f62d06ceecf84bf2e21d3"}, - {file = "pydantic-1.10.9-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ad428e92ab68798d9326bb3e5515bc927444a3d71a93b4a2ca02a8a5d795c572"}, - {file = "pydantic-1.10.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fab81a92f42d6d525dd47ced310b0c3e10c416bbfae5d59523e63ea22f82b31e"}, - {file = "pydantic-1.10.9-cp38-cp38-win_amd64.whl", hash = "sha256:963671eda0b6ba6926d8fc759e3e10335e1dc1b71ff2a43ed2efd6996634dafb"}, - {file = "pydantic-1.10.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:970b1bdc6243ef663ba5c7e36ac9ab1f2bfecb8ad297c9824b542d41a750b298"}, - {file = "pydantic-1.10.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7e1d5290044f620f80cf1c969c542a5468f3656de47b41aa78100c5baa2b8276"}, - {file = "pydantic-1.10.9-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83fcff3c7df7adff880622a98022626f4f6dbce6639a88a15a3ce0f96466cb60"}, - {file = "pydantic-1.10.9-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0da48717dc9495d3a8f215e0d012599db6b8092db02acac5e0d58a65248ec5bc"}, - {file = "pydantic-1.10.9-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0a2aabdc73c2a5960e87c3ffebca6ccde88665616d1fd6d3db3178ef427b267a"}, - {file = "pydantic-1.10.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9863b9420d99dfa9c064042304868e8ba08e89081428a1c471858aa2af6f57c4"}, - {file = "pydantic-1.10.9-cp39-cp39-win_amd64.whl", hash = "sha256:e7c9900b43ac14110efa977be3da28931ffc74c27e96ee89fbcaaf0b0fe338e1"}, - {file = "pydantic-1.10.9-py3-none-any.whl", hash = "sha256:6cafde02f6699ce4ff643417d1a9223716ec25e228ddc3b436fe7e2d25a1f305"}, - {file = "pydantic-1.10.9.tar.gz", hash = "sha256:95c70da2cd3b6ddf3b9645ecaa8d98f3d80c606624b6d245558d202cd23ea3be"}, + {file = "pydantic-1.10.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:adad1ee4ab9888f12dac2529276704e719efcf472e38df7813f5284db699b4ec"}, + {file = "pydantic-1.10.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7a7db03339893feef2092ff7b1afc9497beed15ebd4af84c3042a74abce02d48"}, + {file = "pydantic-1.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67b3714b97ff84b2689654851c2426389bcabfac9080617bcf4306c69db606f6"}, + {file = "pydantic-1.10.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edfdf0a5abc5c9bf2052ebaec20e67abd52e92d257e4f2d30e02c354ed3e6030"}, + {file = "pydantic-1.10.10-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20a3b30fd255eeeb63caa9483502ba96b7795ce5bf895c6a179b3d909d9f53a6"}, + {file = "pydantic-1.10.10-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:db4c7f7e60ca6f7d6c1785070f3e5771fcb9b2d88546e334d2f2c3934d949028"}, + {file = "pydantic-1.10.10-cp310-cp310-win_amd64.whl", hash = "sha256:a2d5be50ac4a0976817144c7d653e34df2f9436d15555189f5b6f61161d64183"}, + {file = 
"pydantic-1.10.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:566a04ba755e8f701b074ffb134ddb4d429f75d5dced3fbd829a527aafe74c71"}, + {file = "pydantic-1.10.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f79db3652ed743309f116ba863dae0c974a41b688242482638b892246b7db21d"}, + {file = "pydantic-1.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c62376890b819bebe3c717a9ac841a532988372b7e600e76f75c9f7c128219d5"}, + {file = "pydantic-1.10.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4870f13a4fafd5bc3e93cff3169222534fad867918b188e83ee0496452978437"}, + {file = "pydantic-1.10.10-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:990027e77cda6072a566e433b6962ca3b96b4f3ae8bd54748e9d62a58284d9d7"}, + {file = "pydantic-1.10.10-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8c40964596809eb616d94f9c7944511f620a1103d63d5510440ed2908fc410af"}, + {file = "pydantic-1.10.10-cp311-cp311-win_amd64.whl", hash = "sha256:ea9eebc2ebcba3717e77cdeee3f6203ffc0e78db5f7482c68b1293e8cc156e5e"}, + {file = "pydantic-1.10.10-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:762aa598f79b4cac2f275d13336b2dd8662febee2a9c450a49a2ab3bec4b385f"}, + {file = "pydantic-1.10.10-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dab5219659f95e357d98d70577b361383057fb4414cfdb587014a5f5c595f7b"}, + {file = "pydantic-1.10.10-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3d4ee957a727ccb5a36f1b0a6dbd9fad5dedd2a41eada99a8df55c12896e18d"}, + {file = "pydantic-1.10.10-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b69f9138dec566962ec65623c9d57bee44412d2fc71065a5f3ebb3820bdeee96"}, + {file = "pydantic-1.10.10-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7aa75d1bd9cc275cf9782f50f60cddaf74cbaae19b6ada2a28e737edac420312"}, + {file = "pydantic-1.10.10-cp37-cp37m-win_amd64.whl", hash = "sha256:9f62a727f5c590c78c2d12fda302d1895141b767c6488fe623098f8792255fe5"}, + {file = "pydantic-1.10.10-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aac218feb4af73db8417ca7518fb3bade4534fcca6e3fb00f84966811dd94450"}, + {file = "pydantic-1.10.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:88546dc10a40b5b52cae87d64666787aeb2878f9a9b37825aedc2f362e7ae1da"}, + {file = "pydantic-1.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c41bbaae89e32fc582448e71974de738c055aef5ab474fb25692981a08df808a"}, + {file = "pydantic-1.10.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b71bd504d1573b0b722ae536e8ffb796bedeef978979d076bf206e77dcc55a5"}, + {file = "pydantic-1.10.10-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e088e3865a2270ecbc369924cd7d9fbc565667d9158e7f304e4097ebb9cf98dd"}, + {file = "pydantic-1.10.10-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3403a090db45d4027d2344859d86eb797484dfda0706cf87af79ace6a35274ef"}, + {file = "pydantic-1.10.10-cp38-cp38-win_amd64.whl", hash = "sha256:e0014e29637125f4997c174dd6167407162d7af0da73414a9340461ea8573252"}, + {file = "pydantic-1.10.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9965e49c6905840e526e5429b09e4c154355b6ecc0a2f05492eda2928190311d"}, + {file = "pydantic-1.10.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:748d10ab6089c5d196e1c8be9de48274f71457b01e59736f7a09c9dc34f51887"}, + {file = "pydantic-1.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:86936c383f7c38fd26d35107eb669c85d8f46dfceae873264d9bab46fe1c7dde"}, + {file = "pydantic-1.10.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a26841be620309a9697f5b1ffc47dce74909e350c5315ccdac7a853484d468a"}, + {file = "pydantic-1.10.10-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:409b810f387610cc7405ab2fa6f62bdf7ea485311845a242ebc0bd0496e7e5ac"}, + {file = "pydantic-1.10.10-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ce937a2a2c020bcad1c9fde02892392a1123de6dda906ddba62bfe8f3e5989a2"}, + {file = "pydantic-1.10.10-cp39-cp39-win_amd64.whl", hash = "sha256:37ebddef68370e6f26243acc94de56d291e01227a67b2ace26ea3543cf53dd5f"}, + {file = "pydantic-1.10.10-py3-none-any.whl", hash = "sha256:a5939ec826f7faec434e2d406ff5e4eaf1716eb1f247d68cd3d0b3612f7b4c8a"}, + {file = "pydantic-1.10.10.tar.gz", hash = "sha256:3b8d5bd97886f9eb59260594207c9f57dce14a6f869c6ceea90188715d29921a"}, ] [package.dependencies] diff --git a/pyproject.toml b/pyproject.toml index fc47b1ef71..67ee3f1738 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -207,7 +207,8 @@ packaging = ">=16.1" # which shipped in Python 3.8. This corresponds to version 1.4 of the backport. importlib_metadata = { version = ">=1.4", python = "<3.8" } # This is the most recent version of Pydantic with available on common distros. -pydantic = ">=1.7.4" +# We are currently incompatible with >=2.0.0: (https://github.com/matrix-org/synapse/issues/15858) +pydantic = "^1.7.4" # This is for building the rust components during "poetry install", which # currently ignores the `build-system.requires` directive (c.f. From 649848627c50777f8c93e8b980e903ab39ea019a Mon Sep 17 00:00:00 2001 From: Paarth Shah Date: Tue, 4 Jul 2023 00:33:24 -0700 Subject: [PATCH 178/562] Pin `pydantic` to <2.0.0 (#15862) Signed-off-by: Paarth Shah --- changelog.d/15862.bugfix | 3 ++ poetry.lock | 74 ++++++++++++++++++++-------------------- pyproject.toml | 3 +- 3 files changed, 42 insertions(+), 38 deletions(-) create mode 100644 changelog.d/15862.bugfix diff --git a/changelog.d/15862.bugfix b/changelog.d/15862.bugfix new file mode 100644 index 0000000000..8eb6aa9a7f --- /dev/null +++ b/changelog.d/15862.bugfix @@ -0,0 +1,3 @@ +Pin `pydantic` to ^=1.7.4 to avoid backwards-incompatible API changes from the 2.0.0 release. +Resolves https://github.com/matrix-org/synapse/issues/15858. +Contributed by @PaarthShah. 
diff --git a/poetry.lock b/poetry.lock index ee19c246f3..5e0d192c96 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1829,47 +1829,47 @@ files = [ [[package]] name = "pydantic" -version = "1.10.9" +version = "1.10.10" description = "Data validation and settings management using python type hints" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e692dec4a40bfb40ca530e07805b1208c1de071a18d26af4a2a0d79015b352ca"}, - {file = "pydantic-1.10.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c52eb595db83e189419bf337b59154bdcca642ee4b2a09e5d7797e41ace783f"}, - {file = "pydantic-1.10.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:939328fd539b8d0edf244327398a667b6b140afd3bf7e347cf9813c736211896"}, - {file = "pydantic-1.10.9-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b48d3d634bca23b172f47f2335c617d3fcb4b3ba18481c96b7943a4c634f5c8d"}, - {file = "pydantic-1.10.9-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:f0b7628fb8efe60fe66fd4adadd7ad2304014770cdc1f4934db41fe46cc8825f"}, - {file = "pydantic-1.10.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e1aa5c2410769ca28aa9a7841b80d9d9a1c5f223928ca8bec7e7c9a34d26b1d4"}, - {file = "pydantic-1.10.9-cp310-cp310-win_amd64.whl", hash = "sha256:eec39224b2b2e861259d6f3c8b6290d4e0fbdce147adb797484a42278a1a486f"}, - {file = "pydantic-1.10.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d111a21bbbfd85c17248130deac02bbd9b5e20b303338e0dbe0faa78330e37e0"}, - {file = "pydantic-1.10.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e9aec8627a1a6823fc62fb96480abe3eb10168fd0d859ee3d3b395105ae19a7"}, - {file = "pydantic-1.10.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07293ab08e7b4d3c9d7de4949a0ea571f11e4557d19ea24dd3ae0c524c0c334d"}, - {file = "pydantic-1.10.9-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ee829b86ce984261d99ff2fd6e88f2230068d96c2a582f29583ed602ef3fc2c"}, - {file = "pydantic-1.10.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4b466a23009ff5cdd7076eb56aca537c745ca491293cc38e72bf1e0e00de5b91"}, - {file = "pydantic-1.10.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7847ca62e581e6088d9000f3c497267868ca2fa89432714e21a4fb33a04d52e8"}, - {file = "pydantic-1.10.9-cp311-cp311-win_amd64.whl", hash = "sha256:7845b31959468bc5b78d7b95ec52fe5be32b55d0d09983a877cca6aedc51068f"}, - {file = "pydantic-1.10.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:517a681919bf880ce1dac7e5bc0c3af1e58ba118fd774da2ffcd93c5f96eaece"}, - {file = "pydantic-1.10.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67195274fd27780f15c4c372f4ba9a5c02dad6d50647b917b6a92bf00b3d301a"}, - {file = "pydantic-1.10.9-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2196c06484da2b3fded1ab6dbe182bdabeb09f6318b7fdc412609ee2b564c49a"}, - {file = "pydantic-1.10.9-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6257bb45ad78abacda13f15bde5886efd6bf549dd71085e64b8dcf9919c38b60"}, - {file = "pydantic-1.10.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3283b574b01e8dbc982080d8287c968489d25329a463b29a90d4157de4f2baaf"}, - {file = "pydantic-1.10.9-cp37-cp37m-win_amd64.whl", hash = "sha256:5f8bbaf4013b9a50e8100333cc4e3fa2f81214033e05ac5aa44fa24a98670a29"}, - {file = 
"pydantic-1.10.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9cd67fb763248cbe38f0593cd8611bfe4b8ad82acb3bdf2b0898c23415a1f82"}, - {file = "pydantic-1.10.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f50e1764ce9353be67267e7fd0da08349397c7db17a562ad036aa7c8f4adfdb6"}, - {file = "pydantic-1.10.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73ef93e5e1d3c8e83f1ff2e7fdd026d9e063c7e089394869a6e2985696693766"}, - {file = "pydantic-1.10.9-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:128d9453d92e6e81e881dd7e2484e08d8b164da5507f62d06ceecf84bf2e21d3"}, - {file = "pydantic-1.10.9-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ad428e92ab68798d9326bb3e5515bc927444a3d71a93b4a2ca02a8a5d795c572"}, - {file = "pydantic-1.10.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fab81a92f42d6d525dd47ced310b0c3e10c416bbfae5d59523e63ea22f82b31e"}, - {file = "pydantic-1.10.9-cp38-cp38-win_amd64.whl", hash = "sha256:963671eda0b6ba6926d8fc759e3e10335e1dc1b71ff2a43ed2efd6996634dafb"}, - {file = "pydantic-1.10.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:970b1bdc6243ef663ba5c7e36ac9ab1f2bfecb8ad297c9824b542d41a750b298"}, - {file = "pydantic-1.10.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7e1d5290044f620f80cf1c969c542a5468f3656de47b41aa78100c5baa2b8276"}, - {file = "pydantic-1.10.9-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83fcff3c7df7adff880622a98022626f4f6dbce6639a88a15a3ce0f96466cb60"}, - {file = "pydantic-1.10.9-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0da48717dc9495d3a8f215e0d012599db6b8092db02acac5e0d58a65248ec5bc"}, - {file = "pydantic-1.10.9-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0a2aabdc73c2a5960e87c3ffebca6ccde88665616d1fd6d3db3178ef427b267a"}, - {file = "pydantic-1.10.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9863b9420d99dfa9c064042304868e8ba08e89081428a1c471858aa2af6f57c4"}, - {file = "pydantic-1.10.9-cp39-cp39-win_amd64.whl", hash = "sha256:e7c9900b43ac14110efa977be3da28931ffc74c27e96ee89fbcaaf0b0fe338e1"}, - {file = "pydantic-1.10.9-py3-none-any.whl", hash = "sha256:6cafde02f6699ce4ff643417d1a9223716ec25e228ddc3b436fe7e2d25a1f305"}, - {file = "pydantic-1.10.9.tar.gz", hash = "sha256:95c70da2cd3b6ddf3b9645ecaa8d98f3d80c606624b6d245558d202cd23ea3be"}, + {file = "pydantic-1.10.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:adad1ee4ab9888f12dac2529276704e719efcf472e38df7813f5284db699b4ec"}, + {file = "pydantic-1.10.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7a7db03339893feef2092ff7b1afc9497beed15ebd4af84c3042a74abce02d48"}, + {file = "pydantic-1.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67b3714b97ff84b2689654851c2426389bcabfac9080617bcf4306c69db606f6"}, + {file = "pydantic-1.10.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edfdf0a5abc5c9bf2052ebaec20e67abd52e92d257e4f2d30e02c354ed3e6030"}, + {file = "pydantic-1.10.10-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20a3b30fd255eeeb63caa9483502ba96b7795ce5bf895c6a179b3d909d9f53a6"}, + {file = "pydantic-1.10.10-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:db4c7f7e60ca6f7d6c1785070f3e5771fcb9b2d88546e334d2f2c3934d949028"}, + {file = "pydantic-1.10.10-cp310-cp310-win_amd64.whl", hash = "sha256:a2d5be50ac4a0976817144c7d653e34df2f9436d15555189f5b6f61161d64183"}, + {file = 
"pydantic-1.10.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:566a04ba755e8f701b074ffb134ddb4d429f75d5dced3fbd829a527aafe74c71"}, + {file = "pydantic-1.10.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f79db3652ed743309f116ba863dae0c974a41b688242482638b892246b7db21d"}, + {file = "pydantic-1.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c62376890b819bebe3c717a9ac841a532988372b7e600e76f75c9f7c128219d5"}, + {file = "pydantic-1.10.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4870f13a4fafd5bc3e93cff3169222534fad867918b188e83ee0496452978437"}, + {file = "pydantic-1.10.10-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:990027e77cda6072a566e433b6962ca3b96b4f3ae8bd54748e9d62a58284d9d7"}, + {file = "pydantic-1.10.10-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8c40964596809eb616d94f9c7944511f620a1103d63d5510440ed2908fc410af"}, + {file = "pydantic-1.10.10-cp311-cp311-win_amd64.whl", hash = "sha256:ea9eebc2ebcba3717e77cdeee3f6203ffc0e78db5f7482c68b1293e8cc156e5e"}, + {file = "pydantic-1.10.10-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:762aa598f79b4cac2f275d13336b2dd8662febee2a9c450a49a2ab3bec4b385f"}, + {file = "pydantic-1.10.10-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dab5219659f95e357d98d70577b361383057fb4414cfdb587014a5f5c595f7b"}, + {file = "pydantic-1.10.10-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3d4ee957a727ccb5a36f1b0a6dbd9fad5dedd2a41eada99a8df55c12896e18d"}, + {file = "pydantic-1.10.10-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b69f9138dec566962ec65623c9d57bee44412d2fc71065a5f3ebb3820bdeee96"}, + {file = "pydantic-1.10.10-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7aa75d1bd9cc275cf9782f50f60cddaf74cbaae19b6ada2a28e737edac420312"}, + {file = "pydantic-1.10.10-cp37-cp37m-win_amd64.whl", hash = "sha256:9f62a727f5c590c78c2d12fda302d1895141b767c6488fe623098f8792255fe5"}, + {file = "pydantic-1.10.10-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aac218feb4af73db8417ca7518fb3bade4534fcca6e3fb00f84966811dd94450"}, + {file = "pydantic-1.10.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:88546dc10a40b5b52cae87d64666787aeb2878f9a9b37825aedc2f362e7ae1da"}, + {file = "pydantic-1.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c41bbaae89e32fc582448e71974de738c055aef5ab474fb25692981a08df808a"}, + {file = "pydantic-1.10.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b71bd504d1573b0b722ae536e8ffb796bedeef978979d076bf206e77dcc55a5"}, + {file = "pydantic-1.10.10-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e088e3865a2270ecbc369924cd7d9fbc565667d9158e7f304e4097ebb9cf98dd"}, + {file = "pydantic-1.10.10-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3403a090db45d4027d2344859d86eb797484dfda0706cf87af79ace6a35274ef"}, + {file = "pydantic-1.10.10-cp38-cp38-win_amd64.whl", hash = "sha256:e0014e29637125f4997c174dd6167407162d7af0da73414a9340461ea8573252"}, + {file = "pydantic-1.10.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9965e49c6905840e526e5429b09e4c154355b6ecc0a2f05492eda2928190311d"}, + {file = "pydantic-1.10.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:748d10ab6089c5d196e1c8be9de48274f71457b01e59736f7a09c9dc34f51887"}, + {file = "pydantic-1.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:86936c383f7c38fd26d35107eb669c85d8f46dfceae873264d9bab46fe1c7dde"}, + {file = "pydantic-1.10.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a26841be620309a9697f5b1ffc47dce74909e350c5315ccdac7a853484d468a"}, + {file = "pydantic-1.10.10-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:409b810f387610cc7405ab2fa6f62bdf7ea485311845a242ebc0bd0496e7e5ac"}, + {file = "pydantic-1.10.10-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ce937a2a2c020bcad1c9fde02892392a1123de6dda906ddba62bfe8f3e5989a2"}, + {file = "pydantic-1.10.10-cp39-cp39-win_amd64.whl", hash = "sha256:37ebddef68370e6f26243acc94de56d291e01227a67b2ace26ea3543cf53dd5f"}, + {file = "pydantic-1.10.10-py3-none-any.whl", hash = "sha256:a5939ec826f7faec434e2d406ff5e4eaf1716eb1f247d68cd3d0b3612f7b4c8a"}, + {file = "pydantic-1.10.10.tar.gz", hash = "sha256:3b8d5bd97886f9eb59260594207c9f57dce14a6f869c6ceea90188715d29921a"}, ] [package.dependencies] diff --git a/pyproject.toml b/pyproject.toml index fc47b1ef71..67ee3f1738 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -207,7 +207,8 @@ packaging = ">=16.1" # which shipped in Python 3.8. This corresponds to version 1.4 of the backport. importlib_metadata = { version = ">=1.4", python = "<3.8" } # This is the most recent version of Pydantic with available on common distros. -pydantic = ">=1.7.4" +# We are currently incompatible with >=2.0.0: (https://github.com/matrix-org/synapse/issues/15858) +pydantic = "^1.7.4" # This is for building the rust components during "poetry install", which # currently ignores the `build-system.requires` directive (c.f. From 664ba140807750137e2e383eceaa98471f47b575 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 4 Jul 2023 16:25:33 +0100 Subject: [PATCH 179/562] 1.87.0 --- CHANGES.md | 13 +++++++++++++ changelog.d/15846.misc | 1 - changelog.d/15862.bugfix | 3 --- debian/changelog | 6 ++++++ pyproject.toml | 2 +- 5 files changed, 20 insertions(+), 5 deletions(-) delete mode 100644 changelog.d/15846.misc delete mode 100644 changelog.d/15862.bugfix diff --git a/CHANGES.md b/CHANGES.md index 2765045a13..134537f6ef 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,16 @@ +# Synapse 1.87.0 (2023-07-04) + +### Bugfixes + +- Pin `pydantic` to `^1.7.4` to avoid backwards-incompatible API changes from the 2.0.0 release. + Resolves https://github.com/matrix-org/synapse/issues/15858. + Contributed by @PaarthShah. ([\#15862](https://github.com/matrix-org/synapse/issues/15862)) + +### Internal Changes + +- Split out 2022 changes from the changelog so the rendered version in GitHub doesn't timeout as much. ([\#15846](https://github.com/matrix-org/synapse/issues/15846)) + + # Synapse 1.87.0rc1 (2023-06-27) Please note that this will be the last release of Synapse that is compatible with diff --git a/changelog.d/15846.misc b/changelog.d/15846.misc deleted file mode 100644 index f1c31d6663..0000000000 --- a/changelog.d/15846.misc +++ /dev/null @@ -1 +0,0 @@ -Split out 2022 changes from the changelog so the rendered version in GitHub doesn't timeout as much. diff --git a/changelog.d/15862.bugfix b/changelog.d/15862.bugfix deleted file mode 100644 index 8eb6aa9a7f..0000000000 --- a/changelog.d/15862.bugfix +++ /dev/null @@ -1,3 +0,0 @@ -Pin `pydantic` to ^=1.7.4 to avoid backwards-incompatible API changes from the 2.0.0 release. -Resolves https://github.com/matrix-org/synapse/issues/15858. -Contributed by @PaarthShah. 
diff --git a/debian/changelog b/debian/changelog index 2fa8d30fe1..0d9216bee8 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.87.0) stable; urgency=medium + + * New Synapse release 1.87.0. + + -- Synapse Packaging team Tue, 04 Jul 2023 16:24:00 +0100 + matrix-synapse-py3 (1.87.0~rc1) stable; urgency=medium * New synapse release 1.87.0rc1. diff --git a/pyproject.toml b/pyproject.toml index 67ee3f1738..192a07756b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.87.0rc1" +version = "1.87.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 718d7dfef2f8668c4708536d3701030eea3c207d Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 4 Jul 2023 16:26:50 +0100 Subject: [PATCH 180/562] Move warning up to the top --- CHANGES.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 134537f6ef..4c8ecbb352 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,8 @@ # Synapse 1.87.0 (2023-07-04) +Please note that this will be the last release of Synapse that is compatible with +Python 3.7 and earlier. + ### Bugfixes - Pin `pydantic` to `^1.7.4` to avoid backwards-incompatible API changes from the 2.0.0 release. @@ -13,9 +16,6 @@ # Synapse 1.87.0rc1 (2023-06-27) -Please note that this will be the last release of Synapse that is compatible with -Python 3.7 and earlier. - ### Features - Improve `/messages` response time by avoiding backfill when we already have messages to return. ([\#15737](https://github.com/matrix-org/synapse/issues/15737)) From 1294d10c704a891392caf8c358fa8e00b1492874 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 4 Jul 2023 16:34:41 +0100 Subject: [PATCH 181/562] Add notes about Python 3.7 EOL --- CHANGES.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 4c8ecbb352..860e89ed99 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -2,6 +2,8 @@ Please note that this will be the last release of Synapse that is compatible with Python 3.7 and earlier. +This is due to Python 3.7 now having reached End of Life; see our [deprecation policy](https://matrix-org.github.io/synapse/v1.87/deprecation_policy.html) +for more details. ### Bugfixes From c8e81898b66086ee8bdfd18bd24452c26033e480 Mon Sep 17 00:00:00 2001 From: Michael Weimann Date: Wed, 5 Jul 2023 00:03:20 +0200 Subject: [PATCH 182/562] Add not_user_type param to the list accounts admin API (#15844) Signed-off-by: Michael Weimann --- changelog.d/15844.feature | 1 + docs/admin_api/user_admin_api.md | 3 + synapse/rest/admin/users.py | 9 +++ synapse/storage/databases/main/__init__.py | 37 ++++++++++ tests/rest/admin/test_user.py | 78 ++++++++++++++++++++++ 5 files changed, 128 insertions(+) create mode 100644 changelog.d/15844.feature diff --git a/changelog.d/15844.feature b/changelog.d/15844.feature new file mode 100644 index 0000000000..c220055d41 --- /dev/null +++ b/changelog.d/15844.feature @@ -0,0 +1 @@ +Add `not_user_type` param to the list accounts admin API. diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 229942b311..f17e60b1cb 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -242,6 +242,9 @@ The following parameters should be set in the URL: - `dir` - Direction of media order. 
Either `f` for forwards or `b` for backwards. Setting this value to `b` will reverse the above sort order. Defaults to `f`. +- `not_user_type` - Exclude certain user types, such as bot users, from the request. + Can be provided multiple times. Possible values are `bot`, `support` or "empty string". + "empty string" here means to exclude users without a type. Caution. The database only has indexes on the columns `name` and `creation_ts`. This means that if a different sort order is used (`is_guest`, `admin`, diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 407fe9c804..e0257daa75 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -28,6 +28,7 @@ from synapse.http.servlet import ( parse_integer, parse_json_object_from_request, parse_string, + parse_strings_from_args, ) from synapse.http.site import SynapseRequest from synapse.rest.admin._base import ( @@ -64,6 +65,9 @@ class UsersRestServletV2(RestServlet): The parameter `guests` can be used to exclude guest users. The parameter `deactivated` can be used to include deactivated users. The parameter `order_by` can be used to order the result. + The parameter `not_user_type` can be used to exclude certain user types. + Possible values are `bot`, `support` or "empty string". + "empty string" here means to exclude users without a type. """ def __init__(self, hs: "HomeServer"): @@ -131,6 +135,10 @@ class UsersRestServletV2(RestServlet): direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS) + # twisted.web.server.Request.args is incorrectly defined as Optional[Any] + args: Dict[bytes, List[bytes]] = request.args # type: ignore + not_user_types = parse_strings_from_args(args, "not_user_type") + users, total = await self.store.get_users_paginate( start, limit, @@ -141,6 +149,7 @@ class UsersRestServletV2(RestServlet): order_by, direction, approved, + not_user_types, ) # If support for MSC3866 is not enabled, don't show the approval flag. diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 3a10c265c9..80c0304b19 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -19,6 +19,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, cast from synapse.api.constants import Direction from synapse.config.homeserver import HomeServerConfig +from synapse.storage._base import make_in_list_sql_clause from synapse.storage.database import ( DatabasePool, LoggingDatabaseConnection, @@ -170,6 +171,7 @@ class DataStore( order_by: str = UserSortOrder.NAME.value, direction: Direction = Direction.FORWARDS, approved: bool = True, + not_user_types: Optional[List[str]] = None, ) -> Tuple[List[JsonDict], int]: """Function to retrieve a paginated list of users from users list. This will return a json list of users and the @@ -185,6 +187,7 @@ class DataStore( order_by: the sort order of the returned list direction: sort ascending or descending approved: whether to include approved users + not_user_types: list of user types to exclude Returns: A tuple of a list of mappings from user to information and a count of total users. """ @@ -222,6 +225,40 @@ class DataStore( # be already existing users that we consider as already approved. 
filters.append("approved IS FALSE") + if not_user_types: + if len(not_user_types) == 1 and not_user_types[0] == "": + # Only exclude NULL type users + filters.append("user_type IS NOT NULL") + else: + not_user_types_has_empty = False + not_user_types_without_empty = [] + + for not_user_type in not_user_types: + if not_user_type == "": + not_user_types_has_empty = True + else: + not_user_types_without_empty.append(not_user_type) + + not_user_type_clause, not_user_type_args = make_in_list_sql_clause( + self.database_engine, + "u.user_type", + not_user_types_without_empty, + ) + + if not_user_types_has_empty: + # NULL values should be excluded. + # They evaluate to false > nothing to do here. + filters.append("NOT %s" % (not_user_type_clause)) + else: + # NULL values should *not* be excluded. + # Add a special predicate to the query. + filters.append( + "(NOT %s OR %s IS NULL)" + % (not_user_type_clause, "u.user_type") + ) + + args.extend(not_user_type_args) + where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else "" sql_base = f""" diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 434bb56d44..a17a1bb1d8 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -933,6 +933,84 @@ class UsersListTestCase(unittest.HomeserverTestCase): self.assertEqual(1, len(non_admin_user_ids), non_admin_user_ids) self.assertEqual(not_approved_user, non_admin_user_ids[0]) + def test_filter_not_user_types(self) -> None: + """Tests that the endpoint handles the not_user_types param""" + + regular_user_id = self.register_user("normalo", "secret") + + bot_user_id = self.register_user("robo", "secret") + self.make_request( + "PUT", + "/_synapse/admin/v2/users/" + urllib.parse.quote(bot_user_id), + {"user_type": UserTypes.BOT}, + access_token=self.admin_user_tok, + ) + + support_user_id = self.register_user("foo", "secret") + self.make_request( + "PUT", + "/_synapse/admin/v2/users/" + urllib.parse.quote(support_user_id), + {"user_type": UserTypes.SUPPORT}, + access_token=self.admin_user_tok, + ) + + def test_user_type( + expected_user_ids: List[str], not_user_types: Optional[List[str]] = None + ) -> None: + """Runs a test for the not_user_types param + Args: + expected_user_ids: Ids of the users that are expected to be returned + not_user_types: List of values for the not_user_types param + """ + + user_type_query = "" + + if not_user_types is not None: + user_type_query = "&".join( + [f"not_user_type={u}" for u in not_user_types] + ) + + test_url = f"{self.url}?{user_type_query}" + channel = self.make_request( + "GET", + test_url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code) + self.assertEqual(channel.json_body["total"], len(expected_user_ids)) + self.assertEqual( + expected_user_ids, + [u["name"] for u in channel.json_body["users"]], + ) + + # Request without user_types → all users expected + test_user_type([self.admin_user, support_user_id, regular_user_id, bot_user_id]) + + # Request and exclude bot users + test_user_type( + [self.admin_user, support_user_id, regular_user_id], + not_user_types=[UserTypes.BOT], + ) + + # Request and exclude bot and support users + test_user_type( + [self.admin_user, regular_user_id], + not_user_types=[UserTypes.BOT, UserTypes.SUPPORT], + ) + + # Request and exclude empty user types → only expected the bot and support user + test_user_type([support_user_id, bot_user_id], not_user_types=[""]) + + # Request and exclude empty user types and bots → only expected the support user 
+ test_user_type([support_user_id], not_user_types=["", UserTypes.BOT]) + + # Request and exclude a custom type (neither service nor bot) → expect all users + test_user_type( + [self.admin_user, support_user_id, regular_user_id, bot_user_id], + not_user_types=["custom"], + ) + def test_erasure_status(self) -> None: # Create a new user. user_id = self.register_user("eraseme", "eraseme") From c303eca8cc31e5eb9edb10019f02c3a9e39a47ab Mon Sep 17 00:00:00 2001 From: an0nfunc <40771419+an0nfunc@users.noreply.github.com> Date: Wed, 5 Jul 2023 10:52:12 +0200 Subject: [PATCH 183/562] use Image.LANCZOS instead of Image.ANTIALIAS for thumbnail resize (#15876) Image.ANTIALIAS is not defined in current pillow releases. Since ANTIALIAS was just using LANCZOS anyways, this is just a cosmetic change, but makes synapse work with most recent pillow releases. Signed-off-by: Giovanni Harting <539@idlegandalf.com> --- changelog.d/15876.bugfix | 1 + synapse/media/thumbnailer.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15876.bugfix diff --git a/changelog.d/15876.bugfix b/changelog.d/15876.bugfix new file mode 100644 index 0000000000..9dbae04c4f --- /dev/null +++ b/changelog.d/15876.bugfix @@ -0,0 +1 @@ +Correctly resize thumbnails with pillow version >=10. diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py index f909a4fb9a..73d2272f05 100644 --- a/synapse/media/thumbnailer.py +++ b/synapse/media/thumbnailer.py @@ -131,7 +131,7 @@ class Thumbnailer: else: with self.image: self.image = self.image.convert("RGB") - return self.image.resize((width, height), Image.ANTIALIAS) + return self.image.resize((width, height), Image.LANCZOS) def scale(self, width: int, height: int, output_type: str) -> BytesIO: """Rescales the image to the given dimensions. From 95a96b21eb98c638ae36814ec74ba468226e373c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 Jul 2023 10:43:19 +0100 Subject: [PATCH 184/562] Add foreign key constraint to `event_forward_extremities`. (#15751) --- changelog.d/15751.misc | 1 + synapse/_scripts/synapse_port_db.py | 2 + synapse/storage/background_updates.py | 335 +++++++++++++++++- synapse/storage/database.py | 37 ++ .../databases/main/event_federation.py | 10 + synapse/storage/databases/main/events.py | 12 +- .../78/03event_extremities_constraints.py | 51 +++ tests/storage/test_background_update.py | 227 +++++++++++- tests/storage/test_event_federation.py | 35 +- 9 files changed, 699 insertions(+), 11 deletions(-) create mode 100644 changelog.d/15751.misc create mode 100644 synapse/storage/schema/main/delta/78/03event_extremities_constraints.py diff --git a/changelog.d/15751.misc b/changelog.d/15751.misc new file mode 100644 index 0000000000..e0ecea6c2f --- /dev/null +++ b/changelog.d/15751.misc @@ -0,0 +1 @@ +Add foreign key constraint to `event_forward_extremities`. 
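To see how the constraint machinery added below fits together: a `ForeignKeyConstraint` names the referenced table and the column pairs, and the background updater uses its two clause builders. A sketch of what they produce for the forward-extremities case; the exact table and column names used by the real schema delta are an assumption here, inferred from the patch title:

    from synapse.storage.background_updates import ForeignKeyConstraint

    # Assumed constraint: event_forward_extremities(event_id) -> events(event_id).
    constraint = ForeignKeyConstraint("events", columns=[("event_id", "event_id")])

    # Per-row check used while scanning for (and deleting) violating rows:
    constraint.make_check_clause("event_forward_extremities")
    # -> "EXISTS (SELECT 1 FROM events WHERE event_id = event_forward_extremities.event_id)"

    # Clause used when adding the constraint (as NOT VALID) on PostgreSQL:
    constraint.make_constraint_clause_postgres()
    # -> "FOREIGN KEY (event_id) REFERENCES events (event_id)"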
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index a803ada8ad..e126a2e0c5 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -61,6 +61,7 @@ from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpda from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyBackgroundStore from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore +from synapse.storage.databases.main.event_federation import EventFederationWorkerStore from synapse.storage.databases.main.event_push_actions import EventPushActionsStore from synapse.storage.databases.main.events_bg_updates import ( EventsBackgroundUpdatesStore, @@ -239,6 +240,7 @@ class Store( PresenceBackgroundUpdateStore, ReceiptsBackgroundUpdateStore, RelationsWorkerStore, + EventFederationWorkerStore, ): def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]: return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs) diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index edc97a9d61..5dce0a0159 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -11,8 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import abc import logging -from enum import IntEnum +from enum import Enum, IntEnum from types import TracebackType from typing import ( TYPE_CHECKING, @@ -24,12 +25,16 @@ from typing import ( Iterable, List, Optional, + Sequence, + Tuple, Type, ) import attr +from pydantic import BaseModel from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.storage.engines import PostgresEngine from synapse.storage.types import Connection, Cursor from synapse.types import JsonDict from synapse.util import Clock, json_encoder @@ -48,6 +53,78 @@ DEFAULT_BATCH_SIZE_CALLBACK = Callable[[str, str], Awaitable[int]] MIN_BATCH_SIZE_CALLBACK = Callable[[str, str], Awaitable[int]] +class Constraint(metaclass=abc.ABCMeta): + """Base class representing different constraints. + + Used by `register_background_validate_constraint_and_delete_rows`. + """ + + @abc.abstractmethod + def make_check_clause(self, table: str) -> str: + """Returns an SQL expression that checks the row passes the constraint.""" + pass + + @abc.abstractmethod + def make_constraint_clause_postgres(self) -> str: + """Returns an SQL clause for creating the constraint. + + Only used on Postgres DBs + """ + pass + + +@attr.s(auto_attribs=True) +class ForeignKeyConstraint(Constraint): + """A foreign key constraint. + + Attributes: + referenced_table: The "parent" table name. 
+        columns: The list of mappings of columns from table to referenced table
+    """
+
+    referenced_table: str
+    columns: Sequence[Tuple[str, str]]
+
+    def make_check_clause(self, table: str) -> str:
+        join_clause = " AND ".join(
+            f"{col2} = {table}.{col1}" for col1, col2 in self.columns
+        )
+        return f"EXISTS (SELECT 1 FROM {self.referenced_table} WHERE {join_clause})"
+
+    def make_constraint_clause_postgres(self) -> str:
+        column1_list = ", ".join(col1 for col1, col2 in self.columns)
+        column2_list = ", ".join(col2 for col1, col2 in self.columns)
+        return f"FOREIGN KEY ({column1_list}) REFERENCES {self.referenced_table} ({column2_list})"
+
+
+@attr.s(auto_attribs=True)
+class NotNullConstraint(Constraint):
+    """A NOT NULL column constraint"""
+
+    column: str
+
+    def make_check_clause(self, table: str) -> str:
+        return f"{self.column} IS NOT NULL"
+
+    def make_constraint_clause_postgres(self) -> str:
+        return f"CHECK ({self.column} IS NOT NULL)"
+
+
+class ValidateConstraintProgress(BaseModel):
+    """The format of the progress JSON for validate constraint background
+    updates.
+
+    Used by `register_background_validate_constraint_and_delete_rows`.
+    """
+
+    class State(str, Enum):
+        check = "check"
+        validate = "validate"
+
+    state: State = State.validate
+    lower_bound: Sequence[Any] = ()
+
+
 @attr.s(slots=True, frozen=True, auto_attribs=True)
 class _BackgroundUpdateHandler:
     """A handler for a given background update.
@@ -740,6 +817,179 @@ class BackgroundUpdater:
 
             logger.info("Adding index %s to %s", index_name, table)
 
         await self.db_pool.runWithConnection(runner)
 
+    def register_background_validate_constraint_and_delete_rows(
+        self,
+        update_name: str,
+        table: str,
+        constraint_name: str,
+        constraint: Constraint,
+        unique_columns: Sequence[str],
+    ) -> None:
+        """Helper for store classes to do a background validate constraint, and
+        delete rows that do not pass the constraint check.
+
+        Note: This deletes rows that don't match the constraint. This may not be
+        appropriate in all situations, and so the suitability of using this
+        method should be considered on a case-by-case basis.
+
+        This only applies on PostgreSQL.
+
+        For SQLite the table gets recreated as part of the schema delta and the
+        data is copied over synchronously (i.e. in the foreground, while the
+        delta runs).
+
+        Args:
+            update_name: The name of the background update.
+            table: The table with the invalid constraint.
+            constraint_name: The name of the constraint.
+            constraint: A `Constraint` object matching the type of constraint.
+            unique_columns: A sequence of columns that form a unique constraint
+                on the table. Used to iterate over the table.
+        """
+
+        assert isinstance(
+            self.db_pool.engine, PostgresEngine
+        ), "validate constraint background update registered for non-Postgres database"
+
+        async def updater(progress: JsonDict, batch_size: int) -> int:
+            return await self.validate_constraint_and_delete_in_background(
+                update_name=update_name,
+                table=table,
+                constraint_name=constraint_name,
+                constraint=constraint,
+                unique_columns=unique_columns,
+                progress=progress,
+                batch_size=batch_size,
+            )
+
+        self._background_update_handlers[update_name] = _BackgroundUpdateHandler(
+            updater, oneshot=True
+        )
+
+    async def validate_constraint_and_delete_in_background(
+        self,
+        update_name: str,
+        table: str,
+        constraint_name: str,
+        constraint: Constraint,
+        unique_columns: Sequence[str],
+        progress: JsonDict,
+        batch_size: int,
+    ) -> int:
+        """Validates a table constraint that has been marked as `NOT VALID`,
+        deleting rows that don't pass the constraint check.
+
+        This will delete rows that do not meet the validation check.
+
+        Args:
+            update_name: The name of the background update.
+            table: The table with the invalid constraint.
+            constraint_name: The name of the constraint.
+            constraint: A `Constraint` object matching the type of constraint.
+            unique_columns: A sequence of columns that form a unique constraint
+                on the table. Used to iterate over the table.
+            progress: The background update's current progress JSON.
+            batch_size: The maximum number of rows to check in this batch.
+        """
+
+        # We validate the constraint by:
+        #   1. Trying to validate the constraint as is. If this succeeds then
+        #      we're done.
+        #   2. Otherwise, we manually scan the table to remove rows that don't
+        #      match the constraint.
+        #   3. We try re-validating the constraint.
+
+        parsed_progress = ValidateConstraintProgress.parse_obj(progress)
+
+        if parsed_progress.state == ValidateConstraintProgress.State.check:
+            return_columns = ", ".join(unique_columns)
+            order_columns = ", ".join(unique_columns)
+
+            where_clause = ""
+            args: List[Any] = []
+            if parsed_progress.lower_bound:
+                where_clause = f"""WHERE ({order_columns}) > ({", ".join("?" for _ in unique_columns)})"""
+                args.extend(parsed_progress.lower_bound)
+
+            args.append(batch_size)
+
+            sql = f"""
+                SELECT
+                    {return_columns},
+                    {constraint.make_check_clause(table)} AS check
+                FROM {table}
+                {where_clause}
+                ORDER BY {order_columns}
+                LIMIT ?
+            """
+
+            def validate_constraint_in_background_check(
+                txn: "LoggingTransaction",
+            ) -> None:
+                txn.execute(sql, args)
+                rows = txn.fetchall()
+
+                new_progress = parsed_progress.copy()
+
+                if not rows:
+                    new_progress.state = ValidateConstraintProgress.State.validate
+                    self._background_update_progress_txn(
+                        txn, update_name, new_progress.dict()
+                    )
+                    return
+
+                new_progress.lower_bound = rows[-1][:-1]
+
+                to_delete = [row[:-1] for row in rows if not row[-1]]
+
+                if to_delete:
+                    logger.warning(
+                        "Deleting %d rows that do not pass new constraint",
+                        len(to_delete),
+                    )
+
+                    self.db_pool.simple_delete_many_batch_txn(
+                        txn, table=table, keys=unique_columns, values=to_delete
+                    )
+
+                self._background_update_progress_txn(
+                    txn, update_name, new_progress.dict()
+                )
+
+            await self.db_pool.runInteraction(
+                "validate_constraint_in_background_check",
+                validate_constraint_in_background_check,
+            )
+
+            return batch_size
+
+        elif parsed_progress.state == ValidateConstraintProgress.State.validate:
+            sql = f"ALTER TABLE {table} VALIDATE CONSTRAINT {constraint_name}"
+
+            def validate_constraint_in_background_validate(
+                txn: "LoggingTransaction",
+            ) -> None:
+                txn.execute(sql)
+
+            try:
+                await self.db_pool.runInteraction(
+                    "validate_constraint_in_background_validate",
+                    validate_constraint_in_background_validate,
+                )
+
+                await self._end_background_update(update_name)
+            except self.db_pool.engine.module.IntegrityError as e:
+                # If we get an integrity error here, then we go back and recheck the table.
+                logger.warning("Integrity error when validating constraint: %s", e)
+                await self._background_update_progress(
+                    update_name,
+                    ValidateConstraintProgress(
+                        state=ValidateConstraintProgress.State.check
+                    ).dict(),
+                )
+
+            return batch_size
+        else:
+            raise Exception(
+                f"Unrecognized state '{parsed_progress.state}' when trying to validate_constraint_and_delete_in_background"
+            )
+
     async def _end_background_update(self, update_name: str) -> None:
         """Removes a completed background update task from the queue.
@@ -795,3 +1045,86 @@ class BackgroundUpdater:
             keyvalues={"update_name": update_name},
             updatevalues={"progress_json": progress_json},
         )
+
+
+def run_validate_constraint_and_delete_rows_schema_delta(
+    txn: "LoggingTransaction",
+    ordering: int,
+    update_name: str,
+    table: str,
+    constraint_name: str,
+    constraint: Constraint,
+    sqlite_table_name: str,
+    sqlite_table_schema: str,
+) -> None:
+    """Runs a schema delta to add a constraint to the table. This should be run
+    in a schema delta file.
+
+    For PostgreSQL the constraint is added and validated in the background.
+
+    For SQLite the table is recreated and data copied across immediately. This
+    is done by the caller passing in a script to create the new table. Note that
+    table indexes and triggers are copied over automatically.
+
+    There must be a corresponding call to
+    `register_background_validate_constraint_and_delete_rows` to register the
+    background update in one of the data store classes.
+
+    Args:
+        txn: The transaction to run the delta in.
+        ordering: The ordering of the row added to the background_updates table.
+        update_name: The name of the background update to add a row for.
+        table: The table to add the constraint to.
+        constraint_name: The name of the new constraint.
+        constraint: A `Constraint` object describing the constraint.
+        sqlite_table_name: For SQLite, the name of the empty copy of the table.
+        sqlite_table_schema: A SQL script for creating the above table.
+    """
+
+    if isinstance(txn.database_engine, PostgresEngine):
+        # For postgres we can just add the constraint and mark it as NOT VALID,
+        # and then insert a background update to go and check the validity in
+        # the background.
+        txn.execute(
+            f"""
+            ALTER TABLE {table}
+            ADD CONSTRAINT {constraint_name} {constraint.make_constraint_clause_postgres()}
+            NOT VALID
+            """
+        )
+
+        txn.execute(
+            "INSERT INTO background_updates (ordering, update_name, progress_json) VALUES (?, ?, '{}')",
+            (ordering, update_name),
+        )
+    else:
+        # For SQLite, we:
+        #   1. fetch all indexes/triggers/etc related to the table
+        #   2. create an empty copy of the table
+        #   3. copy across the rows (that satisfy the check)
+        #   4. replace the old table with the new table
+        #   5. add back all the indexes/triggers/etc
+
+        # Fetch the indexes/triggers/etc. Note that the `sql` column being NULL
+        # is due to indexes being auto created based on the table definition
+        # (e.g. PRIMARY KEY), and so they don't need to be recreated.
+        txn.execute(
+            """
+            SELECT sql FROM sqlite_master
+            WHERE tbl_name = ?
AND type != 'table' AND sql IS NOT NULL + """, + (table,), + ) + extras = [row[0] for row in txn] + + txn.execute(sqlite_table_schema) + + sql = f""" + INSERT INTO {sqlite_table_name} SELECT * FROM {table} + WHERE {constraint.make_check_clause(table)} + """ + + txn.execute(sql) + + txn.execute(f"DROP TABLE {table}") + txn.execute(f"ALTER TABLE {sqlite_table_name} RENAME TO {table}") + + for extra in extras: + txn.execute(extra) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 7e49ae11bc..a1c8fb0f46 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -2313,6 +2313,43 @@ class DatabasePool: return txn.rowcount + @staticmethod + def simple_delete_many_batch_txn( + txn: LoggingTransaction, + table: str, + keys: Collection[str], + values: Iterable[Iterable[Any]], + ) -> None: + """Executes a DELETE query on the named table. + + The input is given as a list of rows, where each row is a list of values. + (Actually any iterable is fine.) + + Args: + txn: The transaction to use. + table: string giving the table name + keys: list of column names + values: for each row, a list of values in the same order as `keys` + """ + + if isinstance(txn.database_engine, PostgresEngine): + # We use `execute_values` as it can be a lot faster than `execute_batch`, + # but it's only available on postgres. + sql = "DELETE FROM %s WHERE (%s) IN (VALUES ?)" % ( + table, + ", ".join(k for k in keys), + ) + + txn.execute_values(sql, values, fetch=False) + else: + sql = "DELETE FROM %s WHERE (%s) = (%s)" % ( + table, + ", ".join(k for k in keys), + ", ".join("?" for _ in keys), + ) + + txn.execute_batch(sql, values) + def get_cache_dict( self, db_conn: LoggingDatabaseConnection, diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 8b6e3c1dc7..dabe603c8c 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -38,6 +38,7 @@ from synapse.events import EventBase, make_event_from_dict from synapse.logging.opentracing import tag_args, trace from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause +from synapse.storage.background_updates import ForeignKeyConstraint from synapse.storage.database import ( DatabasePool, LoggingDatabaseConnection, @@ -140,6 +141,15 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas self._clock.looping_call(self._get_stats_for_federation_staging, 30 * 1000) + if isinstance(self.database_engine, PostgresEngine): + self.db_pool.updates.register_background_validate_constraint_and_delete_rows( + update_name="event_forward_extremities_event_id_foreign_key_constraint_update", + table="event_forward_extremities", + constraint_name="event_forward_extremities_event_id", + constraint=ForeignKeyConstraint("events", [("event_id", "event_id")]), + unique_columns=("event_id", "room_id"), + ) + async def get_auth_chain( self, room_id: str, event_ids: Collection[str], include_given: bool = False ) -> List[EventBase]: diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 5c9db7554e..2b83a69426 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -415,12 +415,6 @@ class PersistEventsStore: backfilled=False, ) - self._update_forward_extremities_txn( - txn, - 
-            new_forward_extremities=new_forward_extremities,
-            max_stream_order=max_stream_order,
-        )
-
         # Ensure that we don't have the same event twice.
         events_and_contexts = self._filter_events_and_contexts_for_duplicates(
             events_and_contexts
@@ -439,6 +433,12 @@ class PersistEventsStore:
 
         self._store_event_txn(txn, events_and_contexts=events_and_contexts)
 
+        self._update_forward_extremities_txn(
+            txn,
+            new_forward_extremities=new_forward_extremities,
+            max_stream_order=max_stream_order,
+        )
+
         self._persist_transaction_ids_txn(txn, events_and_contexts)
 
         # Insert into event_to_state_groups.
diff --git a/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py b/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py
new file mode 100644
index 0000000000..f12e2a8f3e
--- /dev/null
+++ b/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py
@@ -0,0 +1,51 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+This migration adds a foreign key constraint to the `event_forward_extremities` table.
+"""
+from synapse.storage.background_updates import (
+    ForeignKeyConstraint,
+    run_validate_constraint_and_delete_rows_schema_delta,
+)
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine
+
+FORWARD_EXTREMITIES_TABLE_SCHEMA = """
+    CREATE TABLE event_forward_extremities2(
+        event_id TEXT NOT NULL,
+        room_id TEXT NOT NULL,
+        UNIQUE (event_id, room_id),
+        CONSTRAINT event_forward_extremities_event_id FOREIGN KEY (event_id) REFERENCES events (event_id)
+    )
+"""
+
+
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
+    run_validate_constraint_and_delete_rows_schema_delta(
+        cur,
+        ordering=7803,
+        update_name="event_forward_extremities_event_id_foreign_key_constraint_update",
+        table="event_forward_extremities",
+        constraint_name="event_forward_extremities_event_id",
+        constraint=ForeignKeyConstraint("events", [("event_id", "event_id")]),
+        sqlite_table_name="event_forward_extremities2",
+        sqlite_table_schema=FORWARD_EXTREMITIES_TABLE_SCHEMA,
+    )
+
+    # We can't add a similar constraint to `event_backward_extremities` as the
+    # events in there don't exist in the `events` table and `event_edges`
+    # doesn't have a unique constraint on `prev_event_id` (so we can't make a
+    # foreign key point to it).
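For orientation, on Postgres the delta above and its companion background update boil down to roughly the following three-step flow. This is a condensed sketch rather than code from this patch; the table and constraint names are the ones used above:

    # Sketch of the Postgres flow set up by
    # run_validate_constraint_and_delete_rows_schema_delta (illustrative only).
    def sketch_postgres_flow(txn) -> None:
        # 1. The schema delta adds the constraint without checking existing
        #    rows, so the delta itself doesn't have to scan the table:
        txn.execute(
            """
            ALTER TABLE event_forward_extremities
                ADD CONSTRAINT event_forward_extremities_event_id
                FOREIGN KEY (event_id) REFERENCES events (event_id)
                NOT VALID
            """
        )
        # 2. The registered background update then scans the table in batches,
        #    deleting any rows that fail the constraint's check clause.
        # 3. Once a scan comes back clean, Postgres validates the constraint
        #    for real:
        txn.execute(
            "ALTER TABLE event_forward_extremities"
            " VALIDATE CONSTRAINT event_forward_extremities_event_id"
        )

On SQLite the whole thing instead happens synchronously in the delta, by copying the good rows into `event_forward_extremities2` and renaming it over the old table.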
diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py
index fd619b64d4..6ca546f3f7 100644
--- a/tests/storage/test_background_update.py
+++ b/tests/storage/test_background_update.py
@@ -20,7 +20,14 @@ from twisted.internet.defer import Deferred, ensureDeferred
 from twisted.test.proto_helpers import MemoryReactor
 
 from synapse.server import HomeServer
-from synapse.storage.background_updates import BackgroundUpdater
+from synapse.storage.background_updates import (
+    BackgroundUpdater,
+    ForeignKeyConstraint,
+    NotNullConstraint,
+    run_validate_constraint_and_delete_rows_schema_delta,
+)
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import PostgresEngine, Sqlite3Engine
 from synapse.types import JsonDict
 from synapse.util import Clock
 
@@ -404,3 +411,221 @@ class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase):
         self.pump()
         self._update_ctx_manager.__aexit__.assert_called()
         self.get_success(do_update_d)
+
+
+class BackgroundUpdateValidateConstraintTestCase(unittest.HomeserverTestCase):
+    """Tests the validate constraint and delete background handlers."""
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.updates: BackgroundUpdater = self.hs.get_datastores().main.db_pool.updates
+        # the base test class should have run the real bg updates for us
+        self.assertTrue(
+            self.get_success(self.updates.has_completed_background_updates())
+        )
+
+        self.store = self.hs.get_datastores().main
+
+    def test_not_null_constraint(self) -> None:
+        """Tests adding a not null constraint."""
+
+        # Create the initial tables, where we have some invalid data.
+        table_sql = """
+            CREATE TABLE test_constraint(
+                a INT PRIMARY KEY,
+                b INT
+            );
+        """
+        self.get_success(
+            self.store.db_pool.execute(
+                "test_not_null_constraint", lambda _: None, table_sql
+            )
+        )
+
+        # We add an index so that we can check that it's correctly recreated when
+        # using SQLite.
+        index_sql = "CREATE INDEX test_index ON test_constraint(a)"
+        self.get_success(
+            self.store.db_pool.execute(
+                "test_not_null_constraint", lambda _: None, index_sql
+            )
+        )
+
+        self.get_success(
+            self.store.db_pool.simple_insert("test_constraint", {"a": 1, "b": 1})
+        )
+        self.get_success(
+            self.store.db_pool.simple_insert("test_constraint", {"a": 2, "b": None})
+        )
+        self.get_success(
+            self.store.db_pool.simple_insert("test_constraint", {"a": 3, "b": 3})
+        )
+
+        # Now let's do the migration
+
+        table2_sqlite = """
+            CREATE TABLE test_constraint2(
+                a INT PRIMARY KEY,
+                b INT,
+                CONSTRAINT test_constraint_name CHECK (b is NOT NULL)
+            );
+        """
+
+        def delta(txn: LoggingTransaction) -> None:
+            run_validate_constraint_and_delete_rows_schema_delta(
+                txn,
+                ordering=1000,
+                update_name="test_bg_update",
+                table="test_constraint",
+                constraint_name="test_constraint_name",
+                constraint=NotNullConstraint("b"),
+                sqlite_table_name="test_constraint2",
+                sqlite_table_schema=table2_sqlite,
+            )
+
+        self.get_success(
+            self.store.db_pool.runInteraction(
+                "test_not_null_constraint",
+                delta,
+            )
+        )
+
+        if isinstance(self.store.database_engine, PostgresEngine):
+            # Postgres uses a background update
+            self.updates.register_background_validate_constraint_and_delete_rows(
+                "test_bg_update",
+                table="test_constraint",
+                constraint_name="test_constraint_name",
+                constraint=NotNullConstraint("b"),
+                unique_columns=["a"],
+            )
+
+            # Tell the DataStore that it hasn't finished all updates yet
+            self.store.db_pool.updates._all_done = False
+
+            # Now let's actually drive the updates to completion
+            self.wait_for_background_updates()
+
+        # Check the correct values are in the new table.
+        rows = self.get_success(
+            self.store.db_pool.simple_select_list(
+                table="test_constraint",
+                keyvalues={},
+                retcols=("a", "b"),
+            )
+        )
+
+        self.assertCountEqual(rows, [{"a": 1, "b": 1}, {"a": 3, "b": 3}])
+
+        # And check that invalid rows get correctly rejected.
+        self.get_failure(
+            self.store.db_pool.simple_insert("test_constraint", {"a": 2, "b": None}),
+            exc=self.store.database_engine.module.IntegrityError,
+        )
+
+        # Check the index is still there for SQLite.
+        if isinstance(self.store.database_engine, Sqlite3Engine):
+            # Ensure the index exists in the schema.
+            self.get_success(
+                self.store.db_pool.simple_select_one_onecol(
+                    table="sqlite_master",
+                    keyvalues={"tbl_name": "test_constraint"},
+                    retcol="name",
+                )
+            )
+
+    def test_foreign_constraint(self) -> None:
+        """Tests adding a foreign key constraint."""
+
+        # Create the initial tables, where we have some invalid data.
+ base_sql = """ + CREATE TABLE base_table( + b INT PRIMARY KEY + ); + """ + + table_sql = """ + CREATE TABLE test_constraint( + a INT PRIMARY KEY, + b INT NOT NULL + ); + """ + self.get_success( + self.store.db_pool.execute( + "test_foreign_key_constraint", lambda _: None, base_sql + ) + ) + self.get_success( + self.store.db_pool.execute( + "test_foreign_key_constraint", lambda _: None, table_sql + ) + ) + + self.get_success(self.store.db_pool.simple_insert("base_table", {"b": 1})) + self.get_success( + self.store.db_pool.simple_insert("test_constraint", {"a": 1, "b": 1}) + ) + self.get_success( + self.store.db_pool.simple_insert("test_constraint", {"a": 2, "b": 2}) + ) + self.get_success(self.store.db_pool.simple_insert("base_table", {"b": 3})) + self.get_success( + self.store.db_pool.simple_insert("test_constraint", {"a": 3, "b": 3}) + ) + + table2_sqlite = """ + CREATE TABLE test_constraint2( + a INT PRIMARY KEY, + b INT NOT NULL, + CONSTRAINT test_constraint_name FOREIGN KEY (b) REFERENCES base_table (b) + ); + """ + + def delta(txn: LoggingTransaction) -> None: + run_validate_constraint_and_delete_rows_schema_delta( + txn, + ordering=1000, + update_name="test_bg_update", + table="test_constraint", + constraint_name="test_constraint_name", + constraint=ForeignKeyConstraint("base_table", [("b", "b")]), + sqlite_table_name="test_constraint2", + sqlite_table_schema=table2_sqlite, + ) + + self.get_success( + self.store.db_pool.runInteraction( + "test_foreign_key_constraint", + delta, + ) + ) + + if isinstance(self.store.database_engine, PostgresEngine): + # Postgres uses a background update + self.updates.register_background_validate_constraint_and_delete_rows( + "test_bg_update", + table="test_constraint", + constraint_name="test_constraint_name", + constraint=ForeignKeyConstraint("base_table", [("b", "b")]), + unique_columns=["a"], + ) + + # Tell the DataStore that it hasn't finished all updates yet + self.store.db_pool.updates._all_done = False + + # Now let's actually drive the updates to completion + self.wait_for_background_updates() + + # Check the correct values are in the new table. + rows = self.get_success( + self.store.db_pool.simple_select_list( + table="test_constraint", + keyvalues={}, + retcols=("a", "b"), + ) + ) + self.assertCountEqual(rows, [{"a": 1, "b": 1}, {"a": 3, "b": 3}]) + + # And check that invalid rows get correctly rejected. + self.get_failure( + self.store.db_pool.simple_insert("test_constraint", {"a": 2, "b": 2}), + exc=self.store.database_engine.module.IntegrityError, + ) diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 0f3b0744f1..9c151a5e62 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -20,6 +20,7 @@ from parameterized import parameterized from twisted.test.proto_helpers import MemoryReactor +from synapse.api.constants import EventTypes from synapse.api.room_versions import ( KNOWN_ROOM_VERSIONS, EventFormatVersions, @@ -98,8 +99,32 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): room2 = "#room2" room3 = "#room3" - def insert_event(txn: Cursor, i: int, room_id: str) -> None: + def insert_event(txn: LoggingTransaction, i: int, room_id: str) -> None: event_id = "$event_%i:local" % i + + # We need to insert into events table to get around the foreign key constraint. 
+            self.store.db_pool.simple_insert_txn(
+                txn,
+                table="events",
+                values={
+                    "instance_name": "master",
+                    "stream_ordering": self.store._stream_id_gen.get_next_txn(txn),
+                    "topological_ordering": 1,
+                    "depth": 1,
+                    "event_id": event_id,
+                    "room_id": room_id,
+                    "type": EventTypes.Message,
+                    "processed": True,
+                    "outlier": False,
+                    "origin_server_ts": 0,
+                    "received_ts": 0,
+                    "sender": "@user:local",
+                    "contains_url": False,
+                    "state_key": None,
+                    "rejection_reason": None,
+                },
+            )
+
             txn.execute(
                 (
                     "INSERT INTO event_forward_extremities (room_id, event_id) "
@@ -113,10 +138,14 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
                 self.store.db_pool.runInteraction("insert", insert_event, i, room1)
             )
             self.get_success(
-                self.store.db_pool.runInteraction("insert", insert_event, i, room2)
+                self.store.db_pool.runInteraction(
+                    "insert", insert_event, i + 100, room2
+                )
             )
             self.get_success(
-                self.store.db_pool.runInteraction("insert", insert_event, i, room3)
+                self.store.db_pool.runInteraction(
+                    "insert", insert_event, i + 200, room3
+                )
             )
 
         # Test simple case

From 4cf9f92f395e8c448b94eccb48fbfe2e7e61d7cd Mon Sep 17 00:00:00 2001
From: Jason Little
Date: Wed, 5 Jul 2023 05:44:02 -0500
Subject: [PATCH 185/562] Fix could not serialize access due to concurrent
 `DELETE` from presence_stream (#15826)

* Change update_presence to have an isolation level of READ_COMMITTED

* changelog
---
 changelog.d/15826.misc                     | 1 +
 synapse/storage/databases/main/presence.py | 7 ++++++-
 2 files changed, 7 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/15826.misc

diff --git a/changelog.d/15826.misc b/changelog.d/15826.misc
new file mode 100644
index 0000000000..88903f3f7c
--- /dev/null
+++ b/changelog.d/15826.misc
@@ -0,0 +1 @@
+Use lower isolation level when cleaning old presence stream data to avoid serialization errors.
diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py
index beb210f8ee..b51d20ac26 100644
--- a/synapse/storage/databases/main/presence.py
+++ b/synapse/storage/databases/main/presence.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, cast
 
 from synapse.api.presence import PresenceState, UserPresenceState
@@ -24,6 +23,7 @@ from synapse.storage.database import (
 )
 from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
 from synapse.storage.engines import PostgresEngine
+from synapse.storage.engines._base import IsolationLevel
 from synapse.storage.types import Connection
 from synapse.storage.util.id_generators import (
     AbstractStreamIdGenerator,
@@ -115,11 +115,16 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
         )
 
         async with stream_ordering_manager as stream_orderings:
+            # Run the interaction with an isolation level of READ_COMMITTED to avoid
+            # serialization errors (and rollbacks) in the database. This way it will
+            # ignore new rows during the DELETE, but will pick them up the next time
+            # this is run. Currently, that is between 5-60 seconds.
await self.db_pool.runInteraction( "update_presence", self._update_presence_txn, stream_orderings, presence_states, + isolation_level=IsolationLevel.READ_COMMITTED, ) return stream_orderings[-1], self._presence_id_gen.get_current_token() From cc780b3f7711b422698ef3392c2caf146312855d Mon Sep 17 00:00:00 2001 From: Sumner Evans Date: Wed, 5 Jul 2023 08:15:56 -0600 Subject: [PATCH 186/562] docs/admin_api: fix header level on 'Users' page (#15852) Signed-off-by: Sumner Evans --- changelog.d/15852.doc | 1 + docs/admin_api/user_admin_api.md | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15852.doc diff --git a/changelog.d/15852.doc b/changelog.d/15852.doc new file mode 100644 index 0000000000..060b55d106 --- /dev/null +++ b/changelog.d/15852.doc @@ -0,0 +1 @@ +Fixed header levels on the Admin API "Users" documentation page. Contributed by @sumnerevans at @beeper. diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index f17e60b1cb..23f465e98d 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -1183,7 +1183,7 @@ The following parameters should be set in the URL: - `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must be local. -### Check username availability +## Check username availability Checks to see if a username is available, and valid, for the server. See [the client-server API](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) @@ -1201,7 +1201,7 @@ GET /_synapse/admin/v1/username_available?username=$localpart The request and response format is the same as the [/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API. -### Find a user based on their ID in an auth provider +## Find a user based on their ID in an auth provider The API is: @@ -1240,7 +1240,7 @@ Returns a `404` HTTP status code if no user was found, with a response body like _Added in Synapse 1.68.0._ -### Find a user based on their Third Party ID (ThreePID or 3PID) +## Find a user based on their Third Party ID (ThreePID or 3PID) The API is: From ce857c05d5f6fa6b66c4a59c4917c440c9b98047 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 5 Jul 2023 10:22:21 -0500 Subject: [PATCH 187/562] Add tracing to media `/upload` endpoint (#15850) Add tracing instrumentation to media `/upload` code paths to investigate https://github.com/matrix-org/synapse/issues/15841 --- changelog.d/15850.misc | 1 + synapse/media/media_repository.py | 3 +++ synapse/media/media_storage.py | 7 +++++++ synapse/media/storage_provider.py | 5 +++++ synapse/media/thumbnailer.py | 5 +++++ synapse/module_api/callbacks/spamchecker_callbacks.py | 1 + synapse/storage/databases/main/media_repository.py | 5 +++++ 7 files changed, 27 insertions(+) create mode 100644 changelog.d/15850.misc diff --git a/changelog.d/15850.misc b/changelog.d/15850.misc new file mode 100644 index 0000000000..0e49ab23fe --- /dev/null +++ b/changelog.d/15850.misc @@ -0,0 +1 @@ +Add tracing to media `/upload` code paths. 
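The diffs that follow simply apply the existing `trace` decorator from `synapse.logging.opentracing` to the media code paths. As a rough mental model only (the real decorator also has to cope with async functions and coroutines, and with tag propagation), applying it runs the function inside an active opentracing span named after it, something like:

    import functools

    from synapse.logging.opentracing import start_active_span

    def trace_sketch(func):
        # Simplified stand-in for the real `trace` decorator: run the wrapped
        # function inside a span named after the function.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with start_active_span(func.__name__):
                return func(*args, **kwargs)

        return wrapper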
diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index e81c987b10..4b750c700b 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -35,6 +35,7 @@ from synapse.api.errors import ( from synapse.config.repository import ThumbnailRequirement from synapse.http.site import SynapseRequest from synapse.logging.context import defer_to_thread +from synapse.logging.opentracing import trace from synapse.media._base import ( FileInfo, Responder, @@ -174,6 +175,7 @@ class MediaRepository: else: self.recently_accessed_locals.add(media_id) + @trace async def create_content( self, media_type: str, @@ -710,6 +712,7 @@ class MediaRepository: # Could not generate thumbnail. return None + @trace async def _generate_thumbnails( self, server_name: Optional[str], diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py index a819d95407..eebcbc48e8 100644 --- a/synapse/media/media_storage.py +++ b/synapse/media/media_storage.py @@ -38,6 +38,7 @@ from twisted.protocols.basic import FileSender from synapse.api.errors import NotFoundError from synapse.logging.context import defer_to_thread, make_deferred_yieldable +from synapse.logging.opentracing import trace from synapse.util import Clock from synapse.util.file_consumer import BackgroundFileConsumer @@ -76,6 +77,7 @@ class MediaStorage: self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self.clock = hs.get_clock() + @trace async def store_file(self, source: IO, file_info: FileInfo) -> str: """Write `source` to the on disk media store, and also any other configured storage providers @@ -95,10 +97,12 @@ class MediaStorage: return fname + @trace async def write_to_file(self, source: IO, output: IO) -> None: """Asynchronously write the `source` to `output`.""" await defer_to_thread(self.reactor, _write_file_synchronously, source, output) + @trace @contextlib.contextmanager def store_into_file( self, file_info: FileInfo @@ -214,6 +218,7 @@ class MediaStorage: return None + @trace async def ensure_media_is_in_local_cache(self, file_info: FileInfo) -> str: """Ensures that the given file is in the local cache. Attempts to download it from storage providers if it isn't. @@ -259,6 +264,7 @@ class MediaStorage: raise NotFoundError() + @trace def _file_info_to_path(self, file_info: FileInfo) -> str: """Converts file_info into a relative path. @@ -301,6 +307,7 @@ class MediaStorage: return self.filepaths.local_media_filepath_rel(file_info.file_id) +@trace def _write_file_synchronously(source: IO, dest: IO) -> None: """Write `source` to the file like `dest` synchronously. Should be called from a thread. 
diff --git a/synapse/media/storage_provider.py b/synapse/media/storage_provider.py index 1c9b71d69c..0aea3a7a0d 100644 --- a/synapse/media/storage_provider.py +++ b/synapse/media/storage_provider.py @@ -20,6 +20,7 @@ from typing import TYPE_CHECKING, Callable, Optional from synapse.config._base import Config from synapse.logging.context import defer_to_thread, run_in_background +from synapse.logging.opentracing import trace from synapse.util.async_helpers import maybe_awaitable from ._base import FileInfo, Responder @@ -86,6 +87,7 @@ class StorageProviderWrapper(StorageProvider): def __str__(self) -> str: return "StorageProviderWrapper[%s]" % (self.backend,) + @trace async def store_file(self, path: str, file_info: FileInfo) -> None: if not file_info.server_name and not self.store_local: return None @@ -114,6 +116,7 @@ class StorageProviderWrapper(StorageProvider): run_in_background(store) + @trace async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: if file_info.url_cache: # Files in the URL preview cache definitely aren't stored here, @@ -141,6 +144,7 @@ class FileStorageProviderBackend(StorageProvider): def __str__(self) -> str: return "FileStorageProviderBackend[%s]" % (self.base_directory,) + @trace async def store_file(self, path: str, file_info: FileInfo) -> None: """See StorageProvider.store_file""" @@ -159,6 +163,7 @@ class FileStorageProviderBackend(StorageProvider): backup_fname, ) + @trace async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: """See StorageProvider.fetch""" diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py index 73d2272f05..2bfa58ceee 100644 --- a/synapse/media/thumbnailer.py +++ b/synapse/media/thumbnailer.py @@ -19,6 +19,8 @@ from typing import Optional, Tuple, Type from PIL import Image +from synapse.logging.opentracing import trace + logger = logging.getLogger(__name__) EXIF_ORIENTATION_TAG = 0x0112 @@ -82,6 +84,7 @@ class Thumbnailer: # A lot of parsing errors can happen when parsing EXIF logger.info("Error parsing image EXIF information: %s", e) + @trace def transpose(self) -> Tuple[int, int]: """Transpose the image using its EXIF Orientation tag @@ -133,6 +136,7 @@ class Thumbnailer: self.image = self.image.convert("RGB") return self.image.resize((width, height), Image.LANCZOS) + @trace def scale(self, width: int, height: int, output_type: str) -> BytesIO: """Rescales the image to the given dimensions. 
@@ -142,6 +146,7 @@ class Thumbnailer: with self._resize(width, height) as scaled: return self._encode_image(scaled, output_type) + @trace def crop(self, width: int, height: int, output_type: str) -> BytesIO: """Rescales and crops the image to the given dimensions preserving aspect:: diff --git a/synapse/module_api/callbacks/spamchecker_callbacks.py b/synapse/module_api/callbacks/spamchecker_callbacks.py index 7cee442145..e191450323 100644 --- a/synapse/module_api/callbacks/spamchecker_callbacks.py +++ b/synapse/module_api/callbacks/spamchecker_callbacks.py @@ -788,6 +788,7 @@ class SpamCheckerModuleApiCallbacks: return RegistrationBehaviour.ALLOW + @trace async def check_media_file_for_spam( self, file_wrapper: ReadableFileWrapper, file_info: FileInfo ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]: diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index fa8be214ce..8cebeb5189 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -27,6 +27,7 @@ from typing import ( ) from synapse.api.constants import Direction +from synapse.logging.opentracing import trace from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -328,6 +329,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "get_local_media_ids", _get_local_media_ids_txn ) + @trace async def store_local_media( self, media_id: str, @@ -447,6 +449,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): desc="get_local_media_thumbnails", ) + @trace async def store_local_thumbnail( self, media_id: str, @@ -568,6 +571,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): desc="get_remote_media_thumbnails", ) + @trace async def get_remote_media_thumbnail( self, origin: str, @@ -599,6 +603,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): desc="get_remote_media_thumbnail", ) + @trace async def store_remote_media_thumbnail( self, origin: str, From 39d131b016673bbd4d3c28095c8838b8c6dc0953 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 Jul 2023 17:25:00 +0100 Subject: [PATCH 188/562] Add basic read/write lock (#15782) --- changelog.d/15782.misc | 1 + synapse/_scripts/synapse_port_db.py | 9 +- synapse/storage/databases/main/lock.py | 226 ++++++++++---- .../04_read_write_locks_triggers.sql.postgres | 152 ++++++++++ .../04_read_write_locks_triggers.sql.sqlite | 119 ++++++++ tests/storage/databases/main/test_lock.py | 283 +++++++++++++++++- 6 files changed, 731 insertions(+), 59 deletions(-) create mode 100644 changelog.d/15782.misc create mode 100644 synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.postgres create mode 100644 synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.sqlite diff --git a/changelog.d/15782.misc b/changelog.d/15782.misc new file mode 100644 index 0000000000..aae493b973 --- /dev/null +++ b/changelog.d/15782.misc @@ -0,0 +1 @@ +Add read/write style cross-worker locks. 
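The calling pattern this patch introduces, exercised by the tests at the end of the patch, looks roughly like the following. The surrounding function, lock name, and guarded work are hypothetical; only `try_acquire_read_write_lock` and the async context manager behaviour come from the patch:

    # Hypothetical caller: serialise writers while allowing concurrent readers.
    async def do_guarded_work(store, room_id: str) -> None:
        lock = await store.try_acquire_read_write_lock(
            "example_lock_name", room_id, write=True
        )
        if lock is None:
            # Another worker holds a conflicting lock; give up or retry later.
            return

        async with lock:
            # While held, the lock row is renewed in the background; exiting
            # the context manager deletes the row, releasing the lock.
            ...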
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index e126a2e0c5..7c4aa0afa2 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -197,6 +197,11 @@ IGNORED_TABLES = { "ui_auth_sessions", "ui_auth_sessions_credentials", "ui_auth_sessions_ips", + # Ignore the worker locks table, as a) there shouldn't be any acquired locks + # after porting, and b) the circular foreign key constraints make it hard to + # port. + "worker_read_write_locks_mode", + "worker_read_write_locks", } @@ -805,7 +810,9 @@ class Porter: ) # Map from table name to args passed to `handle_table`, i.e. a tuple # of: `postgres_size`, `table_size`, `forward_chunk`, `backward_chunk`. - tables_to_port_info_map = {r[0]: r[1:] for r in setup_res} + tables_to_port_info_map = { + r[0]: r[1:] for r in setup_res if r[0] not in IGNORED_TABLES + } # Step 5. Do the copying. # diff --git a/synapse/storage/databases/main/lock.py b/synapse/storage/databases/main/lock.py index 7270ef09da..c89b4f7919 100644 --- a/synapse/storage/databases/main/lock.py +++ b/synapse/storage/databases/main/lock.py @@ -25,6 +25,7 @@ from synapse.storage.database import ( LoggingDatabaseConnection, LoggingTransaction, ) +from synapse.storage.engines import PostgresEngine from synapse.util import Clock from synapse.util.stringutils import random_string @@ -68,12 +69,20 @@ class LockStore(SQLBaseStore): self._reactor = hs.get_reactor() self._instance_name = hs.get_instance_id() - # A map from `(lock_name, lock_key)` to the token of any locks that we - # think we currently hold. - self._live_tokens: WeakValueDictionary[ + # A map from `(lock_name, lock_key)` to lock that we think we + # currently hold. + self._live_lock_tokens: WeakValueDictionary[ Tuple[str, str], Lock ] = WeakValueDictionary() + # A map from `(lock_name, lock_key, token)` to read/write lock that we + # think we currently hold. For a given lock_name/lock_key, there can be + # multiple read locks at a time but only one write lock (no mixing read + # and write locks at the same time). + self._live_read_write_lock_tokens: WeakValueDictionary[ + Tuple[str, str, str], Lock + ] = WeakValueDictionary() + # When we shut down we want to remove the locks. Technically this can # lead to a race, as we may drop the lock while we are still processing. # However, a) it should be a small window, b) the lock is best effort @@ -91,11 +100,13 @@ class LockStore(SQLBaseStore): """Called when the server is shutting down""" logger.info("Dropping held locks due to shutdown") - # We need to take a copy of the tokens dict as dropping the locks will - # cause the dictionary to change. - locks = dict(self._live_tokens) + # We need to take a copy of the locks as dropping the locks will cause + # the dictionary to change. + locks = list(self._live_lock_tokens.values()) + list( + self._live_read_write_lock_tokens.values() + ) - for lock in locks.values(): + for lock in locks: await lock.release() logger.info("Dropped locks due to shutdown") @@ -122,7 +133,7 @@ class LockStore(SQLBaseStore): """ # Check if this process has taken out a lock and if it's still valid. 
- lock = self._live_tokens.get((lock_name, lock_key)) + lock = self._live_lock_tokens.get((lock_name, lock_key)) if lock and await lock.is_still_valid(): return None @@ -176,61 +187,111 @@ class LockStore(SQLBaseStore): self._reactor, self._clock, self, + read_write=False, lock_name=lock_name, lock_key=lock_key, token=token, ) - self._live_tokens[(lock_name, lock_key)] = lock + self._live_lock_tokens[(lock_name, lock_key)] = lock return lock - async def _is_lock_still_valid( - self, lock_name: str, lock_key: str, token: str - ) -> bool: - """Checks whether this instance still holds the lock.""" - last_renewed_ts = await self.db_pool.simple_select_one_onecol( - table="worker_locks", - keyvalues={ - "lock_name": lock_name, - "lock_key": lock_key, - "token": token, - }, - retcol="last_renewed_ts", - allow_none=True, - desc="is_lock_still_valid", - ) - return ( - last_renewed_ts is not None - and self._clock.time_msec() - _LOCK_TIMEOUT_MS < last_renewed_ts + async def try_acquire_read_write_lock( + self, + lock_name: str, + lock_key: str, + write: bool, + ) -> Optional["Lock"]: + """Try to acquire a lock for the given name/key. Will return an async + context manager if the lock is successfully acquired, which *must* be + used (otherwise the lock will leak). + """ + + now = self._clock.time_msec() + token = random_string(6) + + def _try_acquire_read_write_lock_txn(txn: LoggingTransaction) -> None: + # We attempt to acquire the lock by inserting into + # `worker_read_write_locks` and seeing if that fails any + # constraints. If it doesn't then we have acquired the lock, + # otherwise we haven't. + # + # Before that though we clear the table of any stale locks. + + delete_sql = """ + DELETE FROM worker_read_write_locks + WHERE last_renewed_ts < ? AND lock_name = ? AND lock_key = ?; + """ + + insert_sql = """ + INSERT INTO worker_read_write_locks (lock_name, lock_key, write_lock, instance_name, token, last_renewed_ts) + VALUES (?, ?, ?, ?, ?, ?) + """ + + if isinstance(self.database_engine, PostgresEngine): + # For Postgres we can send these queries at the same time. + txn.execute( + delete_sql + ";" + insert_sql, + ( + # DELETE args + now - _LOCK_TIMEOUT_MS, + lock_name, + lock_key, + # UPSERT args + lock_name, + lock_key, + write, + self._instance_name, + token, + now, + ), + ) + else: + # For SQLite these need to be two queries. 
+ txn.execute( + delete_sql, + ( + now - _LOCK_TIMEOUT_MS, + lock_name, + lock_key, + ), + ) + txn.execute( + insert_sql, + ( + lock_name, + lock_key, + write, + self._instance_name, + token, + now, + ), + ) + + return + + try: + await self.db_pool.runInteraction( + "try_acquire_read_write_lock", + _try_acquire_read_write_lock_txn, + ) + except self.database_engine.module.IntegrityError: + return None + + lock = Lock( + self._reactor, + self._clock, + self, + read_write=True, + lock_name=lock_name, + lock_key=lock_key, + token=token, ) - async def _renew_lock(self, lock_name: str, lock_key: str, token: str) -> None: - """Attempt to renew the lock if we still hold it.""" - await self.db_pool.simple_update( - table="worker_locks", - keyvalues={ - "lock_name": lock_name, - "lock_key": lock_key, - "token": token, - }, - updatevalues={"last_renewed_ts": self._clock.time_msec()}, - desc="renew_lock", - ) + self._live_read_write_lock_tokens[(lock_name, lock_key, token)] = lock - async def _drop_lock(self, lock_name: str, lock_key: str, token: str) -> None: - """Attempt to drop the lock, if we still hold it""" - await self.db_pool.simple_delete( - table="worker_locks", - keyvalues={ - "lock_name": lock_name, - "lock_key": lock_key, - "token": token, - }, - desc="drop_lock", - ) - - self._live_tokens.pop((lock_name, lock_key), None) + return lock class Lock: @@ -259,6 +320,7 @@ class Lock: reactor: IReactorCore, clock: Clock, store: LockStore, + read_write: bool, lock_name: str, lock_key: str, token: str, @@ -266,13 +328,23 @@ class Lock: self._reactor = reactor self._clock = clock self._store = store + self._read_write = read_write self._lock_name = lock_name self._lock_key = lock_key self._token = token + self._table = "worker_read_write_locks" if read_write else "worker_locks" + self._looping_call = clock.looping_call( - self._renew, _RENEWAL_INTERVAL_MS, store, lock_name, lock_key, token + self._renew, + _RENEWAL_INTERVAL_MS, + store, + clock, + read_write, + lock_name, + lock_key, + token, ) self._dropped = False @@ -281,6 +353,8 @@ class Lock: @wrap_as_background_process("Lock._renew") async def _renew( store: LockStore, + clock: Clock, + read_write: bool, lock_name: str, lock_key: str, token: str, @@ -291,12 +365,34 @@ class Lock: don't end up with a reference to `self` in the reactor, which would stop this from being cleaned up if we dropped the context manager. 
""" - await store._renew_lock(lock_name, lock_key, token) + table = "worker_read_write_locks" if read_write else "worker_locks" + await store.db_pool.simple_update( + table=table, + keyvalues={ + "lock_name": lock_name, + "lock_key": lock_key, + "token": token, + }, + updatevalues={"last_renewed_ts": clock.time_msec()}, + desc="renew_lock", + ) async def is_still_valid(self) -> bool: """Check if the lock is still held by us""" - return await self._store._is_lock_still_valid( - self._lock_name, self._lock_key, self._token + last_renewed_ts = await self._store.db_pool.simple_select_one_onecol( + table=self._table, + keyvalues={ + "lock_name": self._lock_name, + "lock_key": self._lock_key, + "token": self._token, + }, + retcol="last_renewed_ts", + allow_none=True, + desc="is_lock_still_valid", + ) + return ( + last_renewed_ts is not None + and self._clock.time_msec() - _LOCK_TIMEOUT_MS < last_renewed_ts ) async def __aenter__(self) -> None: @@ -325,7 +421,23 @@ class Lock: if self._looping_call.running: self._looping_call.stop() - await self._store._drop_lock(self._lock_name, self._lock_key, self._token) + await self._store.db_pool.simple_delete( + table=self._table, + keyvalues={ + "lock_name": self._lock_name, + "lock_key": self._lock_key, + "token": self._token, + }, + desc="drop_lock", + ) + + if self._read_write: + self._store._live_read_write_lock_tokens.pop( + (self._lock_name, self._lock_key, self._token), None + ) + else: + self._store._live_lock_tokens.pop((self._lock_name, self._lock_key), None) + self._dropped = True def __del__(self) -> None: diff --git a/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.postgres b/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.postgres new file mode 100644 index 0000000000..e1a41be9c9 --- /dev/null +++ b/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.postgres @@ -0,0 +1,152 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +-- We implement read/write style locks by using two tables with mutual foreign +-- key constraints. Note that this implementation is vulnerable to starving +-- writers if read locks repeatedly get acquired. +-- +-- The first table (`worker_read_write_locks_mode`) indicates that a given lock +-- has either been acquired in read mode *or* write mode, but not both. This is +-- enforced by the unique constraint. Each instance of a lock being acquired is +-- associated with a random `token`. +-- +-- The second table (`worker_read_write_locks`) tracks who has currently +-- acquired a given lock. For a given lock_name/lock_key, there can be multiple +-- read locks at a time but only one write lock (no mixing read and write locks +-- at the same time). +-- +-- The foreign key from the second to first table enforces that for any given +-- lock the second table cannot have a mix of rows with read or write. 
+--
+-- The foreign key from the first to second table enforces that we don't have a
+-- row for a lock in the first table if not in the second table.
+--
+--
+-- Furthermore, we add some triggers to automatically keep the first table up to
+-- date when inserting/deleting from the second table. This reduces the number
+-- of round trips needed to acquire and release locks, as those operations
+-- simply become an INSERT or DELETE. These triggers are added in a separate
+-- delta due to database specific syntax.


+-- A table to track whether a lock is currently acquired, and if so whether it's
+-- in read or write mode.
+CREATE TABLE worker_read_write_locks_mode (
+    lock_name TEXT NOT NULL,
+    lock_key TEXT NOT NULL,
+    -- Whether this lock is in read (false) or write (true) mode
+    write_lock BOOLEAN NOT NULL,
+    -- A token that has currently acquired the lock. We need this so that we can
+    -- add a foreign key constraint from this table to `worker_read_write_locks`.
+    token TEXT NOT NULL
+);
+
+-- Ensure that we can only have one row per lock
+CREATE UNIQUE INDEX worker_read_write_locks_mode_key ON worker_read_write_locks_mode (lock_name, lock_key);
+-- We need this (redundant) constraint so that we can have a foreign key
+-- constraint against this table.
+CREATE UNIQUE INDEX worker_read_write_locks_mode_type ON worker_read_write_locks_mode (lock_name, lock_key, write_lock);
+
+
+-- A table to track who has currently acquired a given lock.
+CREATE TABLE worker_read_write_locks (
+    lock_name TEXT NOT NULL,
+    lock_key TEXT NOT NULL,
+    -- We write the instance name to ease manual debugging, we don't ever read
+    -- from it.
+    -- Note: instance names aren't guaranteed to be unique.
+    instance_name TEXT NOT NULL,
+    -- Whether the process has taken out a "read" or a "write" lock.
+    write_lock BOOLEAN NOT NULL,
+    -- A random string generated each time an instance takes out a lock. Used by
+    -- the instance to tell whether the lock is still held by it (e.g. in the
+    -- case where the process stalls for a long time the lock may time out and
+    -- be taken out by another instance, at which point the original instance
+    -- can tell it no longer holds the lock as the tokens no longer match).
+    token TEXT NOT NULL,
+    last_renewed_ts BIGINT NOT NULL,
+
+    -- This constraint ensures that a given lock has only been acquired in read
+    -- xor write mode, but not both.
+    FOREIGN KEY (lock_name, lock_key, write_lock) REFERENCES worker_read_write_locks_mode (lock_name, lock_key, write_lock)
+);
+
+CREATE UNIQUE INDEX worker_read_write_locks_key ON worker_read_write_locks (lock_name, lock_key, token);
+-- Ensures that only one instance can acquire a lock in write mode at a time.
+CREATE UNIQUE INDEX worker_read_write_locks_write ON worker_read_write_locks (lock_name, lock_key) WHERE write_lock;
+
+
+-- Add a foreign key constraint to ensure that if a lock is in
+-- `worker_read_write_locks_mode` then there must be a corresponding row in
+-- `worker_read_write_locks` (i.e. we don't accidentally end up with a row in
+-- `worker_read_write_locks_mode` when the lock is not currently acquired).
+--
+-- We only add to PostgreSQL as SQLite does not support adding constraints
+-- after table creation, and so doesn't support "circular" foreign key
+-- constraints.
+ALTER TABLE worker_read_write_locks_mode ADD CONSTRAINT worker_read_write_locks_mode_foreign
+    FOREIGN KEY (lock_name, lock_key, token) REFERENCES worker_read_write_locks(lock_name, lock_key, token) DEFERRABLE INITIALLY DEFERRED;
+
+
+-- Add a trigger to UPSERT into `worker_read_write_locks_mode` whenever we try
+-- and acquire a lock, i.e. insert into `worker_read_write_locks`,
+CREATE OR REPLACE FUNCTION upsert_read_write_lock_parent() RETURNS trigger AS $$
+BEGIN
+    INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token)
+        VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token)
+        ON CONFLICT (lock_name, lock_key)
+        DO NOTHING;
+    RETURN NEW;
+END
+$$
+LANGUAGE plpgsql;
+
+CREATE TRIGGER upsert_read_write_lock_parent_trigger BEFORE INSERT ON worker_read_write_locks
+    FOR EACH ROW
+    EXECUTE PROCEDURE upsert_read_write_lock_parent();
+
+
+-- Ensure that we keep `worker_read_write_locks_mode` up to date whenever a lock
+-- is released (i.e. a row deleted from `worker_read_write_locks`). Either we
+-- update the `worker_read_write_locks_mode.token` to match another instance
+-- that has currently acquired the lock, or we delete the row if nobody has
+-- currently acquired a lock.
+CREATE OR REPLACE FUNCTION delete_read_write_lock_parent() RETURNS trigger AS $$
+DECLARE
+    new_token TEXT;
+BEGIN
+    SELECT token INTO new_token FROM worker_read_write_locks
+        WHERE
+            lock_name = OLD.lock_name
+            AND lock_key = OLD.lock_key;
+
+    IF NOT FOUND THEN
+        DELETE FROM worker_read_write_locks_mode
+            WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key;
+    ELSE
+        UPDATE worker_read_write_locks_mode
+            SET token = new_token
+            WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key;
+    END IF;
+
+    RETURN NEW;
+END
+$$
+LANGUAGE plpgsql;
+
+CREATE TRIGGER delete_read_write_lock_parent_trigger AFTER DELETE ON worker_read_write_locks
+    FOR EACH ROW
+    EXECUTE PROCEDURE delete_read_write_lock_parent();
diff --git a/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.sqlite b/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.sqlite
new file mode 100644
index 0000000000..be2dfbbb8a
--- /dev/null
+++ b/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.sqlite
@@ -0,0 +1,119 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+-- c.f. the postgres version for context. The tables and constraints are the
+-- same, however they need to be defined slightly differently to work around how
+-- each database handles circular foreign key references.
+
+
+
+-- A table to track whether a lock is currently acquired, and if so whether it's
+-- in read or write mode.
+CREATE TABLE worker_read_write_locks_mode (
+    lock_name TEXT NOT NULL,
+    lock_key TEXT NOT NULL,
+    -- Whether this lock is in read (false) or write (true) mode
+    write_lock BOOLEAN NOT NULL,
+    -- A token that has currently acquired the lock. We need this so that we can
+    -- add a foreign key constraint from this table to `worker_read_write_locks`.
+    token TEXT NOT NULL,
+    -- Add a foreign key constraint to ensure that if a lock is in
+    -- `worker_read_write_locks_mode` then there must be a corresponding row in
+    -- `worker_read_write_locks` (i.e. we don't accidentally end up with a row in
+    -- `worker_read_write_locks_mode` when the lock is not currently acquired).
+    FOREIGN KEY (lock_name, lock_key, token) REFERENCES worker_read_write_locks(lock_name, lock_key, token) DEFERRABLE INITIALLY DEFERRED
+);
+
+-- Ensure that we can only have one row per lock
+CREATE UNIQUE INDEX worker_read_write_locks_mode_key ON worker_read_write_locks_mode (lock_name, lock_key);
+-- We need this (redundant) constraint so that we can have a foreign key
+-- constraint against this table.
+CREATE UNIQUE INDEX worker_read_write_locks_mode_type ON worker_read_write_locks_mode (lock_name, lock_key, write_lock);
+
+
+-- A table to track who has currently acquired a given lock.
+CREATE TABLE worker_read_write_locks (
+    lock_name TEXT NOT NULL,
+    lock_key TEXT NOT NULL,
+    -- We write the instance name to ease manual debugging, we don't ever read
+    -- from it.
+    -- Note: instance names aren't guaranteed to be unique.
+    instance_name TEXT NOT NULL,
+    -- Whether the process has taken out a "read" or a "write" lock.
+    write_lock BOOLEAN NOT NULL,
+    -- A random string generated each time an instance takes out a lock. Used by
+    -- the instance to tell whether the lock is still held by it (e.g. in the
+    -- case where the process stalls for a long time the lock may time out and
+    -- be taken out by another instance, at which point the original instance
+    -- can tell it no longer holds the lock as the tokens no longer match).
+    token TEXT NOT NULL,
+    last_renewed_ts BIGINT NOT NULL,
+
+    -- This constraint ensures that a given lock has only been acquired in read
+    -- xor write mode, but not both.
+    FOREIGN KEY (lock_name, lock_key, write_lock) REFERENCES worker_read_write_locks_mode (lock_name, lock_key, write_lock)
+);
+
+CREATE UNIQUE INDEX worker_read_write_locks_key ON worker_read_write_locks (lock_name, lock_key, token);
+-- Ensures that only one instance can acquire a lock in write mode at a time.
+CREATE UNIQUE INDEX worker_read_write_locks_write ON worker_read_write_locks (lock_name, lock_key) WHERE write_lock;
+
+
+-- Add a trigger to UPSERT into `worker_read_write_locks_mode` whenever we try
+-- and acquire a lock, i.e. insert into `worker_read_write_locks`,
+CREATE TRIGGER IF NOT EXISTS upsert_read_write_lock_parent_trigger
+BEFORE INSERT ON worker_read_write_locks
+FOR EACH ROW
+BEGIN
+    -- First ensure that `worker_read_write_locks_mode` doesn't have stale
+    -- entries in it, as on SQLite we don't have the foreign key constraint to
+    -- enforce this.
+    DELETE FROM worker_read_write_locks_mode
+        WHERE lock_name = NEW.lock_name AND lock_key = NEW.lock_key
+        AND NOT EXISTS (
+            SELECT 1 FROM worker_read_write_locks
+            WHERE lock_name = NEW.lock_name AND lock_key = NEW.lock_key
+        );
+
+    INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token)
+        VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token)
+        ON CONFLICT (lock_name, lock_key)
+        DO NOTHING;
+END;
+
+-- Ensure that we keep `worker_read_write_locks_mode` up to date whenever a lock
+-- is released (i.e. a row deleted from `worker_read_write_locks`).
Either we +-- update the `worker_read_write_locks_mode.token` to match another instance +-- that has currently acquired the lock, or we delete the row if nobody has +-- currently acquired a lock. +CREATE TRIGGER IF NOT EXISTS delete_read_write_lock_parent_trigger +AFTER DELETE ON worker_read_write_locks +FOR EACH ROW +BEGIN + DELETE FROM worker_read_write_locks_mode + WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key + AND NOT EXISTS ( + SELECT 1 FROM worker_read_write_locks + WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key + ); + + UPDATE worker_read_write_locks_mode + SET token = ( + SELECT token FROM worker_read_write_locks + WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key + ) + WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key; +END; diff --git a/tests/storage/databases/main/test_lock.py b/tests/storage/databases/main/test_lock.py index 56cb49d9b5..ad454f6dd8 100644 --- a/tests/storage/databases/main/test_lock.py +++ b/tests/storage/databases/main/test_lock.py @@ -166,4 +166,285 @@ class LockTestCase(unittest.HomeserverTestCase): # Now call the shutdown code self.get_success(self.store._on_shutdown()) - self.assertEqual(self.store._live_tokens, {}) + self.assertEqual(self.store._live_lock_tokens, {}) + + +class ReadWriteLockTestCase(unittest.HomeserverTestCase): + """Test the read/write lock implementation.""" + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + + def test_acquire_write_contention(self) -> None: + """Test that we can only acquire one write lock at a time""" + # Track the number of tasks holding the lock. + # Should be at most 1. + in_lock = 0 + max_in_lock = 0 + + release_lock: "Deferred[None]" = Deferred() + + async def task() -> None: + nonlocal in_lock + nonlocal max_in_lock + + lock = await self.store.try_acquire_read_write_lock( + "name", "key", write=True + ) + if not lock: + return + + async with lock: + in_lock += 1 + max_in_lock = max(max_in_lock, in_lock) + + # Block to allow other tasks to attempt to take the lock. + await release_lock + + in_lock -= 1 + + # Start 3 tasks. + task1 = defer.ensureDeferred(task()) + task2 = defer.ensureDeferred(task()) + task3 = defer.ensureDeferred(task()) + + # Give the reactor a kick so that the database transaction returns. + self.pump() + + release_lock.callback(None) + + # Run the tasks to completion. + # To work around `Linearizer`s using a different reactor to sleep when + # contended (#12841), we call `runUntilCurrent` on + # `twisted.internet.reactor`, which is a different reactor to that used + # by the homeserver. + assert isinstance(reactor, ReactorBase) + self.get_success(task1) + reactor.runUntilCurrent() + self.get_success(task2) + reactor.runUntilCurrent() + self.get_success(task3) + + # At most one task should have held the lock at a time. + self.assertEqual(max_in_lock, 1) + + def test_acquire_multiple_reads(self) -> None: + """Test that we can acquire multiple read locks at a time""" + # Track the number of tasks holding the lock. + in_lock = 0 + max_in_lock = 0 + + release_lock: "Deferred[None]" = Deferred() + + async def task() -> None: + nonlocal in_lock + nonlocal max_in_lock + + lock = await self.store.try_acquire_read_write_lock( + "name", "key", write=False + ) + if not lock: + return + + async with lock: + in_lock += 1 + max_in_lock = max(max_in_lock, in_lock) + + # Block to allow other tasks to attempt to take the lock. + await release_lock + + in_lock -= 1 + + # Start 3 tasks. 
+        task1 = defer.ensureDeferred(task())
+        task2 = defer.ensureDeferred(task())
+        task3 = defer.ensureDeferred(task())
+
+        # Give the reactor a kick so that the database transaction returns.
+        self.pump()
+
+        release_lock.callback(None)
+
+        # Run the tasks to completion.
+        # To work around `Linearizer`s using a different reactor to sleep when
+        # contended (#12841), we call `runUntilCurrent` on
+        # `twisted.internet.reactor`, which is a different reactor to that used
+        # by the homeserver.
+        assert isinstance(reactor, ReactorBase)
+        self.get_success(task1)
+        reactor.runUntilCurrent()
+        self.get_success(task2)
+        reactor.runUntilCurrent()
+        self.get_success(task3)
+
+        # All three tasks should have been able to hold the lock at the same time.
+        self.assertEqual(max_in_lock, 3)
+
+    def test_write_lock_acquired(self) -> None:
+        """Test that we can take out a write lock and that while we hold it
+        nobody else can take it out.
+        """
+        # First to acquire this lock, so it should complete
+        lock = self.get_success(
+            self.store.try_acquire_read_write_lock("name", "key", write=True)
+        )
+        assert lock is not None
+
+        # Enter the context manager
+        self.get_success(lock.__aenter__())
+
+        # Attempting to acquire the lock again fails, as both read and write.
+        lock2 = self.get_success(
+            self.store.try_acquire_read_write_lock("name", "key", write=True)
+        )
+        self.assertIsNone(lock2)
+
+        lock3 = self.get_success(
+            self.store.try_acquire_read_write_lock("name", "key", write=False)
+        )
+        self.assertIsNone(lock3)
+
+        # Calling `is_still_valid` reports true.
+        self.assertTrue(self.get_success(lock.is_still_valid()))
+
+        # Drop the lock
+        self.get_success(lock.__aexit__(None, None, None))
+
+        # We can now acquire the lock again.
+        lock4 = self.get_success(
+            self.store.try_acquire_read_write_lock("name", "key", write=True)
+        )
+        assert lock4 is not None
+        self.get_success(lock4.__aenter__())
+        self.get_success(lock4.__aexit__(None, None, None))
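For reference, the calling pattern these tests exercise, written as ordinary application code, looks roughly like the following sketch (`store` stands in for the datastore; the lock name and key are illustrative):

```python
# A minimal sketch of the intended usage, inferred from the tests in this
# file; not itself part of the patch.
async def with_write_lock(store) -> None:
    lock = await store.try_acquire_read_write_lock("name", "key", write=True)
    if lock is None:
        # Someone else holds the lock in a conflicting mode; give up rather
        # than block.
        return

    async with lock:
        ...  # Critical section: the lock is renewed in the background.
```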
+
+    def test_read_lock_acquired(self) -> None:
+        """Test that we can take out a read lock and that while we hold it
+        only other reads can use it.
+        """
+        # First to acquire this lock, so it should complete
+        lock = self.get_success(
+            self.store.try_acquire_read_write_lock("name", "key", write=False)
+        )
+        assert lock is not None
+
+        # Enter the context manager
+        self.get_success(lock.__aenter__())
+
+        # Attempting to acquire the write lock fails
+        lock2 = self.get_success(
+            self.store.try_acquire_read_write_lock("name", "key", write=True)
+        )
+        self.assertIsNone(lock2)
+
+        # Attempting to acquire a read lock succeeds
+        lock3 = self.get_success(
+            self.store.try_acquire_read_write_lock("name", "key", write=False)
+        )
+        assert lock3 is not None
+        self.get_success(lock3.__aenter__())
+
+        # Calling `is_still_valid` reports true.
+        self.assertTrue(self.get_success(lock.is_still_valid()))
+
+        # Drop the first lock
+        self.get_success(lock.__aexit__(None, None, None))
+
+        # Attempting to acquire the write lock still fails, as lock3 is still
+        # active.
+        lock4 = self.get_success(
+            self.store.try_acquire_read_write_lock("name", "key", write=True)
+        )
+        self.assertIsNone(lock4)
+
+        # Drop the still open third lock
+        self.get_success(lock3.__aexit__(None, None, None))
+
+        # We can now acquire the lock again.
+        lock5 = self.get_success(
+            self.store.try_acquire_read_write_lock("name", "key", write=True)
+        )
+        assert lock5 is not None
+        self.get_success(lock5.__aenter__())
+        self.get_success(lock5.__aexit__(None, None, None))
+
+    def test_maintain_lock(self) -> None:
+        """Test that we don't time out locks while they're still active (lock is
+        renewed in the background if the process is still alive)"""
+
+        lock = self.get_success(
+            self.store.try_acquire_read_write_lock("name", "key", write=True)
+        )
+        assert lock is not None
+
+        self.get_success(lock.__aenter__())
+
+        # Wait for ages with the lock held; we should not be able to acquire it.
+        self.reactor.advance(5 * _LOCK_TIMEOUT_MS / 1000)
+        self.pump()
+
+        lock2 = self.get_success(
+            self.store.try_acquire_read_write_lock("name", "key", write=True)
+        )
+        self.assertIsNone(lock2)
+
+        self.get_success(lock.__aexit__(None, None, None))
+
+    def test_timeout_lock(self) -> None:
+        """Test that we time out locks if they're not updated for ages"""
+
+        lock = self.get_success(
+            self.store.try_acquire_read_write_lock("name", "key", write=True)
+        )
+        assert lock is not None
+
+        self.get_success(lock.__aenter__())
+
+        # We simulate the process getting stuck by cancelling the looping call
+        # that keeps the lock active.
+        lock._looping_call.stop()
+
+        # Wait for the lock to timeout.
+        self.reactor.advance(2 * _LOCK_TIMEOUT_MS / 1000)
+
+        lock2 = self.get_success(
+            self.store.try_acquire_read_write_lock("name", "key", write=True)
+        )
+        self.assertIsNotNone(lock2)
+
+        self.assertFalse(self.get_success(lock.is_still_valid()))
+
+    def test_drop(self) -> None:
+        """Test that dropping the context manager means we stop renewing the lock"""
+
+        lock = self.get_success(
+            self.store.try_acquire_read_write_lock("name", "key", write=True)
+        )
+        self.assertIsNotNone(lock)
+
+        del lock
+
+        # Wait for the lock to timeout.
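+        # Nothing renews the lock once our reference to it has been dropped,
+        # so advancing past the timeout should free it up for the next caller.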
+ self.reactor.advance(2 * _LOCK_TIMEOUT_MS / 1000) + + lock2 = self.get_success( + self.store.try_acquire_read_write_lock("name", "key", write=True) + ) + self.assertIsNotNone(lock2) + + def test_shutdown(self) -> None: + """Test that shutting down Synapse releases the locks""" + # Acquire two locks + lock = self.get_success( + self.store.try_acquire_read_write_lock("name", "key", write=True) + ) + self.assertIsNotNone(lock) + lock2 = self.get_success( + self.store.try_acquire_read_write_lock("name", "key2", write=True) + ) + self.assertIsNotNone(lock2) + + # Now call the shutdown code + self.get_success(self.store._on_shutdown()) + + self.assertEqual(self.store._live_read_write_lock_tokens, {}) From 561d06b481176f61ed12f5a4723b127ff8624662 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 5 Jul 2023 18:45:42 -0500 Subject: [PATCH 189/562] Remove support for Python 3.7 (#15851) Fix https://github.com/matrix-org/synapse/issues/15836 --- .ci/scripts/calculate_jobs.py | 11 ++--- .github/workflows/release-artifacts.yml | 2 +- .github/workflows/tests.yml | 6 +-- changelog.d/15851.removal | 1 + docker/Dockerfile-dhvirtualenv | 50 +++++++++++----------- docs/setup/installation.md | 2 +- docs/upgrade.md | 12 ++++++ poetry.lock | 55 +------------------------ pyproject.toml | 5 +-- synapse/__init__.py | 4 +- synapse/util/check_dependencies.py | 5 +-- tests/metrics/test_metrics.py | 10 +---- 12 files changed, 55 insertions(+), 108 deletions(-) create mode 100644 changelog.d/15851.removal diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index b41ec0b6e2..c2c18b48e3 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -29,11 +29,12 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/") # First calculate the various trial jobs. 
# -# For each type of test we only run on Py3.7 on PRs +# For PRs, we only run each type of test with the oldest Python version supported (which +# is Python 3.8 right now) trial_sqlite_tests = [ { - "python-version": "3.7", + "python-version": "3.8", "database": "sqlite", "extras": "all", } @@ -46,13 +47,13 @@ if not IS_PR: "database": "sqlite", "extras": "all", } - for version in ("3.8", "3.9", "3.10", "3.11") + for version in ("3.9", "3.10", "3.11") ) trial_postgres_tests = [ { - "python-version": "3.7", + "python-version": "3.8", "database": "postgres", "postgres-version": "11", "extras": "all", @@ -71,7 +72,7 @@ if not IS_PR: trial_no_extra_tests = [ { - "python-version": "3.7", + "python-version": "3.8", "database": "sqlite", "extras": "", } diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 0981200401..f331f67d97 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -144,7 +144,7 @@ jobs: - name: Only build a single wheel on PR if: startsWith(github.ref, 'refs/pull/') - run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV + run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV - name: Build wheels run: python -m cibuildwheel --output-dir wheelhouse diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 6c22984997..0a01e82984 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -320,7 +320,7 @@ jobs: - uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.8' - name: Prepare old deps if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true' @@ -362,7 +362,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["pypy-3.7"] + python-version: ["pypy-3.8"] extras: ["all"] steps: @@ -477,7 +477,7 @@ jobs: strategy: matrix: include: - - python-version: "3.7" + - python-version: "3.8" postgres-version: "11" - python-version: "3.11" diff --git a/changelog.d/15851.removal b/changelog.d/15851.removal new file mode 100644 index 0000000000..e08df4c136 --- /dev/null +++ b/changelog.d/15851.removal @@ -0,0 +1 @@ +Remove support for Python 3.7. diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv index 861129ebc2..b7679924c2 100644 --- a/docker/Dockerfile-dhvirtualenv +++ b/docker/Dockerfile-dhvirtualenv @@ -28,12 +28,12 @@ FROM docker.io/library/${distro} as builder RUN apt-get update -qq -o Acquire::Languages=none RUN env DEBIAN_FRONTEND=noninteractive apt-get install \ - -yqq --no-install-recommends \ - build-essential \ - ca-certificates \ - devscripts \ - equivs \ - wget + -yqq --no-install-recommends \ + build-essential \ + ca-certificates \ + devscripts \ + equivs \ + wget # fetch and unpack the package # We are temporarily using a fork of dh-virtualenv due to an incompatibility with Python 3.11, which ships with @@ -62,33 +62,29 @@ FROM docker.io/library/${distro} ARG distro="" ENV distro ${distro} -# Python < 3.7 assumes LANG="C" means ASCII-only and throws on printing unicode -# http://bugs.python.org/issue19846 -ENV LANG C.UTF-8 - # Install the build dependencies # # NB: keep this list in sync with the list of build-deps in debian/control # TODO: it would be nice to do that automatically. 
RUN apt-get update -qq -o Acquire::Languages=none \ && env DEBIAN_FRONTEND=noninteractive apt-get install \ - -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \ - build-essential \ - curl \ - debhelper \ - devscripts \ - libsystemd-dev \ - lsb-release \ - pkg-config \ - python3-dev \ - python3-pip \ - python3-setuptools \ - python3-venv \ - sqlite3 \ - libpq-dev \ - libicu-dev \ - pkg-config \ - xmlsec1 + -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \ + build-essential \ + curl \ + debhelper \ + devscripts \ + libsystemd-dev \ + lsb-release \ + pkg-config \ + python3-dev \ + python3-pip \ + python3-setuptools \ + python3-venv \ + sqlite3 \ + libpq-dev \ + libicu-dev \ + pkg-config \ + xmlsec1 # Install rust and ensure it's in the PATH ENV RUSTUP_HOME=/rust diff --git a/docs/setup/installation.md b/docs/setup/installation.md index 86e506a3e2..4ca8c6b697 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -200,7 +200,7 @@ When following this route please make sure that the [Platform-specific prerequis System requirements: - POSIX-compliant system (tested on Linux & OS X) -- Python 3.7 or later, up to Python 3.11. +- Python 3.8 or later, up to Python 3.11. - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org If building on an uncommon architecture for which pre-built wheels are diff --git a/docs/upgrade.md b/docs/upgrade.md index 4cd38b1393..384f4010b4 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -87,6 +87,18 @@ process, for example: wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` + +# Upgrading to v1.88.0 + +## Minimum supported Python version + +The minimum supported Python version has been increased from v3.7 to v3.8. +You will need Python 3.8 to run Synapse v1.88.0 (due out July 18th, 2023). + +If you use current versions of the Matrix.org-distributed Debian +packages or Docker images, no action is required. 
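For anyone packaging Synapse themselves, the new requirement amounts to the following startup check (a standalone sketch mirroring the `synapse/__init__.py` hunk later in this patch):

```python
import sys

# Synapse now refuses to start on anything older than Python 3.8.
if sys.version_info < (3, 8):
    print("Synapse requires Python 3.8 or above.")
    sys.exit(1)
```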
+ + # Upgrading to v1.86.0 ## Minimum supported Rust version diff --git a/poetry.lock b/poetry.lock index 9aaf5c7de7..c62337053e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -41,9 +41,6 @@ files = [ {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, ] -[package.dependencies] -importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} - [package.extras] cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] dev = ["attrs[docs,tests]", "pre-commit"] @@ -190,7 +187,6 @@ packaging = ">=22.0" pathspec = ">=0.9.0" platformdirs = ">=2" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""} typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} [package.extras] @@ -412,7 +408,6 @@ files = [ [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} -importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} [[package]] name = "click-default-group" @@ -601,7 +596,6 @@ files = [ [package.dependencies] gitdb = ">=4.0.1,<5" -typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""} [[package]] name = "hiredis" @@ -847,7 +841,6 @@ files = [ ] [package.dependencies] -typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} zipp = ">=0.5" [package.extras] @@ -987,11 +980,9 @@ files = [ [package.dependencies] attrs = ">=17.4.0" -importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2" -typing-extensions = {version = "*", markers = "python_version < \"3.8\""} [package.extras] format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] @@ -1199,7 +1190,6 @@ files = [ [package.dependencies] mdurl = ">=0.1,<1.0" -typing_extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} [package.extras] benchmarking = ["psutil", "pytest", "pytest-benchmark"] @@ -1283,7 +1273,6 @@ files = [ [package.dependencies] attrs = "*" -importlib-metadata = {version = ">=1.4", markers = "python_version < \"3.8\""} [package.extras] dev = ["aiounittest", "black (==22.3.0)", "build (==0.8.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "mypy (==0.910)", "tox", "twine (==4.0.1)", "twisted"] @@ -1459,7 +1448,6 @@ files = [ [package.dependencies] mypy-extensions = ">=0.4.3" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""} typing-extensions = ">=3.10" [package.extras] @@ -1721,9 +1709,6 @@ files = [ {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"}, ] -[package.dependencies] -typing-extensions = {version = ">=4.4", markers = "python_version < \"3.8\""} - [package.extras] docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"] test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] @@ -2060,7 +2045,6 @@ files = [ [package.dependencies] cryptography = ">=3.1" defusedxml = "*" 
-importlib-metadata = {version = ">=1.7.0", markers = "python_version < \"3.8\""} importlib-resources = {version = "*", markers = "python_version < \"3.9\""} pyopenssl = "*" python-dateutil = "*" @@ -2410,9 +2394,7 @@ files = [ [package.dependencies] canonicaljson = ">=1.0.0" -importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} pynacl = ">=0.3.0" -typing-extensions = {version = ">=3.5", markers = "python_version < \"3.8\""} unpaddedbase64 = ">=1.0.1" [package.extras] @@ -2852,39 +2834,6 @@ files = [ six = "*" twisted = "*" -[[package]] -name = "typed-ast" -version = "1.5.4" -description = "a fork of Python 2 and 3 ast modules with type comment support" -optional = false -python-versions = ">=3.6" -files = [ - {file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"}, - {file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"}, - {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"}, - {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"}, - {file = "typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"}, - {file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"}, - {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"}, - {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"}, - {file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"}, - {file = "typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"}, - {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"}, - {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"}, - {file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"}, - {file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"}, - {file = "typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"}, - {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"}, - {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"}, - 
{file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"}, - {file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"}, - {file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"}, - {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"}, - {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"}, - {file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"}, - {file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"}, -] - [[package]] name = "types-bleach" version = "6.0.0.3" @@ -3293,5 +3242,5 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" -python-versions = "^3.7.1" -content-hash = "7f31754a1009d7b6c9a1bd7221a0b243ffd510f362c28f0da417aaac16757a87" +python-versions = "^3.8.0" +content-hash = "0832381cc9e7065e8d95c810d732aa031b98d55cf188719989b12d841993e62e" diff --git a/pyproject.toml b/pyproject.toml index 192a07756b..a6e3a935a9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -147,7 +147,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main" update_synapse_database = "synapse._scripts.update_synapse_database:main" [tool.poetry.dependencies] -python = "^3.7.1" +python = "^3.8.0" # Mandatory Dependencies # ---------------------- @@ -203,9 +203,6 @@ ijson = ">=3.1.4" matrix-common = "^1.3.0" # We need packaging.requirements.Requirement, added in 16.1. packaging = ">=16.1" -# At the time of writing, we only use functions from the version `importlib.metadata` -# which shipped in Python 3.8. This corresponds to version 1.4 of the backport. -importlib_metadata = { version = ">=1.4", python = "<3.8" } # This is the most recent version of Pydantic with available on common distros. # We are currently incompatible with >=2.0.0: (https://github.com/matrix-org/synapse/issues/15858) pydantic = "^1.7.4" diff --git a/synapse/__init__.py b/synapse/__init__.py index b97ee59f15..6c1801862b 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -25,8 +25,8 @@ from synapse.util.rust import check_rust_lib_up_to_date from synapse.util.stringutils import strtobool # Check that we're not running on an unsupported Python version. -if sys.version_info < (3, 7): - print("Synapse requires Python 3.7 or above.") +if sys.version_info < (3, 8): + print("Synapse requires Python 3.8 or above.") sys.exit(1) # Allow using the asyncio reactor via env var. diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py index 1c0fde4966..114130a08f 100644 --- a/synapse/util/check_dependencies.py +++ b/synapse/util/check_dependencies.py @@ -21,16 +21,13 @@ require. But this is probably just symptomatic of Python's package management. 
""" import logging +from importlib import metadata from typing import Iterable, NamedTuple, Optional from packaging.requirements import Requirement DISTRIBUTION_NAME = "matrix-synapse" -try: - from importlib import metadata -except ImportError: - import importlib_metadata as metadata # type: ignore[no-redef] __all__ = ["check_requirements"] diff --git a/tests/metrics/test_metrics.py b/tests/metrics/test_metrics.py index 7c3656d049..d14876826c 100644 --- a/tests/metrics/test_metrics.py +++ b/tests/metrics/test_metrics.py @@ -12,19 +12,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from importlib import metadata from typing import Dict, Tuple - -from typing_extensions import Protocol - -try: - from importlib import metadata -except ImportError: - import importlib_metadata as metadata # type: ignore[no-redef] - from unittest.mock import patch from pkg_resources import parse_version from prometheus_client.core import Sample +from typing_extensions import Protocol from synapse.app._base import _set_prometheus_client_use_created_metrics from synapse.metrics import REGISTRY, InFlightGauge, generate_latest From b07b14b494ae1dd564b4c44f844c9a9545b3d08a Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 5 Jul 2023 18:53:55 -0500 Subject: [PATCH 190/562] Federation outbound proxy (#15773) Allow configuring the set of workers to proxy outbound federation traffic through (`outbound_federation_restricted_to`). This is useful when you have a worker setup with `federation_sender` instances responsible for sending outbound federation requests and want to make sure *all* outbound federation traffic goes through those instances. Before this change, the generic workers would still contact federation themselves for things like profile lookups, backfill, etc. This PR allows you to set more strict access controls/firewall for all workers and only allow the `federation_sender`'s to contact the outside world. The original code is from @erikjohnston's branches which I've gotten in-shape to merge. 
--- changelog.d/15773.feature | 1 + .../configuration/config_documentation.md | 31 ++- docs/workers.md | 20 ++ synapse/app/_base.py | 2 + synapse/app/generic_worker.py | 1 + synapse/app/homeserver.py | 1 + synapse/config/workers.py | 40 ++- synapse/http/client.py | 7 +- synapse/http/matrixfederationclient.py | 132 +++++++++- synapse/http/proxy.py | 249 ++++++++++++++++++ synapse/http/proxyagent.py | 79 +++++- synapse/http/server.py | 55 ++-- synapse/http/site.py | 26 +- tests/app/test_openid_listener.py | 8 +- tests/handlers/test_device.py | 3 +- tests/handlers/test_federation.py | 2 +- tests/handlers/test_presence.py | 1 - tests/handlers/test_typing.py | 10 + tests/http/test_matrixfederationclient.py | 189 ++++++++++++- tests/http/test_proxy.py | 53 ++++ tests/replication/_base.py | 3 +- .../test_federation_sender_shard.py | 22 +- tests/rest/client/test_presence.py | 1 - tests/rest/client/test_rooms.py | 2 - tests/storage/test_e2e_room_keys.py | 2 +- tests/storage/test_purge.py | 2 +- tests/storage/test_rollback_worker.py | 4 +- tests/test_server.py | 33 ++- tests/unittest.py | 1 + 29 files changed, 890 insertions(+), 90 deletions(-) create mode 100644 changelog.d/15773.feature create mode 100644 synapse/http/proxy.py create mode 100644 tests/http/test_proxy.py diff --git a/changelog.d/15773.feature b/changelog.d/15773.feature new file mode 100644 index 0000000000..0d77fae2dc --- /dev/null +++ b/changelog.d/15773.feature @@ -0,0 +1 @@ +Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 26d7c7900c..89a92c4682 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3930,13 +3930,14 @@ federation_sender_instances: --- ### `instance_map` -When using workers this should be a map from [`worker_name`](#worker_name) to the -HTTP replication listener of the worker, if configured, and to the main process. -Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs -a HTTP replication listener, and that listener should be included in the `instance_map`. -The main process also needs an entry on the `instance_map`, and it should be listed under -`main` **if even one other worker exists**. Ensure the port matches with what is declared -inside the `listener` block for a `replication` listener. +When using workers this should be a map from [`worker_name`](#worker_name) to the HTTP +replication listener of the worker, if configured, and to the main process. Each worker +declared under [`stream_writers`](../../workers.md#stream-writers) and +[`outbound_federation_restricted_to`](#outbound_federation_restricted_to) needs a HTTP replication listener, and that +listener should be included in the `instance_map`. The main process also needs an entry +on the `instance_map`, and it should be listed under `main` **if even one other worker +exists**. Ensure the port matches with what is declared inside the `listener` block for +a `replication` listener. Example configuration: @@ -3966,6 +3967,22 @@ stream_writers: typing: worker1 ``` --- +### `outbound_federation_restricted_to` + +When using workers, you can restrict outbound federation traffic to only go through a +specific subset of workers. Any worker specified here must also be in the +[`instance_map`](#instance_map). 
+ +```yaml +outbound_federation_restricted_to: + - federation_sender1 + - federation_sender2 +``` + +Also see the [worker +documentation](../../workers.md#restrict-outbound-federation-traffic-to-a-specific-set-of-workers) +for more info. +--- ### `run_background_tasks_on` The [worker](../../workers.md#background-tasks) that is used to run diff --git a/docs/workers.md b/docs/workers.md index 735128762a..303e0f0e7a 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -531,6 +531,26 @@ the stream writer for the `presence` stream: ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/ +#### Restrict outbound federation traffic to a specific set of workers + +The `outbound_federation_restricted_to` configuration is useful to make sure outbound +federation traffic only goes through a specified subset of workers. This allows you to +set more strict access controls (like a firewall) for all workers and only allow the +`federation_sender`'s to contact the outside world. + +```yaml +instance_map: + main: + host: localhost + port: 8030 + federation_sender1: + host: localhost + port: 8034 + +outbound_federation_restricted_to: + - federation_sender1 +``` + #### Background tasks There is also support for moving background tasks to a separate diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 936b1b0430..938ab40f27 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -386,6 +386,7 @@ def listen_unix( def listen_http( + hs: "HomeServer", listener_config: ListenerConfig, root_resource: Resource, version_string: str, @@ -406,6 +407,7 @@ def listen_http( version_string, max_request_body_size=max_request_body_size, reactor=reactor, + federation_agent=hs.get_federation_http_client().agent, ) if isinstance(listener_config, TCPListenerConfig): diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 7406c3948c..dc79efcc14 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -221,6 +221,7 @@ class GenericWorkerServer(HomeServer): root_resource = create_resource_tree(resources, OptionsResource()) _base.listen_http( + self, listener_config, root_resource, self.version_string, diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 84236ac299..f188c7265a 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -139,6 +139,7 @@ class SynapseHomeServer(HomeServer): root_resource = OptionsResource() ports = listen_http( + self, listener_config, create_resource_tree(resources, root_resource), self.version_string, diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 38e13dd7b5..0b9789160c 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -15,7 +15,7 @@ import argparse import logging -from typing import Any, Dict, List, Union +from typing import Any, Dict, List, Optional, Union import attr from pydantic import BaseModel, Extra, StrictBool, StrictInt, StrictStr @@ -148,6 +148,27 @@ class WriterLocations: ) +@attr.s(auto_attribs=True) +class OutboundFederationRestrictedTo: + """Whether we limit outbound federation to a certain set of instances. + + Attributes: + instances: optional list of instances that can make outbound federation + requests. If None then all instances can make federation requests. + locations: list of instance locations to connect to proxy via. 
+ """ + + instances: Optional[List[str]] + locations: List[InstanceLocationConfig] = attr.Factory(list) + + def __contains__(self, instance: str) -> bool: + # It feels a bit dirty to return `True` if `instances` is `None`, but it makes + # sense in downstream usage in the sense that if + # `outbound_federation_restricted_to` is not configured, then any instance can + # talk to federation (no restrictions so always return `True`). + return self.instances is None or instance in self.instances + + class WorkerConfig(Config): """The workers are processes run separately to the main synapse process. They have their own pid_file and listener configuration. They use the @@ -357,6 +378,23 @@ class WorkerConfig(Config): new_option_name="update_user_directory_from_worker", ) + outbound_federation_restricted_to = config.get( + "outbound_federation_restricted_to", None + ) + self.outbound_federation_restricted_to = OutboundFederationRestrictedTo( + outbound_federation_restricted_to + ) + if outbound_federation_restricted_to: + for instance in outbound_federation_restricted_to: + if instance not in self.instance_map: + raise ConfigError( + "Instance %r is configured in 'outbound_federation_restricted_to' but does not appear in `instance_map` config." + % (instance,) + ) + self.outbound_federation_restricted_to.locations.append( + self.instance_map[instance] + ) + def _should_this_worker_perform_duty( self, config: Dict[str, Any], diff --git a/synapse/http/client.py b/synapse/http/client.py index 09ea93e10d..ca2cdbc6e2 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -1037,7 +1037,12 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol): if reason.check(ResponseDone): self.deferred.callback(self.length) elif reason.check(PotentialDataLoss): - # stolen from https://github.com/twisted/treq/pull/49/files + # This applies to requests which don't set `Content-Length` or a + # `Transfer-Encoding` in the response because in this case the end of the + # response is indicated by the connection being closed, an event which may + # also be due to a transient network problem or other error. But since this + # behavior is expected of some servers (like YouTube), let's ignore it. 
+ # Stolen from https://github.com/twisted/treq/pull/49/files # http://twistedmatrix.com/trac/ticket/4840 self.deferred.callback(self.length) else: diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index cc4e258b0f..b00396fdc7 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -50,7 +50,7 @@ from twisted.internet.interfaces import IReactorTime from twisted.internet.task import Cooperator from twisted.web.client import ResponseFailed from twisted.web.http_headers import Headers -from twisted.web.iweb import IBodyProducer, IResponse +from twisted.web.iweb import IAgent, IBodyProducer, IResponse import synapse.metrics import synapse.util.retryutils @@ -72,6 +72,7 @@ from synapse.http.client import ( read_body_with_max_size, ) from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent +from synapse.http.proxyagent import ProxyAgent from synapse.http.types import QueryParams from synapse.logging import opentracing from synapse.logging.context import make_deferred_yieldable, run_in_background @@ -393,17 +394,32 @@ class MatrixFederationHttpClient: if hs.config.server.user_agent_suffix: user_agent = "%s %s" % (user_agent, hs.config.server.user_agent_suffix) - federation_agent = MatrixFederationAgent( - self.reactor, - tls_client_options_factory, - user_agent.encode("ascii"), - hs.config.server.federation_ip_range_allowlist, - hs.config.server.federation_ip_range_blocklist, + outbound_federation_restricted_to = ( + hs.config.worker.outbound_federation_restricted_to ) + if hs.get_instance_name() in outbound_federation_restricted_to: + # Talk to federation directly + federation_agent: IAgent = MatrixFederationAgent( + self.reactor, + tls_client_options_factory, + user_agent.encode("ascii"), + hs.config.server.federation_ip_range_allowlist, + hs.config.server.federation_ip_range_blocklist, + ) + else: + # We need to talk to federation via the proxy via one of the configured + # locations + federation_proxies = outbound_federation_restricted_to.locations + federation_agent = ProxyAgent( + self.reactor, + self.reactor, + tls_client_options_factory, + federation_proxies=federation_proxies, + ) # Use a BlocklistingAgentWrapper to prevent circumventing the IP # blocking via IP literals in server names - self.agent = BlocklistingAgentWrapper( + self.agent: IAgent = BlocklistingAgentWrapper( federation_agent, ip_blocklist=hs.config.server.federation_ip_range_blocklist, ) @@ -412,7 +428,6 @@ class MatrixFederationHttpClient: self._store = hs.get_datastores().main self.version_string_bytes = hs.version_string.encode("ascii") self.default_timeout_seconds = hs.config.federation.client_timeout_ms / 1000 - self.max_long_retry_delay_seconds = ( hs.config.federation.max_long_retry_delay_ms / 1000 ) @@ -1131,6 +1146,101 @@ class MatrixFederationHttpClient: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. + Raises: + HttpResponseException: If we get an HTTP response code >= 300 + (except 429). + NotRetryingDestination: If we are not yet ready to retry this + server. + FederationDeniedError: If this destination is not on our + federation whitelist + RequestSendFailed: If there were problems connecting to the + remote, due to e.g. DNS failures, connection timeouts etc. 
+ """ + json_dict, _ = await self.get_json_with_headers( + destination=destination, + path=path, + args=args, + retry_on_dns_fail=retry_on_dns_fail, + timeout=timeout, + ignore_backoff=ignore_backoff, + try_trailing_slash_on_400=try_trailing_slash_on_400, + parser=parser, + ) + return json_dict + + @overload + async def get_json_with_headers( + self, + destination: str, + path: str, + args: Optional[QueryParams] = None, + retry_on_dns_fail: bool = True, + timeout: Optional[int] = None, + ignore_backoff: bool = False, + try_trailing_slash_on_400: bool = False, + parser: Literal[None] = None, + ) -> Tuple[JsonDict, Dict[bytes, List[bytes]]]: + ... + + @overload + async def get_json_with_headers( + self, + destination: str, + path: str, + args: Optional[QueryParams] = ..., + retry_on_dns_fail: bool = ..., + timeout: Optional[int] = ..., + ignore_backoff: bool = ..., + try_trailing_slash_on_400: bool = ..., + parser: ByteParser[T] = ..., + ) -> Tuple[T, Dict[bytes, List[bytes]]]: + ... + + async def get_json_with_headers( + self, + destination: str, + path: str, + args: Optional[QueryParams] = None, + retry_on_dns_fail: bool = True, + timeout: Optional[int] = None, + ignore_backoff: bool = False, + try_trailing_slash_on_400: bool = False, + parser: Optional[ByteParser[T]] = None, + ) -> Tuple[Union[JsonDict, T], Dict[bytes, List[bytes]]]: + """GETs some json from the given host homeserver and path + + Args: + destination: The remote server to send the HTTP request to. + + path: The HTTP path. + + args: A dictionary used to create query strings, defaults to + None. + + retry_on_dns_fail: true if the request should be retried on DNS failures + + timeout: number of milliseconds to wait for the response. + self._default_timeout (60s) by default. + + Note that we may make several attempts to send the request; this + timeout applies to the time spent waiting for response headers for + *each* attempt (including connection time) as well as the time spent + reading the response body after a 200 response. + + ignore_backoff: true to ignore the historical backoff data + and try the request anyway. + + try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED + response we should try appending a trailing slash to the end of + the request. Workaround for #3622 in Synapse <= v0.99.3. + + parser: The parser to use to decode the response. Defaults to + parsing as JSON. + + Returns: + Succeeds when we get a 2xx HTTP response. The result will be a tuple of the + decoded JSON body and a dict of the response headers. + Raises: HttpResponseException: If we get an HTTP response code >= 300 (except 429). @@ -1156,6 +1266,8 @@ class MatrixFederationHttpClient: timeout=timeout, ) + headers = dict(response.headers.getAllRawHeaders()) + if timeout is not None: _sec_timeout = timeout / 1000 else: @@ -1173,7 +1285,7 @@ class MatrixFederationHttpClient: parser=parser, ) - return body + return body, headers async def delete_json( self, diff --git a/synapse/http/proxy.py b/synapse/http/proxy.py new file mode 100644 index 0000000000..0874d67760 --- /dev/null +++ b/synapse/http/proxy.py @@ -0,0 +1,249 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import json
+import logging
+import urllib.parse
+from typing import TYPE_CHECKING, Any, Optional, Set, Tuple, cast
+
+from twisted.internet import protocol
+from twisted.internet.interfaces import ITCPTransport
+from twisted.internet.protocol import connectionDone
+from twisted.python import failure
+from twisted.python.failure import Failure
+from twisted.web.client import ResponseDone
+from twisted.web.http_headers import Headers
+from twisted.web.iweb import IAgent, IResponse
+from twisted.web.resource import IResource
+from twisted.web.server import Site
+
+from synapse.api.errors import Codes
+from synapse.http import QuieterFileBodyProducer
+from synapse.http.server import _AsyncResource
+from synapse.logging.context import make_deferred_yieldable, run_in_background
+from synapse.types import ISynapseReactor
+from synapse.util.async_helpers import timeout_deferred
+
+if TYPE_CHECKING:
+    from synapse.http.site import SynapseRequest
+
+logger = logging.getLogger(__name__)
+
+# "Hop-by-hop" headers (as opposed to "end-to-end" headers) as defined by RFC2616
+# section 13.5.1 and referenced in RFC9110 section 7.6.1. These are meant to only be
+# consumed by the immediate recipient and not be forwarded on.
+HOP_BY_HOP_HEADERS = {
+    "Connection",
+    "Keep-Alive",
+    "Proxy-Authenticate",
+    "Proxy-Authorization",
+    "TE",
+    "Trailers",
+    "Transfer-Encoding",
+    "Upgrade",
+}
+
+
+def parse_connection_header_value(
+    connection_header_value: Optional[bytes],
+) -> Set[str]:
+    """
+    Parse the `Connection` header to determine which headers should not be copied
+    over from the remote response.
+
+    As defined by RFC2616 section 14.10 and RFC9110 section 7.6.1
+
+    Example: `Connection: close, X-Foo, X-Bar` will return `{"Close", "X-Foo", "X-Bar"}`
+
+    Even though "close" is a special directive, let's just treat it as another
+    header for simplicity. If people want to check for this directive, they can
+    simply check for `"Close" in headers`.
+
+    Args:
+        connection_header_value: The value of the `Connection` header.
+
+    Returns:
+        The set of header names that should not be copied over from the remote
+        response, in canonical capitalization.
+    """
+    headers = Headers()
+    extra_headers_to_remove: Set[str] = set()
+    if connection_header_value:
+        extra_headers_to_remove = {
+            headers._canonicalNameCaps(connection_option.strip()).decode("ascii")
+            for connection_option in connection_header_value.split(b",")
+        }
+
+    return extra_headers_to_remove
+
+
+class ProxyResource(_AsyncResource):
+    """
+    A stub resource that proxies any requests with a `matrix-federation://` scheme
+    through the given `federation_agent` to the remote homeserver and ferries back the
+    info.
+ """ + + isLeaf = True + + def __init__(self, reactor: ISynapseReactor, federation_agent: IAgent): + super().__init__(True) + + self.reactor = reactor + self.agent = federation_agent + + async def _async_render(self, request: "SynapseRequest") -> Tuple[int, Any]: + uri = urllib.parse.urlparse(request.uri) + assert uri.scheme == b"matrix-federation" + + headers = Headers() + for header_name in (b"User-Agent", b"Authorization", b"Content-Type"): + header_value = request.getHeader(header_name) + if header_value: + headers.addRawHeader(header_name, header_value) + + request_deferred = run_in_background( + self.agent.request, + request.method, + request.uri, + headers=headers, + bodyProducer=QuieterFileBodyProducer(request.content), + ) + request_deferred = timeout_deferred( + request_deferred, + # This should be set longer than the timeout in `MatrixFederationHttpClient` + # so that it has enough time to complete and pass us the data before we give + # up. + timeout=90, + reactor=self.reactor, + ) + + response = await make_deferred_yieldable(request_deferred) + + return response.code, response + + def _send_response( + self, + request: "SynapseRequest", + code: int, + response_object: Any, + ) -> None: + response = cast(IResponse, response_object) + response_headers = cast(Headers, response.headers) + + request.setResponseCode(code) + + # The `Connection` header also defines which headers should not be copied over. + connection_header = response_headers.getRawHeaders(b"connection") + extra_headers_to_remove = parse_connection_header_value( + connection_header[0] if connection_header else None + ) + + # Copy headers. + for k, v in response_headers.getAllRawHeaders(): + # Do not copy over any hop-by-hop headers. These are meant to only be + # consumed by the immediate recipient and not be forwarded on. + header_key = k.decode("ascii") + if ( + header_key in HOP_BY_HOP_HEADERS + or header_key in extra_headers_to_remove + ): + continue + + request.responseHeaders.setRawHeaders(k, v) + + response.deliverBody(_ProxyResponseBody(request)) + + def _send_error_response( + self, + f: failure.Failure, + request: "SynapseRequest", + ) -> None: + request.setResponseCode(502) + request.setHeader(b"Content-Type", b"application/json") + request.write( + ( + json.dumps( + { + "errcode": Codes.UNKNOWN, + "err": "ProxyResource: Error when proxying request: %s %s -> %s" + % ( + request.method.decode("ascii"), + request.uri.decode("ascii"), + f, + ), + } + ) + ).encode() + ) + request.finish() + + +class _ProxyResponseBody(protocol.Protocol): + """ + A protocol that proxies the given remote response data back out to the given local + request. + """ + + transport: Optional[ITCPTransport] = None + + def __init__(self, request: "SynapseRequest") -> None: + self._request = request + + def dataReceived(self, data: bytes) -> None: + # Avoid sending response data to the local request that already disconnected + if self._request._disconnected and self.transport is not None: + # Close the connection (forcefully) since all the data will get + # discarded anyway. + self.transport.abortConnection() + return + + self._request.write(data) + + def connectionLost(self, reason: Failure = connectionDone) -> None: + # If the local request is already finished (successfully or failed), don't + # worry about sending anything back. + if self._request.finished: + return + + if reason.check(ResponseDone): + self._request.finish() + else: + # Abort the underlying request since our remote request also failed. 
+ self._request.transport.abortConnection() + + +class ProxySite(Site): + """ + Proxies any requests with a `matrix-federation://` scheme through the given + `federation_agent`. Otherwise, behaves like a normal `Site`. + """ + + def __init__( + self, + resource: IResource, + reactor: ISynapseReactor, + federation_agent: IAgent, + ): + super().__init__(resource, reactor=reactor) + + self._proxy_resource = ProxyResource(reactor, federation_agent) + + def getResourceFor(self, request: "SynapseRequest") -> IResource: + uri = urllib.parse.urlparse(request.uri) + if uri.scheme == b"matrix-federation": + return self._proxy_resource + + return super().getResourceFor(request) diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py index 7bdc4acae7..1fa3adbef2 100644 --- a/synapse/http/proxyagent.py +++ b/synapse/http/proxyagent.py @@ -12,8 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +import random import re -from typing import Any, Dict, Optional, Tuple +from typing import Any, Collection, Dict, List, Optional, Sequence, Tuple from urllib.parse import urlparse from urllib.request import ( # type: ignore[attr-defined] getproxies_environment, @@ -24,7 +25,12 @@ from zope.interface import implementer from twisted.internet import defer from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS -from twisted.internet.interfaces import IReactorCore, IStreamClientEndpoint +from twisted.internet.interfaces import ( + IProtocol, + IProtocolFactory, + IReactorCore, + IStreamClientEndpoint, +) from twisted.python.failure import Failure from twisted.web.client import ( URI, @@ -36,8 +42,10 @@ from twisted.web.error import SchemeNotSupported from twisted.web.http_headers import Headers from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS, IResponse +from synapse.config.workers import InstanceLocationConfig from synapse.http import redact_uri from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials +from synapse.logging.context import run_in_background logger = logging.getLogger(__name__) @@ -74,6 +82,10 @@ class ProxyAgent(_AgentBase): use_proxy: Whether proxy settings should be discovered and used from conventional environment variables. + federation_proxies: An optional list of locations to proxy outbound federation + traffic through (only requests that use the `matrix-federation://` scheme + will be proxied). + Raises: ValueError if use_proxy is set and the environment variables contain an invalid proxy specification. 
@@ -89,6 +101,7 @@ class ProxyAgent(_AgentBase): bindAddress: Optional[bytes] = None, pool: Optional[HTTPConnectionPool] = None, use_proxy: bool = False, + federation_proxies: Collection[InstanceLocationConfig] = (), ): contextFactory = contextFactory or BrowserLikePolicyForHTTPS() @@ -127,6 +140,27 @@ class ProxyAgent(_AgentBase): self._policy_for_https = contextFactory self._reactor = reactor + self._federation_proxy_endpoint: Optional[IStreamClientEndpoint] = None + if federation_proxies: + endpoints = [] + for federation_proxy in federation_proxies: + endpoint = HostnameEndpoint( + self.proxy_reactor, + federation_proxy.host, + federation_proxy.port, + ) + + if federation_proxy.tls: + tls_connection_creator = self._policy_for_https.creatorForNetloc( + federation_proxy.host, + federation_proxy.port, + ) + endpoint = wrapClientTLS(tls_connection_creator, endpoint) + + endpoints.append(endpoint) + + self._federation_proxy_endpoint = _ProxyEndpoints(endpoints) + def request( self, method: bytes, @@ -214,6 +248,14 @@ class ProxyAgent(_AgentBase): parsed_uri.port, self.https_proxy_creds, ) + elif ( + parsed_uri.scheme == b"matrix-federation" + and self._federation_proxy_endpoint + ): + # Cache *all* connections under the same key, since we are only + # connecting to a single destination, the proxy: + endpoint = self._federation_proxy_endpoint + request_path = uri else: # not using a proxy endpoint = HostnameEndpoint( @@ -233,6 +275,11 @@ class ProxyAgent(_AgentBase): endpoint = wrapClientTLS(tls_connection_creator, endpoint) elif parsed_uri.scheme == b"http": pass + elif ( + parsed_uri.scheme == b"matrix-federation" + and self._federation_proxy_endpoint + ): + pass else: return defer.fail( Failure( @@ -337,3 +384,31 @@ def parse_proxy( credentials = ProxyCredentials(b"".join([url.username, b":", url.password])) return url.scheme, url.hostname, url.port or default_port, credentials + + +@implementer(IStreamClientEndpoint) +class _ProxyEndpoints: + """An endpoint that randomly iterates through a given list of endpoints at + each connection attempt. 
+ """ + + def __init__(self, endpoints: Sequence[IStreamClientEndpoint]) -> None: + assert endpoints + self._endpoints = endpoints + + def connect( + self, protocol_factory: IProtocolFactory + ) -> "defer.Deferred[IProtocol]": + """Implements IStreamClientEndpoint interface""" + + return run_in_background(self._do_connect, protocol_factory) + + async def _do_connect(self, protocol_factory: IProtocolFactory) -> IProtocol: + failures: List[Failure] = [] + for endpoint in random.sample(self._endpoints, k=len(self._endpoints)): + try: + return await endpoint.connect(protocol_factory) + except Exception: + failures.append(Failure()) + + failures.pop().raiseException() diff --git a/synapse/http/server.py b/synapse/http/server.py index 933172c873..ff3153a9d9 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -18,6 +18,7 @@ import html import logging import types import urllib +import urllib.parse from http import HTTPStatus from http.client import FOUND from inspect import isawaitable @@ -65,7 +66,6 @@ from synapse.api.errors import ( UnrecognizedRequestError, ) from synapse.config.homeserver import HomeServerConfig -from synapse.http.site import SynapseRequest from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background from synapse.logging.opentracing import active_span, start_active_span, trace_servlet from synapse.util import json_encoder @@ -76,6 +76,7 @@ from synapse.util.iterutils import chunk_seq if TYPE_CHECKING: import opentracing + from synapse.http.site import SynapseRequest from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -102,7 +103,7 @@ HTTP_STATUS_REQUEST_CANCELLED = 499 def return_json_error( - f: failure.Failure, request: SynapseRequest, config: Optional[HomeServerConfig] + f: failure.Failure, request: "SynapseRequest", config: Optional[HomeServerConfig] ) -> None: """Sends a JSON error response to clients.""" @@ -220,8 +221,8 @@ def return_html_error( def wrap_async_request_handler( - h: Callable[["_AsyncResource", SynapseRequest], Awaitable[None]] -) -> Callable[["_AsyncResource", SynapseRequest], "defer.Deferred[None]"]: + h: Callable[["_AsyncResource", "SynapseRequest"], Awaitable[None]] +) -> Callable[["_AsyncResource", "SynapseRequest"], "defer.Deferred[None]"]: """Wraps an async request handler so that it calls request.processing. This helps ensure that work done by the request handler after the request is completed @@ -235,7 +236,7 @@ def wrap_async_request_handler( """ async def wrapped_async_request_handler( - self: "_AsyncResource", request: SynapseRequest + self: "_AsyncResource", request: "SynapseRequest" ) -> None: with request.processing(): await h(self, request) @@ -300,7 +301,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): self._extract_context = extract_context - def render(self, request: SynapseRequest) -> int: + def render(self, request: "SynapseRequest") -> int: """This gets called by twisted every time someone sends us a request.""" request.render_deferred = defer.ensureDeferred( self._async_render_wrapper(request) @@ -308,7 +309,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): return NOT_DONE_YET @wrap_async_request_handler - async def _async_render_wrapper(self, request: SynapseRequest) -> None: + async def _async_render_wrapper(self, request: "SynapseRequest") -> None: """This is a wrapper that delegates to `_async_render` and handles exceptions, return values, metrics, etc. 
""" @@ -326,9 +327,15 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): # of our stack, and thus gives us a sensible stack # trace. f = failure.Failure() + logger.exception( + "Error handling request", + exc_info=(f.type, f.value, f.getTracebackObject()), + ) self._send_error_response(f, request) - async def _async_render(self, request: SynapseRequest) -> Optional[Tuple[int, Any]]: + async def _async_render( + self, request: "SynapseRequest" + ) -> Optional[Tuple[int, Any]]: """Delegates to `_async_render_` methods, or returns a 400 if no appropriate method exists. Can be overridden in sub classes for different routing. @@ -358,7 +365,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): @abc.abstractmethod def _send_response( self, - request: SynapseRequest, + request: "SynapseRequest", code: int, response_object: Any, ) -> None: @@ -368,7 +375,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): def _send_error_response( self, f: failure.Failure, - request: SynapseRequest, + request: "SynapseRequest", ) -> None: raise NotImplementedError() @@ -384,7 +391,7 @@ class DirectServeJsonResource(_AsyncResource): def _send_response( self, - request: SynapseRequest, + request: "SynapseRequest", code: int, response_object: Any, ) -> None: @@ -401,7 +408,7 @@ class DirectServeJsonResource(_AsyncResource): def _send_error_response( self, f: failure.Failure, - request: SynapseRequest, + request: "SynapseRequest", ) -> None: """Implements _AsyncResource._send_error_response""" return_json_error(f, request, None) @@ -473,7 +480,7 @@ class JsonResource(DirectServeJsonResource): ) def _get_handler_for_request( - self, request: SynapseRequest + self, request: "SynapseRequest" ) -> Tuple[ServletCallback, str, Dict[str, str]]: """Finds a callback method to handle the given request. @@ -503,7 +510,7 @@ class JsonResource(DirectServeJsonResource): # Huh. No one wanted to handle that? Fiiiiiine. raise UnrecognizedRequestError(code=404) - async def _async_render(self, request: SynapseRequest) -> Tuple[int, Any]: + async def _async_render(self, request: "SynapseRequest") -> Tuple[int, Any]: callback, servlet_classname, group_dict = self._get_handler_for_request(request) request.is_render_cancellable = is_function_cancellable(callback) @@ -535,7 +542,7 @@ class JsonResource(DirectServeJsonResource): def _send_error_response( self, f: failure.Failure, - request: SynapseRequest, + request: "SynapseRequest", ) -> None: """Implements _AsyncResource._send_error_response""" return_json_error(f, request, self.hs.config) @@ -551,7 +558,7 @@ class DirectServeHtmlResource(_AsyncResource): def _send_response( self, - request: SynapseRequest, + request: "SynapseRequest", code: int, response_object: Any, ) -> None: @@ -565,7 +572,7 @@ class DirectServeHtmlResource(_AsyncResource): def _send_error_response( self, f: failure.Failure, - request: SynapseRequest, + request: "SynapseRequest", ) -> None: """Implements _AsyncResource._send_error_response""" return_html_error(f, request, self.ERROR_TEMPLATE) @@ -592,7 +599,7 @@ class UnrecognizedRequestResource(resource.Resource): errcode of M_UNRECOGNIZED. 
""" - def render(self, request: SynapseRequest) -> int: + def render(self, request: "SynapseRequest") -> int: f = failure.Failure(UnrecognizedRequestError(code=404)) return_json_error(f, request, None) # A response has already been sent but Twisted requires either NOT_DONE_YET @@ -622,7 +629,7 @@ class RootRedirect(resource.Resource): class OptionsResource(resource.Resource): """Responds to OPTION requests for itself and all children.""" - def render_OPTIONS(self, request: SynapseRequest) -> bytes: + def render_OPTIONS(self, request: "SynapseRequest") -> bytes: request.setResponseCode(204) request.setHeader(b"Content-Length", b"0") @@ -737,7 +744,7 @@ def _encode_json_bytes(json_object: object) -> bytes: def respond_with_json( - request: SynapseRequest, + request: "SynapseRequest", code: int, json_object: Any, send_cors: bool = False, @@ -787,7 +794,7 @@ def respond_with_json( def respond_with_json_bytes( - request: SynapseRequest, + request: "SynapseRequest", code: int, json_bytes: bytes, send_cors: bool = False, @@ -825,7 +832,7 @@ def respond_with_json_bytes( async def _async_write_json_to_request_in_thread( - request: SynapseRequest, + request: "SynapseRequest", json_encoder: Callable[[Any], bytes], json_object: Any, ) -> None: @@ -883,7 +890,7 @@ def _write_bytes_to_request(request: Request, bytes_to_write: bytes) -> None: _ByteProducer(request, bytes_generator) -def set_cors_headers(request: SynapseRequest) -> None: +def set_cors_headers(request: "SynapseRequest") -> None: """Set the CORS headers so that javascript running in a web browsers can use this API @@ -981,7 +988,7 @@ def set_clickjacking_protection_headers(request: Request) -> None: def respond_with_redirect( - request: SynapseRequest, url: bytes, statusCode: int = FOUND, cors: bool = False + request: "SynapseRequest", url: bytes, statusCode: int = FOUND, cors: bool = False ) -> None: """ Write a 302 (or other specified status code) response to the request, if it is still alive. diff --git a/synapse/http/site.py b/synapse/http/site.py index 5b5a7c1e59..0ee2598345 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -21,25 +21,28 @@ from zope.interface import implementer from twisted.internet.address import UNIXAddress from twisted.internet.defer import Deferred -from twisted.internet.interfaces import IAddress, IReactorTime +from twisted.internet.interfaces import IAddress from twisted.python.failure import Failure from twisted.web.http import HTTPChannel +from twisted.web.iweb import IAgent from twisted.web.resource import IResource, Resource -from twisted.web.server import Request, Site +from twisted.web.server import Request from synapse.config.server import ListenerConfig from synapse.http import get_request_user_agent, redact_uri +from synapse.http.proxy import ProxySite from synapse.http.request_metrics import RequestMetrics, requests_counter from synapse.logging.context import ( ContextRequest, LoggingContext, PreserveLoggingContext, ) -from synapse.types import Requester +from synapse.types import ISynapseReactor, Requester if TYPE_CHECKING: import opentracing + logger = logging.getLogger(__name__) _next_request_seq = 0 @@ -102,7 +105,7 @@ class SynapseRequest(Request): # A boolean indicating whether `render_deferred` should be cancelled if the # client disconnects early. Expected to be set by the coroutine started by # `Resource.render`, if rendering is asynchronous. 
- self.is_render_cancellable = False + self.is_render_cancellable: bool = False global _next_request_seq self.request_seq = _next_request_seq @@ -601,7 +604,7 @@ class _XForwardedForAddress: host: str -class SynapseSite(Site): +class SynapseSite(ProxySite): """ Synapse-specific twisted http Site @@ -623,7 +626,8 @@ class SynapseSite(Site): resource: IResource, server_version_string: str, max_request_body_size: int, - reactor: IReactorTime, + reactor: ISynapseReactor, + federation_agent: IAgent, ): """ @@ -638,7 +642,11 @@ class SynapseSite(Site): dropping the connection reactor: reactor to be used to manage connection timeouts """ - Site.__init__(self, resource, reactor=reactor) + super().__init__( + resource=resource, + reactor=reactor, + federation_agent=federation_agent, + ) self.site_tag = site_tag self.reactor = reactor @@ -649,7 +657,9 @@ class SynapseSite(Site): request_id_header = config.http_options.request_id_header - self.experimental_cors_msc3886 = config.http_options.experimental_cors_msc3886 + self.experimental_cors_msc3886: bool = ( + config.http_options.experimental_cors_msc3886 + ) def request_factory(channel: HTTPChannel, queued: bool) -> Request: return request_class( diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 5a965f233b..21c5309740 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -31,9 +31,7 @@ from tests.unittest import HomeserverTestCase class FederationReaderOpenIDListenerTests(HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver( - federation_http_client=None, homeserver_to_use=GenericWorkerServer - ) + hs = self.setup_test_homeserver(homeserver_to_use=GenericWorkerServer) return hs def default_config(self) -> JsonDict: @@ -91,9 +89,7 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): @patch("synapse.app.homeserver.KeyResource", new=Mock()) class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver( - federation_http_client=None, homeserver_to_use=SynapseHomeServer - ) + hs = self.setup_test_homeserver(homeserver_to_use=SynapseHomeServer) return hs @parameterized.expand( diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index ee48f9e546..66215af2b8 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -41,7 +41,6 @@ class DeviceTestCase(unittest.HomeserverTestCase): self.appservice_api = mock.Mock() hs = self.setup_test_homeserver( "server", - federation_http_client=None, application_service_api=self.appservice_api, ) handler = hs.get_device_handler() @@ -401,7 +400,7 @@ class DeviceTestCase(unittest.HomeserverTestCase): class DehydrationTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver("server", federation_http_client=None) + hs = self.setup_test_homeserver("server") handler = hs.get_device_handler() assert isinstance(handler, DeviceHandler) self.handler = handler diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index bf0862ed54..5f11d5df11 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -57,7 +57,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> 
HomeServer: - hs = self.setup_test_homeserver(federation_http_client=None) + hs = self.setup_test_homeserver() self.handler = hs.get_federation_handler() self.store = hs.get_datastores().main return hs diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 19f5322317..fd66d573d2 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -993,7 +993,6 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver( "server", - federation_http_client=None, federation_sender=Mock(spec=FederationSender), ) return hs diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 94518a7196..5da1d95f0b 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -17,6 +17,8 @@ import json from typing import Dict, List, Set from unittest.mock import ANY, Mock, call +from netaddr import IPSet + from twisted.test.proto_helpers import MemoryReactor from twisted.web.resource import Resource @@ -24,6 +26,7 @@ from synapse.api.constants import EduTypes from synapse.api.errors import AuthError from synapse.federation.transport.server import TransportLayerServer from synapse.handlers.typing import TypingWriterHandler +from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent from synapse.server import HomeServer from synapse.types import JsonDict, Requester, UserID, create_requester from synapse.util import Clock @@ -76,6 +79,13 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): # we mock out the federation client too self.mock_federation_client = Mock(spec=["put_json"]) self.mock_federation_client.put_json.return_value = make_awaitable((200, "OK")) + self.mock_federation_client.agent = MatrixFederationAgent( + reactor, + tls_client_options_factory=None, + user_agent=b"SynapseInTrialTest/0.0.0", + ip_allowlist=None, + ip_blocklist=IPSet(), + ) # the tests assume that we are starting at unix time 1000 reactor.pump((1000,)) diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py index b5f4a60fe5..a8b9737d1f 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -11,8 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Generator -from unittest.mock import Mock +from typing import Any, Dict, Generator +from unittest.mock import ANY, Mock, create_autospec from netaddr import IPSet from parameterized import parameterized @@ -21,10 +21,11 @@ from twisted.internet import defer from twisted.internet.defer import Deferred, TimeoutError from twisted.internet.error import ConnectingCancelledError, DNSLookupError from twisted.test.proto_helpers import MemoryReactor, StringTransport -from twisted.web.client import ResponseNeverReceived +from twisted.web.client import Agent, ResponseNeverReceived from twisted.web.http import HTTPChannel +from twisted.web.http_headers import Headers -from synapse.api.errors import RequestSendFailed +from synapse.api.errors import HttpResponseException, RequestSendFailed from synapse.http.matrixfederationclient import ( ByteParser, MatrixFederationHttpClient, @@ -39,7 +40,9 @@ from synapse.logging.context import ( from synapse.server import HomeServer from synapse.util import Clock +from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.server import FakeTransport +from tests.test_utils import FakeResponse from tests.unittest import HomeserverTestCase, override_config @@ -658,3 +661,181 @@ class FederationClientTests(HomeserverTestCase): self.assertEqual(self.cl.max_short_retry_delay_seconds, 7) self.assertEqual(self.cl.max_long_retries, 20) self.assertEqual(self.cl.max_short_retries, 5) + + +class FederationClientProxyTests(BaseMultiWorkerStreamTestCase): + def default_config(self) -> Dict[str, Any]: + conf = super().default_config() + conf["instance_map"] = { + "main": {"host": "testserv", "port": 8765}, + "federation_sender": {"host": "testserv", "port": 1001}, + } + return conf + + @override_config({"outbound_federation_restricted_to": ["federation_sender"]}) + def test_proxy_requests_through_federation_sender_worker(self) -> None: + """ + Test that all outbound federation requests go through the `federation_sender` + worker + """ + # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance + # so we can act like some remote server responding to requests + mock_client_on_federation_sender = Mock() + mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True) + mock_client_on_federation_sender.agent = mock_agent_on_federation_sender + + # Create the `federation_sender` worker + self.federation_sender = self.make_worker_hs( + "synapse.app.generic_worker", + {"worker_name": "federation_sender"}, + federation_http_client=mock_client_on_federation_sender, + ) + + # Fake `remoteserv:8008` responding to requests + mock_agent_on_federation_sender.request.side_effect = ( + lambda *args, **kwargs: defer.succeed( + FakeResponse.json( + payload={ + "foo": "bar", + } + ) + ) + ) + + # This federation request from the main process should be proxied through the + # `federation_sender` worker off to the remote server + test_request_from_main_process_d = defer.ensureDeferred( + self.hs.get_federation_http_client().get_json("remoteserv:8008", "foo/bar") + ) + + # Pump the reactor so our deferred goes through the motions + self.pump() + + # Make sure that the request was proxied through the `federation_sender` worker + mock_agent_on_federation_sender.request.assert_called_once_with( + b"GET", + b"matrix-federation://remoteserv:8008/foo/bar", + headers=ANY, + bodyProducer=ANY, + ) + + # Make sure the response is as expected back on the main worker + res = self.successResultOf(test_request_from_main_process_d) + 
self.assertEqual(res, {"foo": "bar"})
+
+    @override_config({"outbound_federation_restricted_to": ["federation_sender"]})
+    def test_proxy_request_with_network_error_through_federation_sender_worker(
+        self,
+    ) -> None:
+        """
+        Test that when the outbound federation request fails with a network-related
+        error, a sensible error makes its way back to the main process.
+        """
+        # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance
+        # so we can act like some remote server responding to requests
+        mock_client_on_federation_sender = Mock()
+        mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True)
+        mock_client_on_federation_sender.agent = mock_agent_on_federation_sender
+
+        # Create the `federation_sender` worker
+        self.federation_sender = self.make_worker_hs(
+            "synapse.app.generic_worker",
+            {"worker_name": "federation_sender"},
+            federation_http_client=mock_client_on_federation_sender,
+        )
+
+        # Fake `remoteserv:8008` responding to requests
+        mock_agent_on_federation_sender.request.side_effect = (
+            lambda *args, **kwargs: defer.fail(ResponseNeverReceived("fake error"))
+        )
+
+        # This federation request from the main process should be proxied through the
+        # `federation_sender` worker off to the remote server
+        test_request_from_main_process_d = defer.ensureDeferred(
+            self.hs.get_federation_http_client().get_json("remoteserv:8008", "foo/bar")
+        )
+
+        # Pump the reactor so our deferred goes through the motions. We pump with 10
+        # seconds (0.1 * 100) so the `MatrixFederationHttpClient` runs out of retries
+        # and finally passes along the error response.
+        self.pump(0.1)
+
+        # Make sure that the request was proxied through the `federation_sender` worker
+        mock_agent_on_federation_sender.request.assert_called_with(
+            b"GET",
+            b"matrix-federation://remoteserv:8008/foo/bar",
+            headers=ANY,
+            bodyProducer=ANY,
+        )
+
+        # Make sure we get some sort of error back on the main worker
+        failure_res = self.failureResultOf(test_request_from_main_process_d)
+        self.assertIsInstance(failure_res.value, RequestSendFailed)
+        self.assertIsInstance(failure_res.value.inner_exception, HttpResponseException)
+
+    @override_config({"outbound_federation_restricted_to": ["federation_sender"]})
+    def test_proxy_requests_and_discards_hop_by_hop_headers(self) -> None:
+        """
+        Test to make sure hop-by-hop headers and additional headers defined in the
+        `Connection` header are discarded when proxying requests
+        """
+        # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance
+        # so we can act like some remote server responding to requests
+        mock_client_on_federation_sender = Mock()
+        mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True)
+        mock_client_on_federation_sender.agent = mock_agent_on_federation_sender
+
+        # Create the `federation_sender` worker
+        self.federation_sender = self.make_worker_hs(
+            "synapse.app.generic_worker",
+            {"worker_name": "federation_sender"},
+            federation_http_client=mock_client_on_federation_sender,
+        )
+
+        # Fake `remoteserv:8008` responding to requests
+        mock_agent_on_federation_sender.request.side_effect = lambda *args, **kwargs: defer.succeed(
+            FakeResponse(
+                code=200,
+                body=b'{"foo": "bar"}',
+                headers=Headers(
+                    {
+                        "Content-Type": ["application/json"],
+                        "Connection": ["close, X-Foo, X-Bar"],
+                        # Should be removed because it's defined in the `Connection` header
+                        "X-Foo": ["foo"],
+                        "X-Bar": ["bar"],
+                        # Should be removed because it's a hop-by-hop header
+                        "Proxy-Authorization": ["abcdef"],
+                    }
+                ),
+            )
+        )
+ + # This federation request from the main process should be proxied through the + # `federation_sender` worker off to the remote server + test_request_from_main_process_d = defer.ensureDeferred( + self.hs.get_federation_http_client().get_json_with_headers( + "remoteserv:8008", "foo/bar" + ) + ) + + # Pump the reactor so our deferred goes through the motions + self.pump() + + # Make sure that the request was proxied through the `federation_sender` worker + mock_agent_on_federation_sender.request.assert_called_once_with( + b"GET", + b"matrix-federation://remoteserv:8008/foo/bar", + headers=ANY, + bodyProducer=ANY, + ) + + res, headers = self.successResultOf(test_request_from_main_process_d) + header_names = set(headers.keys()) + + # Make sure the response does not include the hop-by-hop headers + self.assertNotIn(b"X-Foo", header_names) + self.assertNotIn(b"X-Bar", header_names) + self.assertNotIn(b"Proxy-Authorization", header_names) + # Make sure the response is as expected back on the main worker + self.assertEqual(res, {"foo": "bar"}) diff --git a/tests/http/test_proxy.py b/tests/http/test_proxy.py new file mode 100644 index 0000000000..0dc9ba8e05 --- /dev/null +++ b/tests/http/test_proxy.py @@ -0,0 +1,53 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import Set
+
+from parameterized import parameterized
+
+from synapse.http.proxy import parse_connection_header_value
+
+from tests.unittest import TestCase
+
+
+class ProxyTests(TestCase):
+    @parameterized.expand(
+        [
+            [b"close, X-Foo, X-Bar", {"Close", "X-Foo", "X-Bar"}],
+            # No whitespace
+            [b"close,X-Foo,X-Bar", {"Close", "X-Foo", "X-Bar"}],
+            # More whitespace
+            [b"close,    X-Foo,      X-Bar", {"Close", "X-Foo", "X-Bar"}],
+            # "close" directive not in the first position
+            [b"X-Foo, X-Bar, close", {"X-Foo", "X-Bar", "Close"}],
+            # Normalizes header capitalization
+            [b"keep-alive, x-fOo, x-bAr", {"Keep-Alive", "X-Foo", "X-Bar"}],
+            # Handles header names with whitespace
+            [
+                b"keep-alive, x foo, x bar",
+                {"Keep-Alive", "X foo", "X bar"},
+            ],
+        ]
+    )
+    def test_parse_connection_header_value(
+        self,
+        connection_header_value: bytes,
+        expected_extra_headers_to_remove: Set[str],
+    ) -> None:
+        """
+        Tests that the connection header value is parsed correctly
+        """
+        self.assertEqual(
+            expected_extra_headers_to_remove,
+            parse_connection_header_value(connection_header_value),
+        )
diff --git a/tests/replication/_base.py b/tests/replication/_base.py
index eb9b1f1cd9..96badc46b0 100644
--- a/tests/replication/_base.py
+++ b/tests/replication/_base.py
@@ -69,10 +69,10 @@ class BaseStreamTestCase(unittest.HomeserverTestCase):
         # Make a new HomeServer object for the worker
         self.reactor.lookups["testserv"] = "1.2.3.4"
         self.worker_hs = self.setup_test_homeserver(
-            federation_http_client=None,
             homeserver_to_use=GenericWorkerServer,
             config=self._get_worker_hs_config(),
             reactor=self.reactor,
+            federation_http_client=None,
         )

         # Since we use sqlite in memory databases we need to make sure the
@@ -380,6 +380,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
             server_version_string="1",
             max_request_body_size=8192,
             reactor=self.reactor,
+            federation_agent=worker_hs.get_federation_http_client().agent,
         )

         worker_hs.get_replication_command_handler().start_replication(worker_hs)
diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py
index 08703206a9..a324b4d31d 100644
--- a/tests/replication/test_federation_sender_shard.py
+++ b/tests/replication/test_federation_sender_shard.py
@@ -14,14 +14,18 @@ import logging
 from unittest.mock import Mock

+from netaddr import IPSet
+
 from synapse.api.constants import EventTypes, Membership
 from synapse.events.builder import EventBuilderFactory
 from synapse.handlers.typing import TypingWriterHandler
+from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
 from synapse.rest.admin import register_servlets_for_client_rest_resource
 from synapse.rest.client import login, room
 from synapse.types import UserID, create_requester

 from tests.replication._base import BaseMultiWorkerStreamTestCase
+from tests.server import get_clock
 from tests.test_utils import make_awaitable

 logger = logging.getLogger(__name__)
@@ -41,13 +45,25 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
         room.register_servlets,
     ]

+    def setUp(self) -> None:
+        super().setUp()
+
+        reactor, _ = get_clock()
+        self.matrix_federation_agent = MatrixFederationAgent(
+            reactor,
+            tls_client_options_factory=None,
+            user_agent=b"SynapseInTrialTest/0.0.0",
+            ip_allowlist=None,
+            ip_blocklist=IPSet(),
+        )
+
     def test_send_event_single_sender(self) -> None:
         """Test that using a single federation sender worker correctly sends a
         new event.
""" mock_client = Mock(spec=["put_json"]) mock_client.put_json.return_value = make_awaitable({}) - + mock_client.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", { @@ -78,6 +94,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): """ mock_client1 = Mock(spec=["put_json"]) mock_client1.put_json.return_value = make_awaitable({}) + mock_client1.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", { @@ -92,6 +109,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): mock_client2 = Mock(spec=["put_json"]) mock_client2.put_json.return_value = make_awaitable({}) + mock_client2.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", { @@ -145,6 +163,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): """ mock_client1 = Mock(spec=["put_json"]) mock_client1.put_json.return_value = make_awaitable({}) + mock_client1.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", { @@ -159,6 +178,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): mock_client2 = Mock(spec=["put_json"]) mock_client2.put_json.return_value = make_awaitable({}) + mock_client2.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", { diff --git a/tests/rest/client/test_presence.py b/tests/rest/client/test_presence.py index dcbb125a3b..e12098102b 100644 --- a/tests/rest/client/test_presence.py +++ b/tests/rest/client/test_presence.py @@ -40,7 +40,6 @@ class PresenceTestCase(unittest.HomeserverTestCase): hs = self.setup_test_homeserver( "red", - federation_http_client=None, federation_client=Mock(), presence_handler=self.presence_handler, ) diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index f1b4e1ad2f..d013e75d55 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -67,8 +67,6 @@ class RoomBase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.hs = self.setup_test_homeserver( "red", - federation_http_client=None, - federation_client=Mock(), ) self.hs.get_federation_handler = Mock() # type: ignore[assignment] diff --git a/tests/storage/test_e2e_room_keys.py b/tests/storage/test_e2e_room_keys.py index 9cb326d90a..f6df31aba4 100644 --- a/tests/storage/test_e2e_room_keys.py +++ b/tests/storage/test_e2e_room_keys.py @@ -31,7 +31,7 @@ room_key: RoomKey = { class E2eRoomKeysHandlerTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver("server", federation_http_client=None) + hs = self.setup_test_homeserver("server") self.store = hs.get_datastores().main return hs diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py index 857e2caf2e..0282673167 100644 --- a/tests/storage/test_purge.py +++ b/tests/storage/test_purge.py @@ -27,7 +27,7 @@ class PurgeTests(HomeserverTestCase): servlets = [room.register_servlets] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver("server", federation_http_client=None) + hs = self.setup_test_homeserver("server") return hs def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: diff --git a/tests/storage/test_rollback_worker.py b/tests/storage/test_rollback_worker.py index 6861d3a6c9..809c9f175d 100644 --- a/tests/storage/test_rollback_worker.py +++ 
b/tests/storage/test_rollback_worker.py @@ -45,9 +45,7 @@ def fake_listdir(filepath: str) -> List[str]: class WorkerSchemaTests(HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver( - federation_http_client=None, homeserver_to_use=GenericWorkerServer - ) + hs = self.setup_test_homeserver(homeserver_to_use=GenericWorkerServer) return hs def default_config(self) -> JsonDict: diff --git a/tests/test_server.py b/tests/test_server.py index e266c06a2c..fe5afebdcd 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -38,7 +38,7 @@ from tests.http.server._base import test_disconnect from tests.server import ( FakeChannel, FakeSite, - ThreadedMemoryReactorClock, + get_clock, make_request, setup_test_homeserver, ) @@ -46,12 +46,11 @@ from tests.server import ( class JsonResourceTests(unittest.TestCase): def setUp(self) -> None: - self.reactor = ThreadedMemoryReactorClock() - self.hs_clock = Clock(self.reactor) + reactor, clock = get_clock() + self.reactor = reactor self.homeserver = setup_test_homeserver( self.addCleanup, - federation_http_client=None, - clock=self.hs_clock, + clock=clock, reactor=self.reactor, ) @@ -209,7 +208,13 @@ class JsonResourceTests(unittest.TestCase): class OptionsResourceTests(unittest.TestCase): def setUp(self) -> None: - self.reactor = ThreadedMemoryReactorClock() + reactor, clock = get_clock() + self.reactor = reactor + self.homeserver = setup_test_homeserver( + self.addCleanup, + clock=clock, + reactor=self.reactor, + ) class DummyResource(Resource): isLeaf = True @@ -242,6 +247,7 @@ class OptionsResourceTests(unittest.TestCase): "1.0", max_request_body_size=4096, reactor=self.reactor, + federation_agent=self.homeserver.get_federation_http_client().agent, ) # render the request and return the channel @@ -344,7 +350,8 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): await self.callback(request) def setUp(self) -> None: - self.reactor = ThreadedMemoryReactorClock() + reactor, _ = get_clock() + self.reactor = reactor def test_good_response(self) -> None: async def callback(request: SynapseRequest) -> None: @@ -462,9 +469,9 @@ class DirectServeJsonResourceCancellationTests(unittest.TestCase): """Tests for `DirectServeJsonResource` cancellation.""" def setUp(self) -> None: - self.reactor = ThreadedMemoryReactorClock() - self.clock = Clock(self.reactor) - self.resource = CancellableDirectServeJsonResource(self.clock) + reactor, clock = get_clock() + self.reactor = reactor + self.resource = CancellableDirectServeJsonResource(clock) self.site = FakeSite(self.resource, self.reactor) def test_cancellable_disconnect(self) -> None: @@ -496,9 +503,9 @@ class DirectServeHtmlResourceCancellationTests(unittest.TestCase): """Tests for `DirectServeHtmlResource` cancellation.""" def setUp(self) -> None: - self.reactor = ThreadedMemoryReactorClock() - self.clock = Clock(self.reactor) - self.resource = CancellableDirectServeHtmlResource(self.clock) + reactor, clock = get_clock() + self.reactor = reactor + self.resource = CancellableDirectServeHtmlResource(clock) self.site = FakeSite(self.resource, self.reactor) def test_cancellable_disconnect(self) -> None: diff --git a/tests/unittest.py b/tests/unittest.py index c73195b32b..334a95a917 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -358,6 +358,7 @@ class HomeserverTestCase(TestCase): server_version_string="1", max_request_body_size=4096, reactor=self.reactor, + federation_agent=self.hs.get_federation_http_client().agent, ) from 
tests.rest.client.utils import RestHelper

From f19dd39dfc04d3533198d8a52fab6ed49e6d5066 Mon Sep 17 00:00:00 2001
From: sarthak shah <75999816+sarthakshah65@users.noreply.github.com>
Date: Thu, 6 Jul 2023 20:58:09 +0530
Subject: [PATCH 191/562] Update link to the clients webpage, fix #15825
 (#15874)

---
 changelog.d/15874.misc    | 1 +
 synapse/static/index.html | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/15874.misc

diff --git a/changelog.d/15874.misc b/changelog.d/15874.misc
new file mode 100644
index 0000000000..0d434bef5d
--- /dev/null
+++ b/changelog.d/15874.misc
@@ -0,0 +1 @@
+Updated the link on the home page from https://matrix.org/docs/projects/try-matrix-now.html#clients to https://matrix.org/ecosystem/clients/.
diff --git a/synapse/static/index.html b/synapse/static/index.html
index bf46df9097..297a7877f3 100644
--- a/synapse/static/index.html
+++ b/synapse/static/index.html
@@ -48,7 +48,7 @@

       <h1>It works! Synapse is running</h1>
       <p>Your Synapse server is listening on this port and is ready for messages.</p>
-      <p>To use this server you'll need <a href="https://matrix.org/docs/projects/try-matrix-now.html#clients">a Matrix client</a>.
+      <p>To use this server you'll need <a href="https://matrix.org/ecosystem/clients/">a Matrix client</a>.
       </p>
       <p>Welcome to the Matrix universe :)</p>
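For readers skimming the proxy work in PATCH 190 above, the contract that the new `tests/http/test_proxy.py` cases pin down can be summarised with a small self-contained sketch. This is not the helper that ships in `synapse/http/proxy.py` (which leans on Twisted's canonical header-name capitalisation); `_dash_capitalize` below is a hypothetical stand-in that approximates the same behaviour:

```python
from typing import Optional, Set


def _dash_capitalize(name: str) -> str:
    # Approximate conventional HTTP header capitalisation: uppercase the first
    # letter of each dash-separated word, lowercase the rest
    # ("keep-alive" -> "Keep-Alive", "x-fOo" -> "X-Foo", "x foo" -> "X foo").
    return "-".join(word.capitalize() for word in name.split("-"))


def parse_connection_header_value(
    connection_header_value: Optional[bytes],
) -> Set[str]:
    """Parse a `Connection` header into the set of extra header names that a
    proxy must also strip, alongside the fixed hop-by-hop set (Keep-Alive,
    Proxy-Authorization, TE, Transfer-Encoding, Upgrade, ...)."""
    extra_headers_to_remove: Set[str] = set()
    if connection_header_value:
        extra_headers_to_remove = {
            _dash_capitalize(option.decode("ascii").strip())
            for option in connection_header_value.split(b",")
        }
    return extra_headers_to_remove


# Mirrors the first case exercised by the new test_proxy.py above.
assert parse_connection_header_value(b"close, X-Foo, X-Bar") == {
    "Close",
    "X-Foo",
    "X-Bar",
}
```

Whatever the exact implementation, the property the hop-by-hop test above asserts is the important one: every name listed in `Connection`, plus the fixed hop-by-hop set, is dropped before a proxied response is relayed.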
From 2481b7dfa41c1c890346136f04344a4e1660ef32 Mon Sep 17 00:00:00 2001 From: Jason Little Date: Fri, 7 Jul 2023 02:45:25 -0500 Subject: [PATCH 192/562] Remove `worker_replication_*` deprecated settings, with helpful errors on startup (#15860) Co-authored-by: reivilibre --- changelog.d/15860.removal | 1 + docs/upgrade.md | 15 ++++++ .../configuration/config_documentation.md | 45 ----------------- docs/workers.md | 3 -- synapse/config/workers.py | 50 +++++++++++-------- tests/app/test_homeserver_start.py | 6 +-- tests/config/test_workers.py | 27 +--------- 7 files changed, 49 insertions(+), 98 deletions(-) create mode 100644 changelog.d/15860.removal diff --git a/changelog.d/15860.removal b/changelog.d/15860.removal new file mode 100644 index 0000000000..1993bf0299 --- /dev/null +++ b/changelog.d/15860.removal @@ -0,0 +1 @@ +Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options. diff --git a/docs/upgrade.md b/docs/upgrade.md index 384f4010b4..b94d13c4da 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -98,6 +98,21 @@ You will need Python 3.8 to run Synapse v1.88.0 (due out July 18th, 2023). If you use current versions of the Matrix.org-distributed Debian packages or Docker images, no action is required. +## Removal of `worker_replication_*` settings + +As mentioned previously in [Upgrading to v1.84.0](#upgrading-to-v1840), the following deprecated settings +are being removed in this release of Synapse: + +* [`worker_replication_host`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_host) +* [`worker_replication_http_port`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_port) +* [`worker_replication_http_tls`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_tls) + +Please ensure that you have migrated to using `main` on your shared configuration's `instance_map` +(or create one if necessary). This is required if you have ***any*** workers at all; +administrators of single-process (monolith) installations don't need to do anything. + +For an illustrative example, please see [Upgrading to v1.84.0](#upgrading-to-v1840) below. + # Upgrading to v1.86.0 diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 89a92c4682..04e8390ffe 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -4107,51 +4107,6 @@ Example configuration: worker_name: generic_worker1 ``` --- -### `worker_replication_host` -*Deprecated as of version 1.84.0. Place `host` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.* - -The HTTP replication endpoint that it should talk to on the main Synapse process. -The main Synapse process defines this with a `replication` resource in -[`listeners` option](#listeners). - -Example configuration: -```yaml -worker_replication_host: 127.0.0.1 -``` ---- -### `worker_replication_http_port` -*Deprecated as of version 1.84.0. Place `port` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.* - -The HTTP replication port that it should talk to on the main Synapse process. -The main Synapse process defines this with a `replication` resource in -[`listeners` option](#listeners). 
- -Example configuration: -```yaml -worker_replication_http_port: 9093 -``` ---- -### `worker_replication_http_tls` -*Deprecated as of version 1.84.0. Place `tls` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.* - -Whether TLS should be used for talking to the HTTP replication port on the main -Synapse process. -The main Synapse process defines this with the `tls` option on its [listener](#listeners) that -has the `replication` resource enabled. - -**Please note:** by default, it is not safe to expose replication ports to the -public Internet, even with TLS enabled. -See [`worker_replication_secret`](#worker_replication_secret). - -Defaults to `false`. - -*Added in Synapse 1.72.0.* - -Example configuration: -```yaml -worker_replication_http_tls: true -``` ---- ### `worker_listeners` A worker can handle HTTP requests. To do so, a `worker_listeners` option diff --git a/docs/workers.md b/docs/workers.md index 303e0f0e7a..03415c6eb3 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -145,9 +145,6 @@ In the config file for each worker, you must specify: with an `http` listener. * **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer. - * **Synapse 1.83 and older:** The HTTP replication endpoint that the worker should talk to on the main synapse process - ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and - [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)). If using Synapse 1.84 and newer, these are not needed if `main` is defined on the [shared configuration](#shared-configuration) `instance_map` For example: diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 0b9789160c..5c81eb5c67 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -41,11 +41,17 @@ Synapse version. Please use ``%s: name_of_worker`` instead. _MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA = """ Missing data for a worker to connect to main process. Please include '%s' in the -`instance_map` declared in your shared yaml configuration, or optionally(as a deprecated -solution) in every worker's yaml as various `worker_replication_*` settings as defined -in workers documentation here: +`instance_map` declared in your shared yaml configuration as defined in configuration +documentation here: +`https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#instance_map` +""" + +WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE = """ +'%s' is no longer a supported worker setting, please place '%s' onto your shared +configuration under `main` inside the `instance_map`. See workers documentation here: `https://matrix-org.github.io/synapse/latest/workers.html#worker-configuration` """ + # This allows for a handy knob when it's time to change from 'master' to # something with less 'history' MAIN_PROCESS_INSTANCE_NAME = "master" @@ -237,22 +243,37 @@ class WorkerConfig(Config): ) # A map from instance name to host/port of their HTTP replication endpoint. - # Check if the main process is declared. Inject it into the map if it's not, - # based first on if a 'main' block is declared then on 'worker_replication_*' - # data. If both are available, default to instance_map. 
The main process - # itself doesn't need this data as it would never have to talk to itself. + # Check if the main process is declared. The main process itself doesn't need + # this data as it would never have to talk to itself. instance_map: Dict[str, Any] = config.get("instance_map", {}) if self.instance_name is not MAIN_PROCESS_INSTANCE_NAME: + # TODO: The next 3 condition blocks can be deleted after some time has + # passed and we're ready to stop checking for these settings. # The host used to connect to the main synapse main_host = config.get("worker_replication_host", None) + if main_host: + raise ConfigError( + WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE + % ("worker_replication_host", main_host) + ) # The port on the main synapse for HTTP replication endpoint main_port = config.get("worker_replication_http_port") + if main_port: + raise ConfigError( + WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE + % ("worker_replication_http_port", main_port) + ) # The tls mode on the main synapse for HTTP replication endpoint. # For backward compatibility this defaults to False. main_tls = config.get("worker_replication_http_tls", False) + if main_tls: + raise ConfigError( + WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE + % ("worker_replication_http_tls", main_tls) + ) # For now, accept 'main' in the instance_map, but the replication system # expects 'master', force that into being until it's changed later. @@ -262,22 +283,9 @@ class WorkerConfig(Config): ] del instance_map[MAIN_PROCESS_INSTANCE_MAP_NAME] - # This is the backwards compatibility bit that handles the - # worker_replication_* bits using setdefault() to not overwrite anything. - elif main_host is not None and main_port is not None: - instance_map.setdefault( - MAIN_PROCESS_INSTANCE_NAME, - { - "host": main_host, - "port": main_port, - "tls": main_tls, - }, - ) - else: # If we've gotten here, it means that the main process is not on the - # instance_map and that not enough worker_replication_* variables - # were declared in the worker's yaml. + # instance_map. 
raise ConfigError( _MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA % MAIN_PROCESS_INSTANCE_MAP_NAME diff --git a/tests/app/test_homeserver_start.py b/tests/app/test_homeserver_start.py index cd117b7394..0201933b04 100644 --- a/tests/app/test_homeserver_start.py +++ b/tests/app/test_homeserver_start.py @@ -25,9 +25,9 @@ class HomeserverAppStartTestCase(ConfigFileTestCase): # Add a blank line as otherwise the next addition ends up on a line with a comment self.add_lines_to_config([" "]) self.add_lines_to_config(["worker_app: test_worker_app"]) - self.add_lines_to_config(["worker_replication_host: 127.0.0.1"]) - self.add_lines_to_config(["worker_replication_http_port: 0"]) - + self.add_lines_to_config(["worker_log_config: /data/logconfig.config"]) + self.add_lines_to_config(["instance_map:"]) + self.add_lines_to_config([" main:", " host: 127.0.0.1", " port: 1234"]) # Ensure that starting master process with worker config raises an exception with self.assertRaises(ConfigError): synapse.app.homeserver.setup(["-c", self.config_file]) diff --git a/tests/config/test_workers.py b/tests/config/test_workers.py index 086359fd71..2a643ae4f3 100644 --- a/tests/config/test_workers.py +++ b/tests/config/test_workers.py @@ -17,7 +17,7 @@ from unittest.mock import Mock from immutabledict import immutabledict from synapse.config import ConfigError -from synapse.config.workers import InstanceLocationConfig, WorkerConfig +from synapse.config.workers import WorkerConfig from tests.unittest import TestCase @@ -323,28 +323,3 @@ class WorkerDutyConfigTestCase(TestCase): ) self.assertTrue(worker2_config.should_notify_appservices) self.assertFalse(worker2_config.should_update_user_directory) - - def test_worker_instance_map_compat(self) -> None: - """ - Test that `worker_replication_*` settings are compatibly handled by - adding them to the instance map as a `main` entry. - """ - - worker1_config = self._make_worker_config( - worker_app="synapse.app.generic_worker", - worker_name="worker1", - extras={ - "notify_appservices_from_worker": "worker2", - "update_user_directory_from_worker": "worker1", - "worker_replication_host": "127.0.0.42", - "worker_replication_http_port": 1979, - }, - ) - self.assertEqual( - worker1_config.instance_map, - { - "master": InstanceLocationConfig( - host="127.0.0.42", port=1979, tls=False - ), - }, - ) From 677272caed43b1c534e5e779b5261327711178a5 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 7 Jul 2023 10:09:41 +0200 Subject: [PATCH 193/562] Remove `worker_replication_*` settings from worker doc (#15872) Co-authored-by: Mathieu Velten --- changelog.d/15872.doc | 1 + docs/systemd-with-workers/workers/background_worker.yaml | 4 ---- docs/systemd-with-workers/workers/event_persister.yaml | 6 +----- docs/systemd-with-workers/workers/federation_sender.yaml | 4 ---- docs/systemd-with-workers/workers/media_worker.yaml | 4 ---- docs/systemd-with-workers/workers/pusher_worker.yaml | 4 ---- 6 files changed, 2 insertions(+), 21 deletions(-) create mode 100644 changelog.d/15872.doc diff --git a/changelog.d/15872.doc b/changelog.d/15872.doc new file mode 100644 index 0000000000..1993bf0299 --- /dev/null +++ b/changelog.d/15872.doc @@ -0,0 +1 @@ +Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options. 
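The fail-fast checks that PATCH 192 adds to `WorkerConfig.read_config` all follow one pattern: spot a removed `worker_replication_*` key and raise a `ConfigError` naming both the key and its configured value, so the admin knows exactly what to move into `instance_map`. A condensed sketch of that pattern (`check_removed_worker_settings` is a hypothetical helper; the real checks are written out inline, one per setting):

```python
from typing import Any, Dict


class ConfigError(Exception):
    """Stand-in for synapse.config.ConfigError."""


REMOVED_SETTING_MESSAGE = (
    "'%s' is no longer a supported worker setting, please place '%s' onto your "
    "shared configuration under `main` inside the `instance_map`."
)


def check_removed_worker_settings(config: Dict[str, Any]) -> None:
    # Echo the configured value back in the error so the admin knows
    # exactly what to move into the `instance_map`.
    for key in (
        "worker_replication_host",
        "worker_replication_http_port",
        "worker_replication_http_tls",
    ):
        value = config.get(key)
        if value:
            raise ConfigError(REMOVED_SETTING_MESSAGE % (key, value))
```

Note that the `if value:` test mirrors the diff's behaviour: a worker still setting `worker_replication_http_tls: false` is not rejected, since `false` matches the old default and carries nothing to migrate.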
diff --git a/docs/systemd-with-workers/workers/background_worker.yaml b/docs/systemd-with-workers/workers/background_worker.yaml index 9fbfbda7db..e236e10458 100644 --- a/docs/systemd-with-workers/workers/background_worker.yaml +++ b/docs/systemd-with-workers/workers/background_worker.yaml @@ -1,8 +1,4 @@ worker_app: synapse.app.generic_worker worker_name: background_worker -# The replication listener on the main synapse process. -worker_replication_host: 127.0.0.1 -worker_replication_http_port: 9093 - worker_log_config: /etc/matrix-synapse/background-worker-log.yaml diff --git a/docs/systemd-with-workers/workers/event_persister.yaml b/docs/systemd-with-workers/workers/event_persister.yaml index c11d5897b1..018133772c 100644 --- a/docs/systemd-with-workers/workers/event_persister.yaml +++ b/docs/systemd-with-workers/workers/event_persister.yaml @@ -1,9 +1,5 @@ worker_app: synapse.app.generic_worker -worker_name: event_persister1 - -# The replication listener on the main synapse process. -worker_replication_host: 127.0.0.1 -worker_replication_http_port: 9093 +worker_name: event_persister1 worker_listeners: - type: http diff --git a/docs/systemd-with-workers/workers/federation_sender.yaml b/docs/systemd-with-workers/workers/federation_sender.yaml index 5c591aec2c..05b8e79e27 100644 --- a/docs/systemd-with-workers/workers/federation_sender.yaml +++ b/docs/systemd-with-workers/workers/federation_sender.yaml @@ -1,8 +1,4 @@ worker_app: synapse.app.federation_sender worker_name: federation_sender1 -# The replication listener on the main synapse process. -worker_replication_host: 127.0.0.1 -worker_replication_http_port: 9093 - worker_log_config: /etc/matrix-synapse/federation-sender-log.yaml diff --git a/docs/systemd-with-workers/workers/media_worker.yaml b/docs/systemd-with-workers/workers/media_worker.yaml index 8ad046f11a..6491959845 100644 --- a/docs/systemd-with-workers/workers/media_worker.yaml +++ b/docs/systemd-with-workers/workers/media_worker.yaml @@ -1,10 +1,6 @@ worker_app: synapse.app.media_repository worker_name: media_worker -# The replication listener on the main synapse process. -worker_replication_host: 127.0.0.1 -worker_replication_http_port: 9093 - worker_listeners: - type: http port: 8085 diff --git a/docs/systemd-with-workers/workers/pusher_worker.yaml b/docs/systemd-with-workers/workers/pusher_worker.yaml index 46e22c6f06..de91d03ec0 100644 --- a/docs/systemd-with-workers/workers/pusher_worker.yaml +++ b/docs/systemd-with-workers/workers/pusher_worker.yaml @@ -1,8 +1,4 @@ worker_app: synapse.app.pusher worker_name: pusher_worker1 -# The replication listener on the main synapse process. 
-worker_replication_host: 127.0.0.1 -worker_replication_http_port: 9093 - worker_log_config: /etc/matrix-synapse/pusher-worker-log.yaml From f25b0f88081bb436bef914983cff7087b54eba5f Mon Sep 17 00:00:00 2001 From: Shay Date: Fri, 7 Jul 2023 09:23:27 -0700 Subject: [PATCH 194/562] Stop writing to column `user_id` of tables `profiles` and `user_filters` (#15787) --- changelog.d/15787.misc | 1 + synapse/storage/database.py | 2 + synapse/storage/databases/main/__init__.py | 6 +- synapse/storage/databases/main/filtering.py | 5 +- synapse/storage/databases/main/profile.py | 12 +-- synapse/storage/schema/__init__.py | 9 +- .../79/01_drop_user_id_constraint_profiles.py | 50 ++++++++++ ...02_drop_user_id_constraint_user_filters.py | 54 +++++++++++ tests/storage/test_profile.py | 63 ------------- tests/storage/test_user_filters.py | 94 ------------------- 10 files changed, 123 insertions(+), 173 deletions(-) create mode 100644 changelog.d/15787.misc create mode 100644 synapse/storage/schema/main/delta/79/01_drop_user_id_constraint_profiles.py create mode 100644 synapse/storage/schema/main/delta/79/02_drop_user_id_constraint_user_filters.py delete mode 100644 tests/storage/test_user_filters.py diff --git a/changelog.d/15787.misc b/changelog.d/15787.misc new file mode 100644 index 0000000000..bd7536d36e --- /dev/null +++ b/changelog.d/15787.misc @@ -0,0 +1 @@ +Stop writing to column `user_id` of tables `profiles` and `user_filters`. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index a1c8fb0f46..c9d687fb2f 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -98,6 +98,8 @@ UNIQUE_INDEX_BACKGROUND_UPDATES = { "event_push_summary": "event_push_summary_unique_index2", "receipts_linearized": "receipts_linearized_unique_index", "receipts_graph": "receipts_graph_unique_index", + "profiles": "profiles_full_user_id_key_idx", + "user_filters": "full_users_filters_unique_idx", } diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 80c0304b19..b6028853c9 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -15,7 +15,7 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, List, Optional, Tuple, cast +from typing import TYPE_CHECKING, List, Optional, Tuple, Union, cast from synapse.api.constants import Direction from synapse.config.homeserver import HomeServerConfig @@ -196,7 +196,7 @@ class DataStore( txn: LoggingTransaction, ) -> Tuple[List[JsonDict], int]: filters = [] - args = [self.hs.config.server.server_name] + args: List[Union[str, int]] = [] # Set ordering order_by_column = UserSortOrder(order_by).value @@ -263,7 +263,7 @@ class DataStore( sql_base = f""" FROM users as u - LEFT JOIN profiles AS p ON u.name = '@' || p.user_id || ':' || ? 
+ LEFT JOIN profiles AS p ON u.name = p.full_user_id LEFT JOIN erased_users AS eu ON u.name = eu.user_id {where_clause} """ diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index fff417f9e3..75f7fe8756 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -188,14 +188,13 @@ class FilteringWorkerStore(SQLBaseStore): filter_id = max_id + 1 sql = ( - "INSERT INTO user_filters (full_user_id, user_id, filter_id, filter_json)" - "VALUES(?, ?, ?, ?)" + "INSERT INTO user_filters (full_user_id, filter_id, filter_json)" + "VALUES(?, ?, ?)" ) txn.execute( sql, ( user_id.to_string(), - user_id.localpart, filter_id, bytearray(def_json), ), diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index 3ba9cc8853..660a5507b7 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -173,10 +173,9 @@ class ProfileWorkerStore(SQLBaseStore): ) async def create_profile(self, user_id: UserID) -> None: - user_localpart = user_id.localpart await self.db_pool.simple_insert( table="profiles", - values={"user_id": user_localpart, "full_user_id": user_id.to_string()}, + values={"full_user_id": user_id.to_string()}, desc="create_profile", ) @@ -191,13 +190,11 @@ class ProfileWorkerStore(SQLBaseStore): new_displayname: The new display name. If this is None, the user's display name is removed. """ - user_localpart = user_id.localpart await self.db_pool.simple_upsert( table="profiles", - keyvalues={"user_id": user_localpart}, + keyvalues={"full_user_id": user_id.to_string()}, values={ "displayname": new_displayname, - "full_user_id": user_id.to_string(), }, desc="set_profile_displayname", ) @@ -213,11 +210,10 @@ class ProfileWorkerStore(SQLBaseStore): new_avatar_url: The new avatar URL. If this is None, the user's avatar is removed. """ - user_localpart = user_id.localpart await self.db_pool.simple_upsert( table="profiles", - keyvalues={"user_id": user_localpart}, - values={"avatar_url": new_avatar_url, "full_user_id": user_id.to_string()}, + keyvalues={"full_user_id": user_id.to_string()}, + values={"avatar_url": new_avatar_url}, desc="set_profile_avatar_url", ) diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index fc190a8b13..6d14963c0a 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-SCHEMA_VERSION = 78 # remember to update the list below when updating +SCHEMA_VERSION = 79 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -106,6 +106,9 @@ Changes in SCHEMA_VERSION = 77 Changes in SCHEMA_VERSION = 78 - Validate check (full_user_id IS NOT NULL) on tables profiles and user_filters + +Changes in SCHEMA_VERSION = 79 + - We no longer write to column user_id of tables profiles and user_filters """ @@ -118,7 +121,9 @@ SCHEMA_COMPAT_VERSION = ( # # insertions to the column `full_user_id` of tables profiles and user_filters can no # longer be null - 76 + # + # we no longer write to column `full_user_id` of tables profiles and user_filters + 78 ) """Limit on how far the synapse codebase can be rolled back without breaking db compat diff --git a/synapse/storage/schema/main/delta/79/01_drop_user_id_constraint_profiles.py b/synapse/storage/schema/main/delta/79/01_drop_user_id_constraint_profiles.py new file mode 100644 index 0000000000..3541266f7d --- /dev/null +++ b/synapse/storage/schema/main/delta/79/01_drop_user_id_constraint_profiles.py @@ -0,0 +1,50 @@ +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine + + +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: + """ + Update to drop the NOT NULL constraint on column user_id so that we can cease to + write to it without inserts to other columns triggering the constraint + """ + + if isinstance(database_engine, PostgresEngine): + drop_sql = """ + ALTER TABLE profiles ALTER COLUMN user_id DROP NOT NULL + """ + cur.execute(drop_sql) + else: + # irritatingly in SQLite we need to rewrite the table to drop the constraint. + cur.execute("DROP TABLE IF EXISTS temp_profiles") + + create_sql = """ + CREATE TABLE temp_profiles ( + full_user_id text NOT NULL, + user_id text, + displayname text, + avatar_url text, + UNIQUE (full_user_id), + UNIQUE (user_id) + ) + """ + cur.execute(create_sql) + + copy_sql = """ + INSERT INTO temp_profiles ( + user_id, + displayname, + avatar_url, + full_user_id) + SELECT user_id, displayname, avatar_url, full_user_id FROM profiles + """ + cur.execute(copy_sql) + + drop_sql = """ + DROP TABLE profiles + """ + cur.execute(drop_sql) + + rename_sql = """ + ALTER TABLE temp_profiles RENAME to profiles + """ + cur.execute(rename_sql) diff --git a/synapse/storage/schema/main/delta/79/02_drop_user_id_constraint_user_filters.py b/synapse/storage/schema/main/delta/79/02_drop_user_id_constraint_user_filters.py new file mode 100644 index 0000000000..8e7569c470 --- /dev/null +++ b/synapse/storage/schema/main/delta/79/02_drop_user_id_constraint_user_filters.py @@ -0,0 +1,54 @@ +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine + + +def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: + """ + Update to drop the NOT NULL constraint on column user_id so that we can cease to + write to it without inserts to other columns triggering the constraint + """ + if isinstance(database_engine, PostgresEngine): + drop_sql = """ + ALTER TABLE user_filters ALTER COLUMN user_id DROP NOT NULL + """ + cur.execute(drop_sql) + + else: + # irritatingly in SQLite we need to rewrite the table to drop the constraint. 
+ cur.execute("DROP TABLE IF EXISTS temp_user_filters") + + create_sql = """ + CREATE TABLE temp_user_filters ( + full_user_id text NOT NULL, + user_id text, + filter_id bigint NOT NULL, + filter_json bytea NOT NULL + ) + """ + cur.execute(create_sql) + + index_sql = """ + CREATE UNIQUE INDEX IF NOT EXISTS user_filters_full_user_id_unique ON + temp_user_filters (full_user_id, filter_id) + """ + cur.execute(index_sql) + + copy_sql = """ + INSERT INTO temp_user_filters ( + user_id, + filter_id, + filter_json, + full_user_id) + SELECT user_id, filter_id, filter_json, full_user_id FROM user_filters + """ + cur.execute(copy_sql) + + drop_sql = """ + DROP TABLE user_filters + """ + cur.execute(drop_sql) + + rename_sql = """ + ALTER TABLE temp_user_filters RENAME to user_filters + """ + cur.execute(rename_sql) diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index fe5bb77913..bbe8bd88bc 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -15,8 +15,6 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.server import HomeServer -from synapse.storage.database import LoggingTransaction -from synapse.storage.engines import PostgresEngine from synapse.types import UserID from synapse.util import Clock @@ -64,64 +62,3 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase): self.assertIsNone( self.get_success(self.store.get_profile_avatar_url(self.u_frank)) ) - - def test_profiles_bg_migration(self) -> None: - """ - Test background job that copies entries from column user_id to full_user_id, adding - the hostname in the process. - """ - updater = self.hs.get_datastores().main.db_pool.updates - - # drop the constraint so we can insert nulls in full_user_id to populate the test - if isinstance(self.store.database_engine, PostgresEngine): - - def f(txn: LoggingTransaction) -> None: - txn.execute( - "ALTER TABLE profiles DROP CONSTRAINT full_user_id_not_null" - ) - - self.get_success(self.store.db_pool.runInteraction("", f)) - - for i in range(0, 70): - self.get_success( - self.store.db_pool.simple_insert( - "profiles", - {"user_id": f"hello{i:02}"}, - ) - ) - - # re-add the constraint so that when it's validated it actually exists - if isinstance(self.store.database_engine, PostgresEngine): - - def f(txn: LoggingTransaction) -> None: - txn.execute( - "ALTER TABLE profiles ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID" - ) - - self.get_success(self.store.db_pool.runInteraction("", f)) - - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - values={ - "update_name": "populate_full_user_id_profiles", - "progress_json": "{}", - }, - ) - ) - - self.get_success( - updater.run_background_updates(False), - ) - - expected_values = [] - for i in range(0, 70): - expected_values.append((f"@hello{i:02}:{self.hs.hostname}",)) - - res = self.get_success( - self.store.db_pool.execute( - "", None, "SELECT full_user_id from profiles ORDER BY full_user_id" - ) - ) - self.assertEqual(len(res), len(expected_values)) - self.assertEqual(res, expected_values) diff --git a/tests/storage/test_user_filters.py b/tests/storage/test_user_filters.py deleted file mode 100644 index bab802f56e..0000000000 --- a/tests/storage/test_user_filters.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2023 The Matrix.org Foundation C.I.C -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from twisted.test.proto_helpers import MemoryReactor - -from synapse.server import HomeServer -from synapse.storage.database import LoggingTransaction -from synapse.storage.engines import PostgresEngine -from synapse.util import Clock - -from tests import unittest - - -class UserFiltersStoreTestCase(unittest.HomeserverTestCase): - """ - Test background migration that copies entries from column user_id to full_user_id, adding - the hostname in the process. - """ - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastores().main - - def test_bg_migration(self) -> None: - updater = self.hs.get_datastores().main.db_pool.updates - - # drop the constraint so we can insert nulls in full_user_id to populate the test - if isinstance(self.store.database_engine, PostgresEngine): - - def f(txn: LoggingTransaction) -> None: - txn.execute( - "ALTER TABLE user_filters DROP CONSTRAINT full_user_id_not_null" - ) - - self.get_success(self.store.db_pool.runInteraction("", f)) - - for i in range(0, 70): - self.get_success( - self.store.db_pool.simple_insert( - "user_filters", - { - "user_id": f"hello{i:02}", - "filter_id": i, - "filter_json": bytearray(i), - }, - ) - ) - - # re-add the constraint so that when it's validated it actually exists - if isinstance(self.store.database_engine, PostgresEngine): - - def f(txn: LoggingTransaction) -> None: - txn.execute( - "ALTER TABLE user_filters ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID" - ) - - self.get_success(self.store.db_pool.runInteraction("", f)) - - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - values={ - "update_name": "populate_full_user_id_user_filters", - "progress_json": "{}", - }, - ) - ) - - self.get_success( - updater.run_background_updates(False), - ) - - expected_values = [] - for i in range(0, 70): - expected_values.append((f"@hello{i:02}:{self.hs.hostname}",)) - - res = self.get_success( - self.store.db_pool.execute( - "", None, "SELECT full_user_id from user_filters ORDER BY full_user_id" - ) - ) - self.assertEqual(len(res), len(expected_values)) - self.assertEqual(res, expected_values) From 8a529e4fb65bb29121f9a0474da174c7b5ffac64 Mon Sep 17 00:00:00 2001 From: Shay Date: Fri, 7 Jul 2023 12:04:55 -0700 Subject: [PATCH 195/562] Stop running sytest on buster/python3.7 (#15892) --- .ci/scripts/calculate_jobs.py | 5 ----- changelog.d/15892.misc | 1 + 2 files changed, 1 insertion(+), 5 deletions(-) create mode 100644 changelog.d/15892.misc diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index c2c18b48e3..50e11e6504 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -134,11 +134,6 @@ if not IS_PR: "sytest-tag": "testing", "postgres": "postgres", }, - { - "sytest-tag": "buster", - "postgres": "multi-postgres", - "workers": "workers", - }, ] ) diff --git a/changelog.d/15892.misc b/changelog.d/15892.misc new file mode 100644 index 0000000000..e5a123d218 --- /dev/null +++ b/changelog.d/15892.misc @@ -0,0 +1 @@ +Stop running sytest on buster/python3.7. 
From df8c8a4f45df909fb6d19ecca5048ad30abf5531 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Jul 2023 10:24:11 +0100 Subject: [PATCH 196/562] Bump lxml from 4.9.2 to 4.9.3 (#15897) Bumps [lxml](https://github.com/lxml/lxml) from 4.9.2 to 4.9.3. - [Release notes](https://github.com/lxml/lxml/releases) - [Changelog](https://github.com/lxml/lxml/blob/master/CHANGES.txt) - [Commits](https://github.com/lxml/lxml/compare/lxml-4.9.2...lxml-4.9.3) --- updated-dependencies: - dependency-name: lxml dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 173 ++++++++++++++++++++++++++++------------------------ 1 file changed, 93 insertions(+), 80 deletions(-) diff --git a/poetry.lock b/poetry.lock index c62337053e..fc6892d80b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1073,95 +1073,108 @@ pyasn1 = ">=0.4.6" [[package]] name = "lxml" -version = "4.9.2" +version = "4.9.3" description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" files = [ - {file = "lxml-4.9.2-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:76cf573e5a365e790396a5cc2b909812633409306c6531a6877c59061e42c4f2"}, - {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b1f42b6921d0e81b1bcb5e395bc091a70f41c4d4e55ba99c6da2b31626c44892"}, - {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9f102706d0ca011de571de32c3247c6476b55bb6bc65a20f682f000b07a4852a"}, - {file = "lxml-4.9.2-cp27-cp27m-win32.whl", hash = "sha256:8d0b4612b66ff5d62d03bcaa043bb018f74dfea51184e53f067e6fdcba4bd8de"}, - {file = "lxml-4.9.2-cp27-cp27m-win_amd64.whl", hash = "sha256:4c8f293f14abc8fd3e8e01c5bd86e6ed0b6ef71936ded5bf10fe7a5efefbaca3"}, - {file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2899456259589aa38bfb018c364d6ae7b53c5c22d8e27d0ec7609c2a1ff78b50"}, - {file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6749649eecd6a9871cae297bffa4ee76f90b4504a2a2ab528d9ebe912b101975"}, - {file = "lxml-4.9.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a08cff61517ee26cb56f1e949cca38caabe9ea9fbb4b1e10a805dc39844b7d5c"}, - {file = "lxml-4.9.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:85cabf64adec449132e55616e7ca3e1000ab449d1d0f9d7f83146ed5bdcb6d8a"}, - {file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8340225bd5e7a701c0fa98284c849c9b9fc9238abf53a0ebd90900f25d39a4e4"}, - {file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:1ab8f1f932e8f82355e75dda5413a57612c6ea448069d4fb2e217e9a4bed13d4"}, - {file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:699a9af7dffaf67deeae27b2112aa06b41c370d5e7633e0ee0aea2e0b6c211f7"}, - {file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b9cc34af337a97d470040f99ba4282f6e6bac88407d021688a5d585e44a23184"}, - {file = "lxml-4.9.2-cp310-cp310-win32.whl", hash = "sha256:d02a5399126a53492415d4906ab0ad0375a5456cc05c3fc0fc4ca11771745cda"}, - {file = "lxml-4.9.2-cp310-cp310-win_amd64.whl", hash = 
"sha256:a38486985ca49cfa574a507e7a2215c0c780fd1778bb6290c21193b7211702ab"}, - {file = "lxml-4.9.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c83203addf554215463b59f6399835201999b5e48019dc17f182ed5ad87205c9"}, - {file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2a87fa548561d2f4643c99cd13131acb607ddabb70682dcf1dff5f71f781a4bf"}, - {file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:d6b430a9938a5a5d85fc107d852262ddcd48602c120e3dbb02137c83d212b380"}, - {file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3efea981d956a6f7173b4659849f55081867cf897e719f57383698af6f618a92"}, - {file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:df0623dcf9668ad0445e0558a21211d4e9a149ea8f5666917c8eeec515f0a6d1"}, - {file = "lxml-4.9.2-cp311-cp311-win32.whl", hash = "sha256:da248f93f0418a9e9d94b0080d7ebc407a9a5e6d0b57bb30db9b5cc28de1ad33"}, - {file = "lxml-4.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:3818b8e2c4b5148567e1b09ce739006acfaa44ce3156f8cbbc11062994b8e8dd"}, - {file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca989b91cf3a3ba28930a9fc1e9aeafc2a395448641df1f387a2d394638943b0"}, - {file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:822068f85e12a6e292803e112ab876bc03ed1f03dddb80154c395f891ca6b31e"}, - {file = "lxml-4.9.2-cp35-cp35m-win32.whl", hash = "sha256:be7292c55101e22f2a3d4d8913944cbea71eea90792bf914add27454a13905df"}, - {file = "lxml-4.9.2-cp35-cp35m-win_amd64.whl", hash = "sha256:998c7c41910666d2976928c38ea96a70d1aa43be6fe502f21a651e17483a43c5"}, - {file = "lxml-4.9.2-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:b26a29f0b7fc6f0897f043ca366142d2b609dc60756ee6e4e90b5f762c6adc53"}, - {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:ab323679b8b3030000f2be63e22cdeea5b47ee0abd2d6a1dc0c8103ddaa56cd7"}, - {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:689bb688a1db722485e4610a503e3e9210dcc20c520b45ac8f7533c837be76fe"}, - {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f49e52d174375a7def9915c9f06ec4e569d235ad428f70751765f48d5926678c"}, - {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:36c3c175d34652a35475a73762b545f4527aec044910a651d2bf50de9c3352b1"}, - {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a35f8b7fa99f90dd2f5dc5a9fa12332642f087a7641289ca6c40d6e1a2637d8e"}, - {file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:58bfa3aa19ca4c0f28c5dde0ff56c520fbac6f0daf4fac66ed4c8d2fb7f22e74"}, - {file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc718cd47b765e790eecb74d044cc8d37d58562f6c314ee9484df26276d36a38"}, - {file = "lxml-4.9.2-cp36-cp36m-win32.whl", hash = "sha256:d5bf6545cd27aaa8a13033ce56354ed9e25ab0e4ac3b5392b763d8d04b08e0c5"}, - {file = "lxml-4.9.2-cp36-cp36m-win_amd64.whl", hash = "sha256:3ab9fa9d6dc2a7f29d7affdf3edebf6ece6fb28a6d80b14c3b2fb9d39b9322c3"}, - {file = "lxml-4.9.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:05ca3f6abf5cf78fe053da9b1166e062ade3fa5d4f92b4ed688127ea7d7b1d03"}, - {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = 
"sha256:a5da296eb617d18e497bcf0a5c528f5d3b18dadb3619fbdadf4ed2356ef8d941"}, - {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:04876580c050a8c5341d706dd464ff04fd597095cc8c023252566a8826505726"}, - {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:c9ec3eaf616d67db0764b3bb983962b4f385a1f08304fd30c7283954e6a7869b"}, - {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2a29ba94d065945944016b6b74e538bdb1751a1db6ffb80c9d3c2e40d6fa9894"}, - {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a82d05da00a58b8e4c0008edbc8a4b6ec5a4bc1e2ee0fb6ed157cf634ed7fa45"}, - {file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:223f4232855ade399bd409331e6ca70fb5578efef22cf4069a6090acc0f53c0e"}, - {file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d17bc7c2ccf49c478c5bdd447594e82692c74222698cfc9b5daae7ae7e90743b"}, - {file = "lxml-4.9.2-cp37-cp37m-win32.whl", hash = "sha256:b64d891da92e232c36976c80ed7ebb383e3f148489796d8d31a5b6a677825efe"}, - {file = "lxml-4.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:a0a336d6d3e8b234a3aae3c674873d8f0e720b76bc1d9416866c41cd9500ffb9"}, - {file = "lxml-4.9.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:da4dd7c9c50c059aba52b3524f84d7de956f7fef88f0bafcf4ad7dde94a064e8"}, - {file = "lxml-4.9.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:821b7f59b99551c69c85a6039c65b75f5683bdc63270fec660f75da67469ca24"}, - {file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:e5168986b90a8d1f2f9dc1b841467c74221bd752537b99761a93d2d981e04889"}, - {file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8e20cb5a47247e383cf4ff523205060991021233ebd6f924bca927fcf25cf86f"}, - {file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:13598ecfbd2e86ea7ae45ec28a2a54fb87ee9b9fdb0f6d343297d8e548392c03"}, - {file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:880bbbcbe2fca64e2f4d8e04db47bcdf504936fa2b33933efd945e1b429bea8c"}, - {file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7d2278d59425777cfcb19735018d897ca8303abe67cc735f9f97177ceff8027f"}, - {file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5344a43228767f53a9df6e5b253f8cdca7dfc7b7aeae52551958192f56d98457"}, - {file = "lxml-4.9.2-cp38-cp38-win32.whl", hash = "sha256:925073b2fe14ab9b87e73f9a5fde6ce6392da430f3004d8b72cc86f746f5163b"}, - {file = "lxml-4.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:9b22c5c66f67ae00c0199f6055705bc3eb3fcb08d03d2ec4059a2b1b25ed48d7"}, - {file = "lxml-4.9.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5f50a1c177e2fa3ee0667a5ab79fdc6b23086bc8b589d90b93b4bd17eb0e64d1"}, - {file = "lxml-4.9.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:090c6543d3696cbe15b4ac6e175e576bcc3f1ccfbba970061b7300b0c15a2140"}, - {file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:63da2ccc0857c311d764e7d3d90f429c252e83b52d1f8f1d1fe55be26827d1f4"}, - {file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = 
"sha256:5b4545b8a40478183ac06c073e81a5ce4cf01bf1734962577cf2bb569a5b3bbf"}, - {file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2e430cd2824f05f2d4f687701144556646bae8f249fd60aa1e4c768ba7018947"}, - {file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6804daeb7ef69e7b36f76caddb85cccd63d0c56dedb47555d2fc969e2af6a1a5"}, - {file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a6e441a86553c310258aca15d1c05903aaf4965b23f3bc2d55f200804e005ee5"}, - {file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ca34efc80a29351897e18888c71c6aca4a359247c87e0b1c7ada14f0ab0c0fb2"}, - {file = "lxml-4.9.2-cp39-cp39-win32.whl", hash = "sha256:6b418afe5df18233fc6b6093deb82a32895b6bb0b1155c2cdb05203f583053f1"}, - {file = "lxml-4.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:f1496ea22ca2c830cbcbd473de8f114a320da308438ae65abad6bab7867fe38f"}, - {file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b264171e3143d842ded311b7dccd46ff9ef34247129ff5bf5066123c55c2431c"}, - {file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0dc313ef231edf866912e9d8f5a042ddab56c752619e92dfd3a2c277e6a7299a"}, - {file = "lxml-4.9.2-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:16efd54337136e8cd72fb9485c368d91d77a47ee2d42b057564aae201257d419"}, - {file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0f2b1e0d79180f344ff9f321327b005ca043a50ece8713de61d1cb383fb8ac05"}, - {file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:7b770ed79542ed52c519119473898198761d78beb24b107acf3ad65deae61f1f"}, - {file = "lxml-4.9.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efa29c2fe6b4fdd32e8ef81c1528506895eca86e1d8c4657fda04c9b3786ddf9"}, - {file = "lxml-4.9.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7e91ee82f4199af8c43d8158024cbdff3d931df350252288f0d4ce656df7f3b5"}, - {file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b23e19989c355ca854276178a0463951a653309fb8e57ce674497f2d9f208746"}, - {file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:01d36c05f4afb8f7c20fd9ed5badca32a2029b93b1750f571ccc0b142531caf7"}, - {file = "lxml-4.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7b515674acfdcadb0eb5d00d8a709868173acece5cb0be3dd165950cbfdf5409"}, - {file = "lxml-4.9.2.tar.gz", hash = "sha256:2455cfaeb7ac70338b3257f41e21f0724f4b5b0c0e7702da67ee6c3640835b67"}, + {file = "lxml-4.9.3-cp27-cp27m-macosx_11_0_x86_64.whl", hash = "sha256:b0a545b46b526d418eb91754565ba5b63b1c0b12f9bd2f808c852d9b4b2f9b5c"}, + {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:075b731ddd9e7f68ad24c635374211376aa05a281673ede86cbe1d1b3455279d"}, + {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1e224d5755dba2f4a9498e150c43792392ac9b5380aa1b845f98a1618c94eeef"}, + {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0781a98ff5e6586926293e59480b64ddd46282953203c76ae15dbbbf302e8bb"}, + {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cef2502e7e8a96fe5ad686d60b49e1ab03e438bd9123987994528febd569868e"}, + {file = 
"lxml-4.9.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b86164d2cff4d3aaa1f04a14685cbc072efd0b4f99ca5708b2ad1b9b5988a991"}, + {file = "lxml-4.9.3-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:42871176e7896d5d45138f6d28751053c711ed4d48d8e30b498da155af39aebd"}, + {file = "lxml-4.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae8b9c6deb1e634ba4f1930eb67ef6e6bf6a44b6eb5ad605642b2d6d5ed9ce3c"}, + {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:411007c0d88188d9f621b11d252cce90c4a2d1a49db6c068e3c16422f306eab8"}, + {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:cd47b4a0d41d2afa3e58e5bf1f62069255aa2fd6ff5ee41604418ca925911d76"}, + {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e2cb47860da1f7e9a5256254b74ae331687b9672dfa780eed355c4c9c3dbd23"}, + {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1247694b26342a7bf47c02e513d32225ededd18045264d40758abeb3c838a51f"}, + {file = "lxml-4.9.3-cp310-cp310-win32.whl", hash = "sha256:cdb650fc86227eba20de1a29d4b2c1bfe139dc75a0669270033cb2ea3d391b85"}, + {file = "lxml-4.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:97047f0d25cd4bcae81f9ec9dc290ca3e15927c192df17331b53bebe0e3ff96d"}, + {file = "lxml-4.9.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:1f447ea5429b54f9582d4b955f5f1985f278ce5cf169f72eea8afd9502973dd5"}, + {file = "lxml-4.9.3-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:57d6ba0ca2b0c462f339640d22882acc711de224d769edf29962b09f77129cbf"}, + {file = "lxml-4.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:9767e79108424fb6c3edf8f81e6730666a50feb01a328f4a016464a5893f835a"}, + {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:71c52db65e4b56b8ddc5bb89fb2e66c558ed9d1a74a45ceb7dcb20c191c3df2f"}, + {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d73d8ecf8ecf10a3bd007f2192725a34bd62898e8da27eb9d32a58084f93962b"}, + {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0a3d3487f07c1d7f150894c238299934a2a074ef590b583103a45002035be120"}, + {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e28c51fa0ce5674be9f560c6761c1b441631901993f76700b1b30ca6c8378d6"}, + {file = "lxml-4.9.3-cp311-cp311-win32.whl", hash = "sha256:0bfd0767c5c1de2551a120673b72e5d4b628737cb05414f03c3277bf9bed3305"}, + {file = "lxml-4.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:25f32acefac14ef7bd53e4218fe93b804ef6f6b92ffdb4322bb6d49d94cad2bc"}, + {file = "lxml-4.9.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:d3ff32724f98fbbbfa9f49d82852b159e9784d6094983d9a8b7f2ddaebb063d4"}, + {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:48d6ed886b343d11493129e019da91d4039826794a3e3027321c56d9e71505be"}, + {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9a92d3faef50658dd2c5470af249985782bf754c4e18e15afb67d3ab06233f13"}, + {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b4e4bc18382088514ebde9328da057775055940a1f2e18f6ad2d78aa0f3ec5b9"}, + {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fc9b106a1bf918db68619fdcd6d5ad4f972fdd19c01d19bdb6bf63f3589a9ec5"}, + {file = "lxml-4.9.3-cp312-cp312-win_amd64.whl", hash = 
"sha256:d37017287a7adb6ab77e1c5bee9bcf9660f90ff445042b790402a654d2ad81d8"}, + {file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:56dc1f1ebccc656d1b3ed288f11e27172a01503fc016bcabdcbc0978b19352b7"}, + {file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:578695735c5a3f51569810dfebd05dd6f888147a34f0f98d4bb27e92b76e05c2"}, + {file = "lxml-4.9.3-cp35-cp35m-win32.whl", hash = "sha256:704f61ba8c1283c71b16135caf697557f5ecf3e74d9e453233e4771d68a1f42d"}, + {file = "lxml-4.9.3-cp35-cp35m-win_amd64.whl", hash = "sha256:c41bfca0bd3532d53d16fd34d20806d5c2b1ace22a2f2e4c0008570bf2c58833"}, + {file = "lxml-4.9.3-cp36-cp36m-macosx_11_0_x86_64.whl", hash = "sha256:64f479d719dc9f4c813ad9bb6b28f8390360660b73b2e4beb4cb0ae7104f1c12"}, + {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:dd708cf4ee4408cf46a48b108fb9427bfa00b9b85812a9262b5c668af2533ea5"}, + {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c31c7462abdf8f2ac0577d9f05279727e698f97ecbb02f17939ea99ae8daa98"}, + {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e3cd95e10c2610c360154afdc2f1480aea394f4a4f1ea0a5eacce49640c9b190"}, + {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:4930be26af26ac545c3dffb662521d4e6268352866956672231887d18f0eaab2"}, + {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4aec80cde9197340bc353d2768e2a75f5f60bacda2bab72ab1dc499589b3878c"}, + {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:14e019fd83b831b2e61baed40cab76222139926b1fb5ed0e79225bc0cae14584"}, + {file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0c0850c8b02c298d3c7006b23e98249515ac57430e16a166873fc47a5d549287"}, + {file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:aca086dc5f9ef98c512bac8efea4483eb84abbf926eaeedf7b91479feb092458"}, + {file = "lxml-4.9.3-cp36-cp36m-win32.whl", hash = "sha256:50baa9c1c47efcaef189f31e3d00d697c6d4afda5c3cde0302d063492ff9b477"}, + {file = "lxml-4.9.3-cp36-cp36m-win_amd64.whl", hash = "sha256:bef4e656f7d98aaa3486d2627e7d2df1157d7e88e7efd43a65aa5dd4714916cf"}, + {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:46f409a2d60f634fe550f7133ed30ad5321ae2e6630f13657fb9479506b00601"}, + {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:4c28a9144688aef80d6ea666c809b4b0e50010a2aca784c97f5e6bf143d9f129"}, + {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:141f1d1a9b663c679dc524af3ea1773e618907e96075262726c7612c02b149a4"}, + {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:53ace1c1fd5a74ef662f844a0413446c0629d151055340e9893da958a374f70d"}, + {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17a753023436a18e27dd7769e798ce302963c236bc4114ceee5b25c18c52c693"}, + {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7d298a1bd60c067ea75d9f684f5f3992c9d6766fadbc0bcedd39750bf344c2f4"}, + {file = "lxml-4.9.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:081d32421db5df44c41b7f08a334a090a545c54ba977e47fd7cc2deece78809a"}, + {file = 
"lxml-4.9.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:23eed6d7b1a3336ad92d8e39d4bfe09073c31bfe502f20ca5116b2a334f8ec02"}, + {file = "lxml-4.9.3-cp37-cp37m-win32.whl", hash = "sha256:1509dd12b773c02acd154582088820893109f6ca27ef7291b003d0e81666109f"}, + {file = "lxml-4.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:120fa9349a24c7043854c53cae8cec227e1f79195a7493e09e0c12e29f918e52"}, + {file = "lxml-4.9.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4d2d1edbca80b510443f51afd8496be95529db04a509bc8faee49c7b0fb6d2cc"}, + {file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8d7e43bd40f65f7d97ad8ef5c9b1778943d02f04febef12def25f7583d19baac"}, + {file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:71d66ee82e7417828af6ecd7db817913cb0cf9d4e61aa0ac1fde0583d84358db"}, + {file = "lxml-4.9.3-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:6fc3c450eaa0b56f815c7b62f2b7fba7266c4779adcf1cece9e6deb1de7305ce"}, + {file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65299ea57d82fb91c7f019300d24050c4ddeb7c5a190e076b5f48a2b43d19c42"}, + {file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:eadfbbbfb41b44034a4c757fd5d70baccd43296fb894dba0295606a7cf3124aa"}, + {file = "lxml-4.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3e9bdd30efde2b9ccfa9cb5768ba04fe71b018a25ea093379c857c9dad262c40"}, + {file = "lxml-4.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fcdd00edfd0a3001e0181eab3e63bd5c74ad3e67152c84f93f13769a40e073a7"}, + {file = "lxml-4.9.3-cp38-cp38-win32.whl", hash = "sha256:57aba1bbdf450b726d58b2aea5fe47c7875f5afb2c4a23784ed78f19a0462574"}, + {file = "lxml-4.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:92af161ecbdb2883c4593d5ed4815ea71b31fafd7fd05789b23100d081ecac96"}, + {file = "lxml-4.9.3-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:9bb6ad405121241e99a86efff22d3ef469024ce22875a7ae045896ad23ba2340"}, + {file = "lxml-4.9.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8ed74706b26ad100433da4b9d807eae371efaa266ffc3e9191ea436087a9d6a7"}, + {file = "lxml-4.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fbf521479bcac1e25a663df882c46a641a9bff6b56dc8b0fafaebd2f66fb231b"}, + {file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:303bf1edce6ced16bf67a18a1cf8339d0db79577eec5d9a6d4a80f0fb10aa2da"}, + {file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:5515edd2a6d1a5a70bfcdee23b42ec33425e405c5b351478ab7dc9347228f96e"}, + {file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:690dafd0b187ed38583a648076865d8c229661ed20e48f2335d68e2cf7dc829d"}, + {file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b6420a005548ad52154c8ceab4a1290ff78d757f9e5cbc68f8c77089acd3c432"}, + {file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bb3bb49c7a6ad9d981d734ef7c7193bc349ac338776a0360cc671eaee89bcf69"}, + {file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d27be7405547d1f958b60837dc4c1007da90b8b23f54ba1f8b728c78fdb19d50"}, + {file = "lxml-4.9.3-cp39-cp39-win32.whl", hash = "sha256:8df133a2ea5e74eef5e8fc6f19b9e085f758768a16e9877a60aec455ed2609b2"}, + {file = "lxml-4.9.3-cp39-cp39-win_amd64.whl", hash = 
"sha256:4dd9a263e845a72eacb60d12401e37c616438ea2e5442885f65082c276dfb2b2"}, + {file = "lxml-4.9.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6689a3d7fd13dc687e9102a27e98ef33730ac4fe37795d5036d18b4d527abd35"}, + {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f6bdac493b949141b733c5345b6ba8f87a226029cbabc7e9e121a413e49441e0"}, + {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:05186a0f1346ae12553d66df1cfce6f251589fea3ad3da4f3ef4e34b2d58c6a3"}, + {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c2006f5c8d28dee289f7020f721354362fa304acbaaf9745751ac4006650254b"}, + {file = "lxml-4.9.3-pp38-pypy38_pp73-macosx_11_0_x86_64.whl", hash = "sha256:5c245b783db29c4e4fbbbfc9c5a78be496c9fea25517f90606aa1f6b2b3d5f7b"}, + {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4fb960a632a49f2f089d522f70496640fdf1218f1243889da3822e0a9f5f3ba7"}, + {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:50670615eaf97227d5dc60de2dc99fb134a7130d310d783314e7724bf163f75d"}, + {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9719fe17307a9e814580af1f5c6e05ca593b12fb7e44fe62450a5384dbf61b4b"}, + {file = "lxml-4.9.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3331bece23c9ee066e0fb3f96c61322b9e0f54d775fccefff4c38ca488de283a"}, + {file = "lxml-4.9.3-pp39-pypy39_pp73-macosx_11_0_x86_64.whl", hash = "sha256:ed667f49b11360951e201453fc3967344d0d0263aa415e1619e85ae7fd17b4e0"}, + {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8b77946fd508cbf0fccd8e400a7f71d4ac0e1595812e66025bac475a8e811694"}, + {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e4da8ca0c0c0aea88fd46be8e44bd49716772358d648cce45fe387f7b92374a7"}, + {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fe4bda6bd4340caa6e5cf95e73f8fea5c4bfc55763dd42f1b50a94c1b4a2fbd4"}, + {file = "lxml-4.9.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:f3df3db1d336b9356dd3112eae5f5c2b8b377f3bc826848567f10bfddfee77e9"}, + {file = "lxml-4.9.3.tar.gz", hash = "sha256:48628bd53a426c9eb9bc066a923acaa0878d1e86129fd5359aee99285f4eed9c"}, ] [package.extras] cssselect = ["cssselect (>=0.7)"] html5 = ["html5lib"] htmlsoup = ["BeautifulSoup4"] -source = ["Cython (>=0.29.7)"] +source = ["Cython (>=0.29.35)"] [[package]] name = "lxml-stubs" @@ -3243,4 +3256,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "0832381cc9e7065e8d95c810d732aa031b98d55cf188719989b12d841993e62e" +content-hash = "448c0d2b9815e67ac32494f1abf7b5ec9f22883833476a6942f85b3ba29ec9a6" From 3710fea19d50be1a0e83e843bde718c6c291539f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Jul 2023 10:16:03 +0000 Subject: [PATCH 197/562] Bump ruff from 0.0.275 to 0.0.277 (#15900) Bumps [ruff](https://github.com/astral-sh/ruff) from 0.0.275 to 0.0.277. 
- [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/BREAKING_CHANGES.md) - [Commits](https://github.com/astral-sh/ruff/compare/v0.0.275...v0.0.277) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 38 +++++++++++++++++++------------------- pyproject.toml | 2 +- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/poetry.lock b/poetry.lock index fc6892d80b..b903fdc9ae 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2242,28 +2242,28 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "ruff" -version = "0.0.275" +version = "0.0.277" description = "An extremely fast Python linter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.275-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:5e6554a072e7ce81eb6f0bec1cebd3dcb0e358652c0f4900d7d630d61691e914"}, - {file = "ruff-0.0.275-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:1cc599022fe5ffb143a965b8d659eb64161ab8ab4433d208777eab018a1aab67"}, - {file = "ruff-0.0.275-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5206fc1cd8c1c1deadd2e6360c0dbcd690f1c845da588ca9d32e4a764a402c60"}, - {file = "ruff-0.0.275-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0c4e6468da26f77b90cae35319d310999f471a8c352998e9b39937a23750149e"}, - {file = "ruff-0.0.275-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0dbdea02942131dbc15dd45f431d152224f15e1dd1859fcd0c0487b658f60f1a"}, - {file = "ruff-0.0.275-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:22efd9f41af27ef8fb9779462c46c35c89134d33e326c889971e10b2eaf50c63"}, - {file = "ruff-0.0.275-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2c09662112cfa22d7467a19252a546291fd0eae4f423e52b75a7a2000a1894db"}, - {file = "ruff-0.0.275-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80043726662144876a381efaab88841c88e8df8baa69559f96b22d4fa216bef1"}, - {file = "ruff-0.0.275-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5859ee543b01b7eb67835dfd505faa8bb7cc1550f0295c92c1401b45b42be399"}, - {file = "ruff-0.0.275-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c8ace4d40a57b5ea3c16555f25a6b16bc5d8b2779ae1912ce2633543d4e9b1da"}, - {file = "ruff-0.0.275-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8347fc16aa185aae275906c4ac5b770e00c896b6a0acd5ba521f158801911998"}, - {file = "ruff-0.0.275-py3-none-musllinux_1_2_i686.whl", hash = "sha256:ec43658c64bfda44fd84bbea9da8c7a3b34f65448192d1c4dd63e9f4e7abfdd4"}, - {file = "ruff-0.0.275-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:508b13f7ca37274cceaba4fb3ea5da6ca192356323d92acf39462337c33ad14e"}, - {file = "ruff-0.0.275-py3-none-win32.whl", hash = "sha256:6afb1c4422f24f361e877937e2a44b3f8176774a476f5e33845ebfe887dd5ec2"}, - {file = "ruff-0.0.275-py3-none-win_amd64.whl", hash = "sha256:d9b264d78621bf7b698b6755d4913ab52c19bd28bee1a16001f954d64c1a1220"}, - {file = "ruff-0.0.275-py3-none-win_arm64.whl", hash = "sha256:a19ce3bea71023eee5f0f089dde4a4272d088d5ac0b675867e074983238ccc65"}, - {file = "ruff-0.0.275.tar.gz", hash = "sha256:a63a0b645da699ae5c758fce19188e901b3033ec54d862d93fcd042addf7f38d"}, + {file = 
"ruff-0.0.277-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:3250b24333ef419b7a232080d9724ccc4d2da1dbbe4ce85c4caa2290d83200f8"}, + {file = "ruff-0.0.277-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:3e60605e07482183ba1c1b7237eca827bd6cbd3535fe8a4ede28cbe2a323cb97"}, + {file = "ruff-0.0.277-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7baa97c3d7186e5ed4d5d4f6834d759a27e56cf7d5874b98c507335f0ad5aadb"}, + {file = "ruff-0.0.277-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:74e4b206cb24f2e98a615f87dbe0bde18105217cbcc8eb785bb05a644855ba50"}, + {file = "ruff-0.0.277-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:479864a3ccd8a6a20a37a6e7577bdc2406868ee80b1e65605478ad3b8eb2ba0b"}, + {file = "ruff-0.0.277-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:468bfb0a7567443cec3d03cf408d6f562b52f30c3c29df19927f1e0e13a40cd7"}, + {file = "ruff-0.0.277-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f32ec416c24542ca2f9cc8c8b65b84560530d338aaf247a4a78e74b99cd476b4"}, + {file = "ruff-0.0.277-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:14a7b2f00f149c5a295f188a643ac25226ff8a4d08f7a62b1d4b0a1dc9f9b85c"}, + {file = "ruff-0.0.277-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9879f59f763cc5628aa01c31ad256a0f4dc61a29355c7315b83c2a5aac932b5"}, + {file = "ruff-0.0.277-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f612e0a14b3d145d90eb6ead990064e22f6f27281d847237560b4e10bf2251f3"}, + {file = "ruff-0.0.277-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:323b674c98078be9aaded5b8b51c0d9c424486566fb6ec18439b496ce79e5998"}, + {file = "ruff-0.0.277-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3a43fbe026ca1a2a8c45aa0d600a0116bec4dfa6f8bf0c3b871ecda51ef2b5dd"}, + {file = "ruff-0.0.277-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:734165ea8feb81b0d53e3bf523adc2413fdb76f1264cde99555161dd5a725522"}, + {file = "ruff-0.0.277-py3-none-win32.whl", hash = "sha256:88d0f2afb2e0c26ac1120e7061ddda2a566196ec4007bd66d558f13b374b9efc"}, + {file = "ruff-0.0.277-py3-none-win_amd64.whl", hash = "sha256:6fe81732f788894a00f6ade1fe69e996cc9e485b7c35b0f53fb00284397284b2"}, + {file = "ruff-0.0.277-py3-none-win_arm64.whl", hash = "sha256:2d4444c60f2e705c14cd802b55cd2b561d25bf4311702c463a002392d3116b22"}, + {file = "ruff-0.0.277.tar.gz", hash = "sha256:2dab13cdedbf3af6d4427c07f47143746b6b95d9e4a254ac369a0edb9280a0d2"}, ] [[package]] @@ -3256,4 +3256,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "448c0d2b9815e67ac32494f1abf7b5ec9f22883833476a6942f85b3ba29ec9a6" +content-hash = "0a8c6605e7e1d0ac7188a5d02b47a029bfb0f917458b87cb40755911442383d8" diff --git a/pyproject.toml b/pyproject.toml index a6e3a935a9..fc1b8c0dad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -309,7 +309,7 @@ all = [ # We pin black so that our tests don't start failing on new releases. isort = ">=5.10.1" black = ">=22.3.0" -ruff = "0.0.275" +ruff = "0.0.277" # Typechecking lxml-stubs = ">=0.4.0" From 7477f43fd8aa8dd6eb46ce88bc76105f34abf3d2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Jul 2023 15:34:26 +0200 Subject: [PATCH 198/562] Bump serde_json from 1.0.99 to 1.0.100 (#15901) Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.99 to 1.0.100. 
- [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.99...v1.0.100) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 52f911277e..172a8dc913 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -182,9 +182,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.52" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" +checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da" dependencies = [ "unicode-ident", ] @@ -273,9 +273,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.26" +version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" +checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" dependencies = [ "proc-macro2", ] @@ -320,29 +320,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.164" +version = "1.0.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" +checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.164" +version = "1.0.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" +checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" dependencies = [ "proc-macro2", "quote", - "syn 2.0.10", + "syn 2.0.25", ] [[package]] name = "serde_json" -version = "1.0.99" +version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3" +checksum = "0f1e14e89be7aa4c4b78bdbdc9eb5bf8517829a600ae8eaa39a6e1d960b5185c" dependencies = [ "itoa", "ryu", @@ -374,9 +374,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.10" +version = "2.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40" +checksum = "15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2" dependencies = [ "proc-macro2", "quote", From c971698bff14c18d2b51440b9cbb9474f81c2feb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Jul 2023 15:55:39 +0100 Subject: [PATCH 199/562] Bump regex from 1.8.4 to 1.9.1 (#15902) Bumps [regex](https://github.com/rust-lang/regex) from 1.8.4 to 1.9.1. - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.8.4...1.9.1) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 172a8dc913..d6fa96ea77 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -291,9 +291,21 @@ dependencies = [ [[package]] name = "regex" -version = "1.8.4" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" +checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf" dependencies = [ "aho-corasick", "memchr", @@ -302,9 +314,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" +checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846" [[package]] name = "ryu" From 6e731e86bfa9d92f983f7df9367e37aa80733078 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 10 Jul 2023 10:23:30 -0500 Subject: [PATCH 200/562] Placeholder changelog --- changelog.d/15910.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/15910.misc diff --git a/changelog.d/15910.misc b/changelog.d/15910.misc new file mode 100644 index 0000000000..d8f68a5610 --- /dev/null +++ b/changelog.d/15910.misc @@ -0,0 +1 @@ +Placeholder changelog so we can see more of the CI Pass (remove before merging). From 6774f265b4761302cab84405a181d437e69b89c0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jul 2023 16:24:04 +0100 Subject: [PATCH 201/562] Fix building rust with nightly (#15906) Also fix up a warning. --- Cargo.toml | 1 + changelog.d/15906.misc | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/15906.misc diff --git a/Cargo.toml b/Cargo.toml index de141bdee9..c636b3acda 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,3 +3,4 @@ [workspace] members = ["rust"] +resolver = "2" diff --git a/changelog.d/15906.misc b/changelog.d/15906.misc new file mode 100644 index 0000000000..b721b88d56 --- /dev/null +++ b/changelog.d/15906.misc @@ -0,0 +1 @@ +Fix building rust with nightly rust compiler. From e55a9b3e41e73f34fda781b9374935c4623f7ea9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jul 2023 16:24:42 +0100 Subject: [PATCH 202/562] Fix downgrading to previous version of Synapse (#15907) We do this by marking the constraint as deferrable. --- changelog.d/15907.misc | 1 + synapse/storage/background_updates.py | 7 ++++++- synapse/storage/databases/main/event_federation.py | 4 +++- .../main/delta/78/03event_extremities_constraints.py | 10 ++++++++-- tests/storage/test_background_update.py | 8 ++++++-- 5 files changed, 24 insertions(+), 6 deletions(-) create mode 100644 changelog.d/15907.misc diff --git a/changelog.d/15907.misc b/changelog.d/15907.misc new file mode 100644 index 0000000000..e0ecea6c2f --- /dev/null +++ b/changelog.d/15907.misc @@ -0,0 +1 @@ +Add foreign key constraint to `event_forward_extremities`. 
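To illustrate what marking the constraint as deferrable buys, here is a minimal
sketch using only Python's built-in `sqlite3` module. The table and constraint
names mirror the schema in this patch, but the row values are illustrative and
nothing below is Synapse code: the extremity row is inserted *before* the event
it references, which is the write order the previous Synapse version used.

```python
import sqlite3

conn = sqlite3.connect(":memory:")
# SQLite does not enforce foreign keys unless asked to.
conn.execute("PRAGMA foreign_keys = ON")

conn.execute("CREATE TABLE events (event_id TEXT PRIMARY KEY)")
conn.execute(
    """
    CREATE TABLE event_forward_extremities (
        event_id TEXT NOT NULL,
        room_id TEXT NOT NULL,
        UNIQUE (event_id, room_id),
        CONSTRAINT event_forward_extremities_event_id
            FOREIGN KEY (event_id) REFERENCES events (event_id)
            DEFERRABLE INITIALLY DEFERRED
    )
    """
)

# Insert the extremity *before* the event it references. A deferred
# constraint is only checked when the transaction commits, by which point
# the parent row exists, so the commit succeeds.
conn.execute(
    "INSERT INTO event_forward_extremities (event_id, room_id) VALUES (?, ?)",
    ("$event1:example.com", "!room1:example.com"),
)
conn.execute("INSERT INTO events (event_id) VALUES (?)", ("$event1:example.com",))
conn.commit()
```

Dropping `DEFERRABLE INITIALLY DEFERRED` from the schema above makes the first
INSERT fail immediately with `sqlite3.IntegrityError`, which is the failure
mode a downgraded Synapse would otherwise hit.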
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 5dce0a0159..2d5ddc3e7b 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -80,10 +80,14 @@ class ForeignKeyConstraint(Constraint): Attributes: referenced_table: The "parent" table name. columns: The list of mappings of columns from table to referenced table + deferred: Whether to defer checking of the constraint to the end of the + transaction. This is useful for e.g. backwards compatibility where + an older version inserted data in the wrong order. """ referenced_table: str columns: Sequence[Tuple[str, str]] + deferred: bool def make_check_clause(self, table: str) -> str: join_clause = " AND ".join( @@ -94,7 +98,8 @@ class ForeignKeyConstraint(Constraint): def make_constraint_clause_postgres(self) -> str: column1_list = ", ".join(col1 for col1, col2 in self.columns) column2_list = ", ".join(col2 for col1, col2 in self.columns) - return f"FOREIGN KEY ({column1_list}) REFERENCES {self.referenced_table} ({column2_list})" + defer_clause = " DEFERRABLE INITIALLY DEFERRED" if self.deferred else "" + return f"FOREIGN KEY ({column1_list}) REFERENCES {self.referenced_table} ({column2_list}) {defer_clause}" @attr.s(auto_attribs=True) diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index dabe603c8c..b2cda52ce5 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -146,7 +146,9 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas update_name="event_forward_extremities_event_id_foreign_key_constraint_update", table="event_forward_extremities", constraint_name="event_forward_extremities_event_id", - constraint=ForeignKeyConstraint("events", [("event_id", "event_id")]), + constraint=ForeignKeyConstraint( + "events", [("event_id", "event_id")], deferred=True + ), unique_columns=("event_id", "room_id"), ) diff --git a/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py b/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py index f12e2a8f3e..bf8c57dbe8 100644 --- a/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py +++ b/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py @@ -28,19 +28,25 @@ FORWARD_EXTREMITIES_TABLE_SCHEMA = """ event_id TEXT NOT NULL, room_id TEXT NOT NULL, UNIQUE (event_id, room_id), - CONSTRAINT event_forward_extremities_event_id FOREIGN KEY (event_id) REFERENCES events (event_id) + CONSTRAINT event_forward_extremities_event_id FOREIGN KEY (event_id) REFERENCES events (event_id) DEFERRABLE INITIALLY DEFERRED ) """ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: + # We mark this as a deferred constraint, as the previous version of Synapse + # inserted the event into the forward extremities *before* the events table. + # By marking as deferred we ensure that downgrading to the previous version + # will continue to work. 
run_validate_constraint_and_delete_rows_schema_delta( cur, ordering=7803, update_name="event_forward_extremities_event_id_foreign_key_constraint_update", table="event_forward_extremities", constraint_name="event_forward_extremities_event_id", - constraint=ForeignKeyConstraint("events", [("event_id", "event_id")]), + constraint=ForeignKeyConstraint( + "events", [("event_id", "event_id")], deferred=True + ), sqlite_table_name="event_forward_extremities2", sqlite_table_schema=FORWARD_EXTREMITIES_TABLE_SCHEMA, ) diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 6ca546f3f7..a4a823a252 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -586,7 +586,9 @@ class BackgroundUpdateValidateConstraintTestCase(unittest.HomeserverTestCase): update_name="test_bg_update", table="test_constraint", constraint_name="test_constraint_name", - constraint=ForeignKeyConstraint("base_table", [("b", "b")]), + constraint=ForeignKeyConstraint( + "base_table", [("b", "b")], deferred=False + ), sqlite_table_name="test_constraint2", sqlite_table_schema=table2_sqlite, ) @@ -604,7 +606,9 @@ class BackgroundUpdateValidateConstraintTestCase(unittest.HomeserverTestCase): "test_bg_update", table="test_constraint", constraint_name="test_constraint_name", - constraint=ForeignKeyConstraint("base_table", [("b", "b")]), + constraint=ForeignKeyConstraint( + "base_table", [("b", "b")], deferred=False + ), unique_columns=["a"], ) From a704a35dd71cbd5eb5baba1757da07fdc49dfd8a Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 10 Jul 2023 10:26:04 -0500 Subject: [PATCH 203/562] Revert "Placeholder changelog" This reverts commit 6e731e86bfa9d92f983f7df9367e37aa80733078. --- changelog.d/15910.misc | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/15910.misc diff --git a/changelog.d/15910.misc b/changelog.d/15910.misc deleted file mode 100644 index d8f68a5610..0000000000 --- a/changelog.d/15910.misc +++ /dev/null @@ -1 +0,0 @@ -Placeholder changelog so we can see more of the CI Pass (remove before merging). From c9bf644fa0c2c06f8143b14ccdb655feebed97df Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 10 Jul 2023 11:10:20 -0500 Subject: [PATCH 204/562] Revert "Federation outbound proxy" (#15910) Revert "Federation outbound proxy (#15773)" This reverts commit b07b14b494ae1dd564b4c44f844c9a9545b3d08a. 
--- changelog.d/15773.feature | 1 - .../configuration/config_documentation.md | 31 +-- docs/workers.md | 20 -- synapse/app/_base.py | 2 - synapse/app/generic_worker.py | 1 - synapse/app/homeserver.py | 1 - synapse/config/workers.py | 40 +-- synapse/http/client.py | 7 +- synapse/http/matrixfederationclient.py | 132 +--------- synapse/http/proxy.py | 249 ------------------ synapse/http/proxyagent.py | 79 +----- synapse/http/server.py | 55 ++-- synapse/http/site.py | 26 +- tests/app/test_openid_listener.py | 8 +- tests/handlers/test_device.py | 3 +- tests/handlers/test_federation.py | 2 +- tests/handlers/test_presence.py | 1 + tests/handlers/test_typing.py | 10 - tests/http/test_matrixfederationclient.py | 189 +------------ tests/http/test_proxy.py | 53 ---- tests/replication/_base.py | 3 +- .../test_federation_sender_shard.py | 22 +- tests/rest/client/test_presence.py | 1 + tests/rest/client/test_rooms.py | 2 + tests/storage/test_e2e_room_keys.py | 2 +- tests/storage/test_purge.py | 2 +- tests/storage/test_rollback_worker.py | 4 +- tests/test_server.py | 33 +-- tests/unittest.py | 1 - 29 files changed, 90 insertions(+), 890 deletions(-) delete mode 100644 changelog.d/15773.feature delete mode 100644 synapse/http/proxy.py delete mode 100644 tests/http/test_proxy.py diff --git a/changelog.d/15773.feature b/changelog.d/15773.feature deleted file mode 100644 index 0d77fae2dc..0000000000 --- a/changelog.d/15773.feature +++ /dev/null @@ -1 +0,0 @@ -Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 04e8390ffe..ff59cbccc1 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3930,14 +3930,13 @@ federation_sender_instances: --- ### `instance_map` -When using workers this should be a map from [`worker_name`](#worker_name) to the HTTP -replication listener of the worker, if configured, and to the main process. Each worker -declared under [`stream_writers`](../../workers.md#stream-writers) and -[`outbound_federation_restricted_to`](#outbound_federation_restricted_to) needs a HTTP replication listener, and that -listener should be included in the `instance_map`. The main process also needs an entry -on the `instance_map`, and it should be listed under `main` **if even one other worker -exists**. Ensure the port matches with what is declared inside the `listener` block for -a `replication` listener. +When using workers this should be a map from [`worker_name`](#worker_name) to the +HTTP replication listener of the worker, if configured, and to the main process. +Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs +a HTTP replication listener, and that listener should be included in the `instance_map`. +The main process also needs an entry on the `instance_map`, and it should be listed under +`main` **if even one other worker exists**. Ensure the port matches with what is declared +inside the `listener` block for a `replication` listener. Example configuration: @@ -3967,22 +3966,6 @@ stream_writers: typing: worker1 ``` --- -### `outbound_federation_restricted_to` - -When using workers, you can restrict outbound federation traffic to only go through a -specific subset of workers. Any worker specified here must also be in the -[`instance_map`](#instance_map). 
- -```yaml -outbound_federation_restricted_to: - - federation_sender1 - - federation_sender2 -``` - -Also see the [worker -documentation](../../workers.md#restrict-outbound-federation-traffic-to-a-specific-set-of-workers) -for more info. ---- ### `run_background_tasks_on` The [worker](../../workers.md#background-tasks) that is used to run diff --git a/docs/workers.md b/docs/workers.md index 03415c6eb3..828f082e75 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -528,26 +528,6 @@ the stream writer for the `presence` stream: ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/ -#### Restrict outbound federation traffic to a specific set of workers - -The `outbound_federation_restricted_to` configuration is useful to make sure outbound -federation traffic only goes through a specified subset of workers. This allows you to -set more strict access controls (like a firewall) for all workers and only allow the -`federation_sender`'s to contact the outside world. - -```yaml -instance_map: - main: - host: localhost - port: 8030 - federation_sender1: - host: localhost - port: 8034 - -outbound_federation_restricted_to: - - federation_sender1 -``` - #### Background tasks There is also support for moving background tasks to a separate diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 938ab40f27..936b1b0430 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -386,7 +386,6 @@ def listen_unix( def listen_http( - hs: "HomeServer", listener_config: ListenerConfig, root_resource: Resource, version_string: str, @@ -407,7 +406,6 @@ def listen_http( version_string, max_request_body_size=max_request_body_size, reactor=reactor, - federation_agent=hs.get_federation_http_client().agent, ) if isinstance(listener_config, TCPListenerConfig): diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index dc79efcc14..7406c3948c 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -221,7 +221,6 @@ class GenericWorkerServer(HomeServer): root_resource = create_resource_tree(resources, OptionsResource()) _base.listen_http( - self, listener_config, root_resource, self.version_string, diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index f188c7265a..84236ac299 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -139,7 +139,6 @@ class SynapseHomeServer(HomeServer): root_resource = OptionsResource() ports = listen_http( - self, listener_config, create_resource_tree(resources, root_resource), self.version_string, diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 5c81eb5c67..ccfe75eaf3 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -15,7 +15,7 @@ import argparse import logging -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Union import attr from pydantic import BaseModel, Extra, StrictBool, StrictInt, StrictStr @@ -154,27 +154,6 @@ class WriterLocations: ) -@attr.s(auto_attribs=True) -class OutboundFederationRestrictedTo: - """Whether we limit outbound federation to a certain set of instances. - - Attributes: - instances: optional list of instances that can make outbound federation - requests. If None then all instances can make federation requests. - locations: list of instance locations to connect to proxy via. 
- """ - - instances: Optional[List[str]] - locations: List[InstanceLocationConfig] = attr.Factory(list) - - def __contains__(self, instance: str) -> bool: - # It feels a bit dirty to return `True` if `instances` is `None`, but it makes - # sense in downstream usage in the sense that if - # `outbound_federation_restricted_to` is not configured, then any instance can - # talk to federation (no restrictions so always return `True`). - return self.instances is None or instance in self.instances - - class WorkerConfig(Config): """The workers are processes run separately to the main synapse process. They have their own pid_file and listener configuration. They use the @@ -386,23 +365,6 @@ class WorkerConfig(Config): new_option_name="update_user_directory_from_worker", ) - outbound_federation_restricted_to = config.get( - "outbound_federation_restricted_to", None - ) - self.outbound_federation_restricted_to = OutboundFederationRestrictedTo( - outbound_federation_restricted_to - ) - if outbound_federation_restricted_to: - for instance in outbound_federation_restricted_to: - if instance not in self.instance_map: - raise ConfigError( - "Instance %r is configured in 'outbound_federation_restricted_to' but does not appear in `instance_map` config." - % (instance,) - ) - self.outbound_federation_restricted_to.locations.append( - self.instance_map[instance] - ) - def _should_this_worker_perform_duty( self, config: Dict[str, Any], diff --git a/synapse/http/client.py b/synapse/http/client.py index ca2cdbc6e2..09ea93e10d 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -1037,12 +1037,7 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol): if reason.check(ResponseDone): self.deferred.callback(self.length) elif reason.check(PotentialDataLoss): - # This applies to requests which don't set `Content-Length` or a - # `Transfer-Encoding` in the response because in this case the end of the - # response is indicated by the connection being closed, an event which may - # also be due to a transient network problem or other error. But since this - # behavior is expected of some servers (like YouTube), let's ignore it. 
- # Stolen from https://github.com/twisted/treq/pull/49/files + # stolen from https://github.com/twisted/treq/pull/49/files # http://twistedmatrix.com/trac/ticket/4840 self.deferred.callback(self.length) else: diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index b00396fdc7..cc4e258b0f 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -50,7 +50,7 @@ from twisted.internet.interfaces import IReactorTime from twisted.internet.task import Cooperator from twisted.web.client import ResponseFailed from twisted.web.http_headers import Headers -from twisted.web.iweb import IAgent, IBodyProducer, IResponse +from twisted.web.iweb import IBodyProducer, IResponse import synapse.metrics import synapse.util.retryutils @@ -72,7 +72,6 @@ from synapse.http.client import ( read_body_with_max_size, ) from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent -from synapse.http.proxyagent import ProxyAgent from synapse.http.types import QueryParams from synapse.logging import opentracing from synapse.logging.context import make_deferred_yieldable, run_in_background @@ -394,32 +393,17 @@ class MatrixFederationHttpClient: if hs.config.server.user_agent_suffix: user_agent = "%s %s" % (user_agent, hs.config.server.user_agent_suffix) - outbound_federation_restricted_to = ( - hs.config.worker.outbound_federation_restricted_to + federation_agent = MatrixFederationAgent( + self.reactor, + tls_client_options_factory, + user_agent.encode("ascii"), + hs.config.server.federation_ip_range_allowlist, + hs.config.server.federation_ip_range_blocklist, ) - if hs.get_instance_name() in outbound_federation_restricted_to: - # Talk to federation directly - federation_agent: IAgent = MatrixFederationAgent( - self.reactor, - tls_client_options_factory, - user_agent.encode("ascii"), - hs.config.server.federation_ip_range_allowlist, - hs.config.server.federation_ip_range_blocklist, - ) - else: - # We need to talk to federation via the proxy via one of the configured - # locations - federation_proxies = outbound_federation_restricted_to.locations - federation_agent = ProxyAgent( - self.reactor, - self.reactor, - tls_client_options_factory, - federation_proxies=federation_proxies, - ) # Use a BlocklistingAgentWrapper to prevent circumventing the IP # blocking via IP literals in server names - self.agent: IAgent = BlocklistingAgentWrapper( + self.agent = BlocklistingAgentWrapper( federation_agent, ip_blocklist=hs.config.server.federation_ip_range_blocklist, ) @@ -428,6 +412,7 @@ class MatrixFederationHttpClient: self._store = hs.get_datastores().main self.version_string_bytes = hs.version_string.encode("ascii") self.default_timeout_seconds = hs.config.federation.client_timeout_ms / 1000 + self.max_long_retry_delay_seconds = ( hs.config.federation.max_long_retry_delay_ms / 1000 ) @@ -1146,101 +1131,6 @@ class MatrixFederationHttpClient: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. - Raises: - HttpResponseException: If we get an HTTP response code >= 300 - (except 429). - NotRetryingDestination: If we are not yet ready to retry this - server. - FederationDeniedError: If this destination is not on our - federation whitelist - RequestSendFailed: If there were problems connecting to the - remote, due to e.g. DNS failures, connection timeouts etc. 
- """ - json_dict, _ = await self.get_json_with_headers( - destination=destination, - path=path, - args=args, - retry_on_dns_fail=retry_on_dns_fail, - timeout=timeout, - ignore_backoff=ignore_backoff, - try_trailing_slash_on_400=try_trailing_slash_on_400, - parser=parser, - ) - return json_dict - - @overload - async def get_json_with_headers( - self, - destination: str, - path: str, - args: Optional[QueryParams] = None, - retry_on_dns_fail: bool = True, - timeout: Optional[int] = None, - ignore_backoff: bool = False, - try_trailing_slash_on_400: bool = False, - parser: Literal[None] = None, - ) -> Tuple[JsonDict, Dict[bytes, List[bytes]]]: - ... - - @overload - async def get_json_with_headers( - self, - destination: str, - path: str, - args: Optional[QueryParams] = ..., - retry_on_dns_fail: bool = ..., - timeout: Optional[int] = ..., - ignore_backoff: bool = ..., - try_trailing_slash_on_400: bool = ..., - parser: ByteParser[T] = ..., - ) -> Tuple[T, Dict[bytes, List[bytes]]]: - ... - - async def get_json_with_headers( - self, - destination: str, - path: str, - args: Optional[QueryParams] = None, - retry_on_dns_fail: bool = True, - timeout: Optional[int] = None, - ignore_backoff: bool = False, - try_trailing_slash_on_400: bool = False, - parser: Optional[ByteParser[T]] = None, - ) -> Tuple[Union[JsonDict, T], Dict[bytes, List[bytes]]]: - """GETs some json from the given host homeserver and path - - Args: - destination: The remote server to send the HTTP request to. - - path: The HTTP path. - - args: A dictionary used to create query strings, defaults to - None. - - retry_on_dns_fail: true if the request should be retried on DNS failures - - timeout: number of milliseconds to wait for the response. - self._default_timeout (60s) by default. - - Note that we may make several attempts to send the request; this - timeout applies to the time spent waiting for response headers for - *each* attempt (including connection time) as well as the time spent - reading the response body after a 200 response. - - ignore_backoff: true to ignore the historical backoff data - and try the request anyway. - - try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED - response we should try appending a trailing slash to the end of - the request. Workaround for #3622 in Synapse <= v0.99.3. - - parser: The parser to use to decode the response. Defaults to - parsing as JSON. - - Returns: - Succeeds when we get a 2xx HTTP response. The result will be a tuple of the - decoded JSON body and a dict of the response headers. - Raises: HttpResponseException: If we get an HTTP response code >= 300 (except 429). @@ -1266,8 +1156,6 @@ class MatrixFederationHttpClient: timeout=timeout, ) - headers = dict(response.headers.getAllRawHeaders()) - if timeout is not None: _sec_timeout = timeout / 1000 else: @@ -1285,7 +1173,7 @@ class MatrixFederationHttpClient: parser=parser, ) - return body, headers + return body async def delete_json( self, diff --git a/synapse/http/proxy.py b/synapse/http/proxy.py deleted file mode 100644 index 0874d67760..0000000000 --- a/synapse/http/proxy.py +++ /dev/null @@ -1,249 +0,0 @@ -# Copyright 2023 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import json -import logging -import urllib.parse -from typing import TYPE_CHECKING, Any, Optional, Set, Tuple, cast - -from twisted.internet import protocol -from twisted.internet.interfaces import ITCPTransport -from twisted.internet.protocol import connectionDone -from twisted.python import failure -from twisted.python.failure import Failure -from twisted.web.client import ResponseDone -from twisted.web.http_headers import Headers -from twisted.web.iweb import IAgent, IResponse -from twisted.web.resource import IResource -from twisted.web.server import Site - -from synapse.api.errors import Codes -from synapse.http import QuieterFileBodyProducer -from synapse.http.server import _AsyncResource -from synapse.logging.context import make_deferred_yieldable, run_in_background -from synapse.types import ISynapseReactor -from synapse.util.async_helpers import timeout_deferred - -if TYPE_CHECKING: - from synapse.http.site import SynapseRequest - -logger = logging.getLogger(__name__) - -# "Hop-by-hop" headers (as opposed to "end-to-end" headers) as defined by RFC2616 -# section 13.5.1 and referenced in RFC9110 section 7.6.1. These are meant to only be -# consumed by the immediate recipient and not be forwarded on. -HOP_BY_HOP_HEADERS = { - "Connection", - "Keep-Alive", - "Proxy-Authenticate", - "Proxy-Authorization", - "TE", - "Trailers", - "Transfer-Encoding", - "Upgrade", -} - - -def parse_connection_header_value( - connection_header_value: Optional[bytes], -) -> Set[str]: - """ - Parse the `Connection` header to determine which headers we should not be copied - over from the remote response. - - As defined by RFC2616 section 14.10 and RFC9110 section 7.6.1 - - Example: `Connection: close, X-Foo, X-Bar` will return `{"Close", "X-Foo", "X-Bar"}` - - Even though "close" is a special directive, let's just treat it as just another - header for simplicity. If people want to check for this directive, they can simply - check for `"Close" in headers`. - - Args: - connection_header_value: The value of the `Connection` header. - - Returns: - The set of header names that should not be copied over from the remote response. - The keys are capitalized in canonical capitalization. - """ - headers = Headers() - extra_headers_to_remove: Set[str] = set() - if connection_header_value: - extra_headers_to_remove = { - headers._canonicalNameCaps(connection_option.strip()).decode("ascii") - for connection_option in connection_header_value.split(b",") - } - - return extra_headers_to_remove - - -class ProxyResource(_AsyncResource): - """ - A stub resource that proxies any requests with a `matrix-federation://` scheme - through the given `federation_agent` to the remote homeserver and ferries back the - info. 
- """ - - isLeaf = True - - def __init__(self, reactor: ISynapseReactor, federation_agent: IAgent): - super().__init__(True) - - self.reactor = reactor - self.agent = federation_agent - - async def _async_render(self, request: "SynapseRequest") -> Tuple[int, Any]: - uri = urllib.parse.urlparse(request.uri) - assert uri.scheme == b"matrix-federation" - - headers = Headers() - for header_name in (b"User-Agent", b"Authorization", b"Content-Type"): - header_value = request.getHeader(header_name) - if header_value: - headers.addRawHeader(header_name, header_value) - - request_deferred = run_in_background( - self.agent.request, - request.method, - request.uri, - headers=headers, - bodyProducer=QuieterFileBodyProducer(request.content), - ) - request_deferred = timeout_deferred( - request_deferred, - # This should be set longer than the timeout in `MatrixFederationHttpClient` - # so that it has enough time to complete and pass us the data before we give - # up. - timeout=90, - reactor=self.reactor, - ) - - response = await make_deferred_yieldable(request_deferred) - - return response.code, response - - def _send_response( - self, - request: "SynapseRequest", - code: int, - response_object: Any, - ) -> None: - response = cast(IResponse, response_object) - response_headers = cast(Headers, response.headers) - - request.setResponseCode(code) - - # The `Connection` header also defines which headers should not be copied over. - connection_header = response_headers.getRawHeaders(b"connection") - extra_headers_to_remove = parse_connection_header_value( - connection_header[0] if connection_header else None - ) - - # Copy headers. - for k, v in response_headers.getAllRawHeaders(): - # Do not copy over any hop-by-hop headers. These are meant to only be - # consumed by the immediate recipient and not be forwarded on. - header_key = k.decode("ascii") - if ( - header_key in HOP_BY_HOP_HEADERS - or header_key in extra_headers_to_remove - ): - continue - - request.responseHeaders.setRawHeaders(k, v) - - response.deliverBody(_ProxyResponseBody(request)) - - def _send_error_response( - self, - f: failure.Failure, - request: "SynapseRequest", - ) -> None: - request.setResponseCode(502) - request.setHeader(b"Content-Type", b"application/json") - request.write( - ( - json.dumps( - { - "errcode": Codes.UNKNOWN, - "err": "ProxyResource: Error when proxying request: %s %s -> %s" - % ( - request.method.decode("ascii"), - request.uri.decode("ascii"), - f, - ), - } - ) - ).encode() - ) - request.finish() - - -class _ProxyResponseBody(protocol.Protocol): - """ - A protocol that proxies the given remote response data back out to the given local - request. - """ - - transport: Optional[ITCPTransport] = None - - def __init__(self, request: "SynapseRequest") -> None: - self._request = request - - def dataReceived(self, data: bytes) -> None: - # Avoid sending response data to the local request that already disconnected - if self._request._disconnected and self.transport is not None: - # Close the connection (forcefully) since all the data will get - # discarded anyway. - self.transport.abortConnection() - return - - self._request.write(data) - - def connectionLost(self, reason: Failure = connectionDone) -> None: - # If the local request is already finished (successfully or failed), don't - # worry about sending anything back. - if self._request.finished: - return - - if reason.check(ResponseDone): - self._request.finish() - else: - # Abort the underlying request since our remote request also failed. 
- self._request.transport.abortConnection() - - -class ProxySite(Site): - """ - Proxies any requests with a `matrix-federation://` scheme through the given - `federation_agent`. Otherwise, behaves like a normal `Site`. - """ - - def __init__( - self, - resource: IResource, - reactor: ISynapseReactor, - federation_agent: IAgent, - ): - super().__init__(resource, reactor=reactor) - - self._proxy_resource = ProxyResource(reactor, federation_agent) - - def getResourceFor(self, request: "SynapseRequest") -> IResource: - uri = urllib.parse.urlparse(request.uri) - if uri.scheme == b"matrix-federation": - return self._proxy_resource - - return super().getResourceFor(request) diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py index 1fa3adbef2..7bdc4acae7 100644 --- a/synapse/http/proxyagent.py +++ b/synapse/http/proxyagent.py @@ -12,9 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -import random import re -from typing import Any, Collection, Dict, List, Optional, Sequence, Tuple +from typing import Any, Dict, Optional, Tuple from urllib.parse import urlparse from urllib.request import ( # type: ignore[attr-defined] getproxies_environment, @@ -25,12 +24,7 @@ from zope.interface import implementer from twisted.internet import defer from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS -from twisted.internet.interfaces import ( - IProtocol, - IProtocolFactory, - IReactorCore, - IStreamClientEndpoint, -) +from twisted.internet.interfaces import IReactorCore, IStreamClientEndpoint from twisted.python.failure import Failure from twisted.web.client import ( URI, @@ -42,10 +36,8 @@ from twisted.web.error import SchemeNotSupported from twisted.web.http_headers import Headers from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS, IResponse -from synapse.config.workers import InstanceLocationConfig from synapse.http import redact_uri from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials -from synapse.logging.context import run_in_background logger = logging.getLogger(__name__) @@ -82,10 +74,6 @@ class ProxyAgent(_AgentBase): use_proxy: Whether proxy settings should be discovered and used from conventional environment variables. - federation_proxies: An optional list of locations to proxy outbound federation - traffic through (only requests that use the `matrix-federation://` scheme - will be proxied). - Raises: ValueError if use_proxy is set and the environment variables contain an invalid proxy specification. 
@@ -101,7 +89,6 @@ class ProxyAgent(_AgentBase): bindAddress: Optional[bytes] = None, pool: Optional[HTTPConnectionPool] = None, use_proxy: bool = False, - federation_proxies: Collection[InstanceLocationConfig] = (), ): contextFactory = contextFactory or BrowserLikePolicyForHTTPS() @@ -140,27 +127,6 @@ class ProxyAgent(_AgentBase): self._policy_for_https = contextFactory self._reactor = reactor - self._federation_proxy_endpoint: Optional[IStreamClientEndpoint] = None - if federation_proxies: - endpoints = [] - for federation_proxy in federation_proxies: - endpoint = HostnameEndpoint( - self.proxy_reactor, - federation_proxy.host, - federation_proxy.port, - ) - - if federation_proxy.tls: - tls_connection_creator = self._policy_for_https.creatorForNetloc( - federation_proxy.host, - federation_proxy.port, - ) - endpoint = wrapClientTLS(tls_connection_creator, endpoint) - - endpoints.append(endpoint) - - self._federation_proxy_endpoint = _ProxyEndpoints(endpoints) - def request( self, method: bytes, @@ -248,14 +214,6 @@ class ProxyAgent(_AgentBase): parsed_uri.port, self.https_proxy_creds, ) - elif ( - parsed_uri.scheme == b"matrix-federation" - and self._federation_proxy_endpoint - ): - # Cache *all* connections under the same key, since we are only - # connecting to a single destination, the proxy: - endpoint = self._federation_proxy_endpoint - request_path = uri else: # not using a proxy endpoint = HostnameEndpoint( @@ -275,11 +233,6 @@ class ProxyAgent(_AgentBase): endpoint = wrapClientTLS(tls_connection_creator, endpoint) elif parsed_uri.scheme == b"http": pass - elif ( - parsed_uri.scheme == b"matrix-federation" - and self._federation_proxy_endpoint - ): - pass else: return defer.fail( Failure( @@ -384,31 +337,3 @@ def parse_proxy( credentials = ProxyCredentials(b"".join([url.username, b":", url.password])) return url.scheme, url.hostname, url.port or default_port, credentials - - -@implementer(IStreamClientEndpoint) -class _ProxyEndpoints: - """An endpoint that randomly iterates through a given list of endpoints at - each connection attempt. 
- """ - - def __init__(self, endpoints: Sequence[IStreamClientEndpoint]) -> None: - assert endpoints - self._endpoints = endpoints - - def connect( - self, protocol_factory: IProtocolFactory - ) -> "defer.Deferred[IProtocol]": - """Implements IStreamClientEndpoint interface""" - - return run_in_background(self._do_connect, protocol_factory) - - async def _do_connect(self, protocol_factory: IProtocolFactory) -> IProtocol: - failures: List[Failure] = [] - for endpoint in random.sample(self._endpoints, k=len(self._endpoints)): - try: - return await endpoint.connect(protocol_factory) - except Exception: - failures.append(Failure()) - - failures.pop().raiseException() diff --git a/synapse/http/server.py b/synapse/http/server.py index ff3153a9d9..933172c873 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -18,7 +18,6 @@ import html import logging import types import urllib -import urllib.parse from http import HTTPStatus from http.client import FOUND from inspect import isawaitable @@ -66,6 +65,7 @@ from synapse.api.errors import ( UnrecognizedRequestError, ) from synapse.config.homeserver import HomeServerConfig +from synapse.http.site import SynapseRequest from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background from synapse.logging.opentracing import active_span, start_active_span, trace_servlet from synapse.util import json_encoder @@ -76,7 +76,6 @@ from synapse.util.iterutils import chunk_seq if TYPE_CHECKING: import opentracing - from synapse.http.site import SynapseRequest from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -103,7 +102,7 @@ HTTP_STATUS_REQUEST_CANCELLED = 499 def return_json_error( - f: failure.Failure, request: "SynapseRequest", config: Optional[HomeServerConfig] + f: failure.Failure, request: SynapseRequest, config: Optional[HomeServerConfig] ) -> None: """Sends a JSON error response to clients.""" @@ -221,8 +220,8 @@ def return_html_error( def wrap_async_request_handler( - h: Callable[["_AsyncResource", "SynapseRequest"], Awaitable[None]] -) -> Callable[["_AsyncResource", "SynapseRequest"], "defer.Deferred[None]"]: + h: Callable[["_AsyncResource", SynapseRequest], Awaitable[None]] +) -> Callable[["_AsyncResource", SynapseRequest], "defer.Deferred[None]"]: """Wraps an async request handler so that it calls request.processing. This helps ensure that work done by the request handler after the request is completed @@ -236,7 +235,7 @@ def wrap_async_request_handler( """ async def wrapped_async_request_handler( - self: "_AsyncResource", request: "SynapseRequest" + self: "_AsyncResource", request: SynapseRequest ) -> None: with request.processing(): await h(self, request) @@ -301,7 +300,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): self._extract_context = extract_context - def render(self, request: "SynapseRequest") -> int: + def render(self, request: SynapseRequest) -> int: """This gets called by twisted every time someone sends us a request.""" request.render_deferred = defer.ensureDeferred( self._async_render_wrapper(request) @@ -309,7 +308,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): return NOT_DONE_YET @wrap_async_request_handler - async def _async_render_wrapper(self, request: "SynapseRequest") -> None: + async def _async_render_wrapper(self, request: SynapseRequest) -> None: """This is a wrapper that delegates to `_async_render` and handles exceptions, return values, metrics, etc. 
""" @@ -327,15 +326,9 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): # of our stack, and thus gives us a sensible stack # trace. f = failure.Failure() - logger.exception( - "Error handling request", - exc_info=(f.type, f.value, f.getTracebackObject()), - ) self._send_error_response(f, request) - async def _async_render( - self, request: "SynapseRequest" - ) -> Optional[Tuple[int, Any]]: + async def _async_render(self, request: SynapseRequest) -> Optional[Tuple[int, Any]]: """Delegates to `_async_render_` methods, or returns a 400 if no appropriate method exists. Can be overridden in sub classes for different routing. @@ -365,7 +358,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): @abc.abstractmethod def _send_response( self, - request: "SynapseRequest", + request: SynapseRequest, code: int, response_object: Any, ) -> None: @@ -375,7 +368,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): def _send_error_response( self, f: failure.Failure, - request: "SynapseRequest", + request: SynapseRequest, ) -> None: raise NotImplementedError() @@ -391,7 +384,7 @@ class DirectServeJsonResource(_AsyncResource): def _send_response( self, - request: "SynapseRequest", + request: SynapseRequest, code: int, response_object: Any, ) -> None: @@ -408,7 +401,7 @@ class DirectServeJsonResource(_AsyncResource): def _send_error_response( self, f: failure.Failure, - request: "SynapseRequest", + request: SynapseRequest, ) -> None: """Implements _AsyncResource._send_error_response""" return_json_error(f, request, None) @@ -480,7 +473,7 @@ class JsonResource(DirectServeJsonResource): ) def _get_handler_for_request( - self, request: "SynapseRequest" + self, request: SynapseRequest ) -> Tuple[ServletCallback, str, Dict[str, str]]: """Finds a callback method to handle the given request. @@ -510,7 +503,7 @@ class JsonResource(DirectServeJsonResource): # Huh. No one wanted to handle that? Fiiiiiine. raise UnrecognizedRequestError(code=404) - async def _async_render(self, request: "SynapseRequest") -> Tuple[int, Any]: + async def _async_render(self, request: SynapseRequest) -> Tuple[int, Any]: callback, servlet_classname, group_dict = self._get_handler_for_request(request) request.is_render_cancellable = is_function_cancellable(callback) @@ -542,7 +535,7 @@ class JsonResource(DirectServeJsonResource): def _send_error_response( self, f: failure.Failure, - request: "SynapseRequest", + request: SynapseRequest, ) -> None: """Implements _AsyncResource._send_error_response""" return_json_error(f, request, self.hs.config) @@ -558,7 +551,7 @@ class DirectServeHtmlResource(_AsyncResource): def _send_response( self, - request: "SynapseRequest", + request: SynapseRequest, code: int, response_object: Any, ) -> None: @@ -572,7 +565,7 @@ class DirectServeHtmlResource(_AsyncResource): def _send_error_response( self, f: failure.Failure, - request: "SynapseRequest", + request: SynapseRequest, ) -> None: """Implements _AsyncResource._send_error_response""" return_html_error(f, request, self.ERROR_TEMPLATE) @@ -599,7 +592,7 @@ class UnrecognizedRequestResource(resource.Resource): errcode of M_UNRECOGNIZED. 
""" - def render(self, request: "SynapseRequest") -> int: + def render(self, request: SynapseRequest) -> int: f = failure.Failure(UnrecognizedRequestError(code=404)) return_json_error(f, request, None) # A response has already been sent but Twisted requires either NOT_DONE_YET @@ -629,7 +622,7 @@ class RootRedirect(resource.Resource): class OptionsResource(resource.Resource): """Responds to OPTION requests for itself and all children.""" - def render_OPTIONS(self, request: "SynapseRequest") -> bytes: + def render_OPTIONS(self, request: SynapseRequest) -> bytes: request.setResponseCode(204) request.setHeader(b"Content-Length", b"0") @@ -744,7 +737,7 @@ def _encode_json_bytes(json_object: object) -> bytes: def respond_with_json( - request: "SynapseRequest", + request: SynapseRequest, code: int, json_object: Any, send_cors: bool = False, @@ -794,7 +787,7 @@ def respond_with_json( def respond_with_json_bytes( - request: "SynapseRequest", + request: SynapseRequest, code: int, json_bytes: bytes, send_cors: bool = False, @@ -832,7 +825,7 @@ def respond_with_json_bytes( async def _async_write_json_to_request_in_thread( - request: "SynapseRequest", + request: SynapseRequest, json_encoder: Callable[[Any], bytes], json_object: Any, ) -> None: @@ -890,7 +883,7 @@ def _write_bytes_to_request(request: Request, bytes_to_write: bytes) -> None: _ByteProducer(request, bytes_generator) -def set_cors_headers(request: "SynapseRequest") -> None: +def set_cors_headers(request: SynapseRequest) -> None: """Set the CORS headers so that javascript running in a web browsers can use this API @@ -988,7 +981,7 @@ def set_clickjacking_protection_headers(request: Request) -> None: def respond_with_redirect( - request: "SynapseRequest", url: bytes, statusCode: int = FOUND, cors: bool = False + request: SynapseRequest, url: bytes, statusCode: int = FOUND, cors: bool = False ) -> None: """ Write a 302 (or other specified status code) response to the request, if it is still alive. diff --git a/synapse/http/site.py b/synapse/http/site.py index 0ee2598345..5b5a7c1e59 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -21,28 +21,25 @@ from zope.interface import implementer from twisted.internet.address import UNIXAddress from twisted.internet.defer import Deferred -from twisted.internet.interfaces import IAddress +from twisted.internet.interfaces import IAddress, IReactorTime from twisted.python.failure import Failure from twisted.web.http import HTTPChannel -from twisted.web.iweb import IAgent from twisted.web.resource import IResource, Resource -from twisted.web.server import Request +from twisted.web.server import Request, Site from synapse.config.server import ListenerConfig from synapse.http import get_request_user_agent, redact_uri -from synapse.http.proxy import ProxySite from synapse.http.request_metrics import RequestMetrics, requests_counter from synapse.logging.context import ( ContextRequest, LoggingContext, PreserveLoggingContext, ) -from synapse.types import ISynapseReactor, Requester +from synapse.types import Requester if TYPE_CHECKING: import opentracing - logger = logging.getLogger(__name__) _next_request_seq = 0 @@ -105,7 +102,7 @@ class SynapseRequest(Request): # A boolean indicating whether `render_deferred` should be cancelled if the # client disconnects early. Expected to be set by the coroutine started by # `Resource.render`, if rendering is asynchronous. 
- self.is_render_cancellable: bool = False + self.is_render_cancellable = False global _next_request_seq self.request_seq = _next_request_seq @@ -604,7 +601,7 @@ class _XForwardedForAddress: host: str -class SynapseSite(ProxySite): +class SynapseSite(Site): """ Synapse-specific twisted http Site @@ -626,8 +623,7 @@ class SynapseSite(ProxySite): resource: IResource, server_version_string: str, max_request_body_size: int, - reactor: ISynapseReactor, - federation_agent: IAgent, + reactor: IReactorTime, ): """ @@ -642,11 +638,7 @@ class SynapseSite(ProxySite): dropping the connection reactor: reactor to be used to manage connection timeouts """ - super().__init__( - resource=resource, - reactor=reactor, - federation_agent=federation_agent, - ) + Site.__init__(self, resource, reactor=reactor) self.site_tag = site_tag self.reactor = reactor @@ -657,9 +649,7 @@ class SynapseSite(ProxySite): request_id_header = config.http_options.request_id_header - self.experimental_cors_msc3886: bool = ( - config.http_options.experimental_cors_msc3886 - ) + self.experimental_cors_msc3886 = config.http_options.experimental_cors_msc3886 def request_factory(channel: HTTPChannel, queued: bool) -> Request: return request_class( diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 21c5309740..5a965f233b 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -31,7 +31,9 @@ from tests.unittest import HomeserverTestCase class FederationReaderOpenIDListenerTests(HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver(homeserver_to_use=GenericWorkerServer) + hs = self.setup_test_homeserver( + federation_http_client=None, homeserver_to_use=GenericWorkerServer + ) return hs def default_config(self) -> JsonDict: @@ -89,7 +91,9 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): @patch("synapse.app.homeserver.KeyResource", new=Mock()) class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver(homeserver_to_use=SynapseHomeServer) + hs = self.setup_test_homeserver( + federation_http_client=None, homeserver_to_use=SynapseHomeServer + ) return hs @parameterized.expand( diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 66215af2b8..ee48f9e546 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -41,6 +41,7 @@ class DeviceTestCase(unittest.HomeserverTestCase): self.appservice_api = mock.Mock() hs = self.setup_test_homeserver( "server", + federation_http_client=None, application_service_api=self.appservice_api, ) handler = hs.get_device_handler() @@ -400,7 +401,7 @@ class DeviceTestCase(unittest.HomeserverTestCase): class DehydrationTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver("server") + hs = self.setup_test_homeserver("server", federation_http_client=None) handler = hs.get_device_handler() assert isinstance(handler, DeviceHandler) self.handler = handler diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 5f11d5df11..bf0862ed54 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -57,7 +57,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): ] def make_homeserver(self, reactor: MemoryReactor, clock: 
Clock) -> HomeServer: - hs = self.setup_test_homeserver() + hs = self.setup_test_homeserver(federation_http_client=None) self.handler = hs.get_federation_handler() self.store = hs.get_datastores().main return hs diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index fd66d573d2..19f5322317 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -993,6 +993,7 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver( "server", + federation_http_client=None, federation_sender=Mock(spec=FederationSender), ) return hs diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 5da1d95f0b..94518a7196 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -17,8 +17,6 @@ import json from typing import Dict, List, Set from unittest.mock import ANY, Mock, call -from netaddr import IPSet - from twisted.test.proto_helpers import MemoryReactor from twisted.web.resource import Resource @@ -26,7 +24,6 @@ from synapse.api.constants import EduTypes from synapse.api.errors import AuthError from synapse.federation.transport.server import TransportLayerServer from synapse.handlers.typing import TypingWriterHandler -from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent from synapse.server import HomeServer from synapse.types import JsonDict, Requester, UserID, create_requester from synapse.util import Clock @@ -79,13 +76,6 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): # we mock out the federation client too self.mock_federation_client = Mock(spec=["put_json"]) self.mock_federation_client.put_json.return_value = make_awaitable((200, "OK")) - self.mock_federation_client.agent = MatrixFederationAgent( - reactor, - tls_client_options_factory=None, - user_agent=b"SynapseInTrialTest/0.0.0", - ip_allowlist=None, - ip_blocklist=IPSet(), - ) # the tests assume that we are starting at unix time 1000 reactor.pump((1000,)) diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py index a8b9737d1f..b5f4a60fe5 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -11,8 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Dict, Generator -from unittest.mock import ANY, Mock, create_autospec +from typing import Generator +from unittest.mock import Mock from netaddr import IPSet from parameterized import parameterized @@ -21,11 +21,10 @@ from twisted.internet import defer from twisted.internet.defer import Deferred, TimeoutError from twisted.internet.error import ConnectingCancelledError, DNSLookupError from twisted.test.proto_helpers import MemoryReactor, StringTransport -from twisted.web.client import Agent, ResponseNeverReceived +from twisted.web.client import ResponseNeverReceived from twisted.web.http import HTTPChannel -from twisted.web.http_headers import Headers -from synapse.api.errors import HttpResponseException, RequestSendFailed +from synapse.api.errors import RequestSendFailed from synapse.http.matrixfederationclient import ( ByteParser, MatrixFederationHttpClient, @@ -40,9 +39,7 @@ from synapse.logging.context import ( from synapse.server import HomeServer from synapse.util import Clock -from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.server import FakeTransport -from tests.test_utils import FakeResponse from tests.unittest import HomeserverTestCase, override_config @@ -661,181 +658,3 @@ class FederationClientTests(HomeserverTestCase): self.assertEqual(self.cl.max_short_retry_delay_seconds, 7) self.assertEqual(self.cl.max_long_retries, 20) self.assertEqual(self.cl.max_short_retries, 5) - - -class FederationClientProxyTests(BaseMultiWorkerStreamTestCase): - def default_config(self) -> Dict[str, Any]: - conf = super().default_config() - conf["instance_map"] = { - "main": {"host": "testserv", "port": 8765}, - "federation_sender": {"host": "testserv", "port": 1001}, - } - return conf - - @override_config({"outbound_federation_restricted_to": ["federation_sender"]}) - def test_proxy_requests_through_federation_sender_worker(self) -> None: - """ - Test that all outbound federation requests go through the `federation_sender` - worker - """ - # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance - # so we can act like some remote server responding to requests - mock_client_on_federation_sender = Mock() - mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True) - mock_client_on_federation_sender.agent = mock_agent_on_federation_sender - - # Create the `federation_sender` worker - self.federation_sender = self.make_worker_hs( - "synapse.app.generic_worker", - {"worker_name": "federation_sender"}, - federation_http_client=mock_client_on_federation_sender, - ) - - # Fake `remoteserv:8008` responding to requests - mock_agent_on_federation_sender.request.side_effect = ( - lambda *args, **kwargs: defer.succeed( - FakeResponse.json( - payload={ - "foo": "bar", - } - ) - ) - ) - - # This federation request from the main process should be proxied through the - # `federation_sender` worker off to the remote server - test_request_from_main_process_d = defer.ensureDeferred( - self.hs.get_federation_http_client().get_json("remoteserv:8008", "foo/bar") - ) - - # Pump the reactor so our deferred goes through the motions - self.pump() - - # Make sure that the request was proxied through the `federation_sender` worker - mock_agent_on_federation_sender.request.assert_called_once_with( - b"GET", - b"matrix-federation://remoteserv:8008/foo/bar", - headers=ANY, - bodyProducer=ANY, - ) - - # Make sure the response is as expected back on the main worker - res = self.successResultOf(test_request_from_main_process_d) - 
self.assertEqual(res, {"foo": "bar"}) - - @override_config({"outbound_federation_restricted_to": ["federation_sender"]}) - def test_proxy_request_with_network_error_through_federation_sender_worker( - self, - ) -> None: - """ - Test that when the outbound federation request fails with a network-related - error, a sensible error makes its way back to the main process. - """ - # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance - # so we can act like some remote server responding to requests - mock_client_on_federation_sender = Mock() - mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True) - mock_client_on_federation_sender.agent = mock_agent_on_federation_sender - - # Create the `federation_sender` worker - self.federation_sender = self.make_worker_hs( - "synapse.app.generic_worker", - {"worker_name": "federation_sender"}, - federation_http_client=mock_client_on_federation_sender, - ) - - # Fake `remoteserv:8008` responding to requests - mock_agent_on_federation_sender.request.side_effect = ( - lambda *args, **kwargs: defer.fail(ResponseNeverReceived("fake error")) - ) - - # This federation request from the main process should be proxied through the - # `federation_sender` worker off to the remote server - test_request_from_main_process_d = defer.ensureDeferred( - self.hs.get_federation_http_client().get_json("remoteserv:8008", "foo/bar") - ) - - # Pump the reactor so our deferred goes through the motions. We pump with 10 - # seconds (0.1 * 100) so the `MatrixFederationHttpClient` runs out of retries - # and finally passes along the error response. - self.pump(0.1) - - # Make sure that the request was proxied through the `federation_sender` worker - mock_agent_on_federation_sender.request.assert_called_with( - b"GET", - b"matrix-federation://remoteserv:8008/foo/bar", - headers=ANY, - bodyProducer=ANY, - ) - - # Make sure we get some sort of error back on the main worker - failure_res = self.failureResultOf(test_request_from_main_process_d) - self.assertIsInstance(failure_res.value, RequestSendFailed) - self.assertIsInstance(failure_res.value.inner_exception, HttpResponseException) - - @override_config({"outbound_federation_restricted_to": ["federation_sender"]}) - def test_proxy_requests_and_discards_hop_by_hop_headers(self) -> None: - """ - Test to make sure hop-by-hop headers and additional headers defined in the - `Connection` header are discarded when proxying requests - """ - # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance - # so we can act like some remote server responding to requests - mock_client_on_federation_sender = Mock() - mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True) - mock_client_on_federation_sender.agent = mock_agent_on_federation_sender - - # Create the `federation_sender` worker - self.federation_sender = self.make_worker_hs( - "synapse.app.generic_worker", - {"worker_name": "federation_sender"}, - federation_http_client=mock_client_on_federation_sender, - ) - - # Fake `remoteserv:8008` responding to requests - mock_agent_on_federation_sender.request.side_effect = lambda *args, **kwargs: defer.succeed( - FakeResponse( - code=200, - body=b'{"foo": "bar"}', - headers=Headers( - { - "Content-Type": ["application/json"], - "Connection": ["close, X-Foo, X-Bar"], - # Should be removed because it's defined in the `Connection` header - "X-Foo": ["foo"], - "X-Bar": ["bar"], - # Should be removed because it's a hop-by-hop header - "Proxy-Authorization": "abcdef", - } - ), - ) - )
- - # This federation request from the main process should be proxied through the - # `federation_sender` worker off to the remote server - test_request_from_main_process_d = defer.ensureDeferred( - self.hs.get_federation_http_client().get_json_with_headers( - "remoteserv:8008", "foo/bar" - ) - ) - - # Pump the reactor so our deferred goes through the motions - self.pump() - - # Make sure that the request was proxied through the `federation_sender` worker - mock_agent_on_federation_sender.request.assert_called_once_with( - b"GET", - b"matrix-federation://remoteserv:8008/foo/bar", - headers=ANY, - bodyProducer=ANY, - ) - - res, headers = self.successResultOf(test_request_from_main_process_d) - header_names = set(headers.keys()) - - # Make sure the response does not include the hop-by-hop headers - self.assertNotIn(b"X-Foo", header_names) - self.assertNotIn(b"X-Bar", header_names) - self.assertNotIn(b"Proxy-Authorization", header_names) - # Make sure the response is as expected back on the main worker - self.assertEqual(res, {"foo": "bar"}) diff --git a/tests/http/test_proxy.py b/tests/http/test_proxy.py deleted file mode 100644 index 0dc9ba8e05..0000000000 --- a/tests/http/test_proxy.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2023 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from typing import Set - -from parameterized import parameterized - -from synapse.http.proxy import parse_connection_header_value - -from tests.unittest import TestCase - - -class ProxyTests(TestCase): - @parameterized.expand( - [ - [b"close, X-Foo, X-Bar", {"Close", "X-Foo", "X-Bar"}], - # No whitespace - [b"close,X-Foo,X-Bar", {"Close", "X-Foo", "X-Bar"}], - # More whitespace - [b"close, X-Foo, X-Bar", {"Close", "X-Foo", "X-Bar"}], - # "close" directive not in the first position - [b"X-Foo, X-Bar, close", {"X-Foo", "X-Bar", "Close"}], - # Normalizes header capitalization - [b"keep-alive, x-fOo, x-bAr", {"Keep-Alive", "X-Foo", "X-Bar"}], - # Handles header names with whitespace - [ - b"keep-alive, x foo, x bar", - {"Keep-Alive", "X foo", "X bar"}, - ], - ] - ) - def test_parse_connection_header_value( - self, - connection_header_value: bytes, - expected_extra_headers_to_remove: Set[str], - ) -> None: - """ - Tests that the connection header value is parsed correctly - """ - self.assertEqual( - expected_extra_headers_to_remove, - parse_connection_header_value(connection_header_value), - ) diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 96badc46b0..eb9b1f1cd9 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -69,10 +69,10 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): # Make a new HomeServer object for the worker self.reactor.lookups["testserv"] = "1.2.3.4" self.worker_hs = self.setup_test_homeserver( + federation_http_client=None, homeserver_to_use=GenericWorkerServer, config=self._get_worker_hs_config(), reactor=self.reactor, - federation_http_client=None, ) # Since we use sqlite in memory databases we need to make sure the @@ -380,7 +380,6 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): server_version_string="1", max_request_body_size=8192, reactor=self.reactor, - federation_agent=worker_hs.get_federation_http_client().agent, ) worker_hs.get_replication_command_handler().start_replication(worker_hs) diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py index a324b4d31d..08703206a9 100644 --- a/tests/replication/test_federation_sender_shard.py +++ b/tests/replication/test_federation_sender_shard.py @@ -14,18 +14,14 @@ import logging from unittest.mock import Mock -from netaddr import IPSet - from synapse.api.constants import EventTypes, Membership from synapse.events.builder import EventBuilderFactory from synapse.handlers.typing import TypingWriterHandler -from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent from synapse.rest.admin import register_servlets_for_client_rest_resource from synapse.rest.client import login, room from synapse.types import UserID, create_requester from tests.replication._base import BaseMultiWorkerStreamTestCase -from tests.server import get_clock from tests.test_utils import make_awaitable logger = logging.getLogger(__name__) @@ -45,25 +41,13 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): room.register_servlets, ] - def setUp(self) -> None: - super().setUp() - - reactor, _ = get_clock() - self.matrix_federation_agent = MatrixFederationAgent( - reactor, - tls_client_options_factory=None, - user_agent=b"SynapseInTrialTest/0.0.0", - ip_allowlist=None, - ip_blocklist=IPSet(), - ) - def test_send_event_single_sender(self) -> None: """Test that using a single federation sender worker correctly sends a new event.
""" mock_client = Mock(spec=["put_json"]) mock_client.put_json.return_value = make_awaitable({}) - mock_client.agent = self.matrix_federation_agent + self.make_worker_hs( "synapse.app.generic_worker", { @@ -94,7 +78,6 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): """ mock_client1 = Mock(spec=["put_json"]) mock_client1.put_json.return_value = make_awaitable({}) - mock_client1.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", { @@ -109,7 +92,6 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): mock_client2 = Mock(spec=["put_json"]) mock_client2.put_json.return_value = make_awaitable({}) - mock_client2.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", { @@ -163,7 +145,6 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): """ mock_client1 = Mock(spec=["put_json"]) mock_client1.put_json.return_value = make_awaitable({}) - mock_client1.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", { @@ -178,7 +159,6 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): mock_client2 = Mock(spec=["put_json"]) mock_client2.put_json.return_value = make_awaitable({}) - mock_client2.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", { diff --git a/tests/rest/client/test_presence.py b/tests/rest/client/test_presence.py index e12098102b..dcbb125a3b 100644 --- a/tests/rest/client/test_presence.py +++ b/tests/rest/client/test_presence.py @@ -40,6 +40,7 @@ class PresenceTestCase(unittest.HomeserverTestCase): hs = self.setup_test_homeserver( "red", + federation_http_client=None, federation_client=Mock(), presence_handler=self.presence_handler, ) diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index d013e75d55..f1b4e1ad2f 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -67,6 +67,8 @@ class RoomBase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.hs = self.setup_test_homeserver( "red", + federation_http_client=None, + federation_client=Mock(), ) self.hs.get_federation_handler = Mock() # type: ignore[assignment] diff --git a/tests/storage/test_e2e_room_keys.py b/tests/storage/test_e2e_room_keys.py index f6df31aba4..9cb326d90a 100644 --- a/tests/storage/test_e2e_room_keys.py +++ b/tests/storage/test_e2e_room_keys.py @@ -31,7 +31,7 @@ room_key: RoomKey = { class E2eRoomKeysHandlerTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver("server") + hs = self.setup_test_homeserver("server", federation_http_client=None) self.store = hs.get_datastores().main return hs diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py index 0282673167..857e2caf2e 100644 --- a/tests/storage/test_purge.py +++ b/tests/storage/test_purge.py @@ -27,7 +27,7 @@ class PurgeTests(HomeserverTestCase): servlets = [room.register_servlets] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver("server") + hs = self.setup_test_homeserver("server", federation_http_client=None) return hs def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: diff --git a/tests/storage/test_rollback_worker.py b/tests/storage/test_rollback_worker.py index 809c9f175d..6861d3a6c9 100644 --- a/tests/storage/test_rollback_worker.py +++ 
b/tests/storage/test_rollback_worker.py @@ -45,7 +45,9 @@ def fake_listdir(filepath: str) -> List[str]: class WorkerSchemaTests(HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver(homeserver_to_use=GenericWorkerServer) + hs = self.setup_test_homeserver( + federation_http_client=None, homeserver_to_use=GenericWorkerServer + ) return hs def default_config(self) -> JsonDict: diff --git a/tests/test_server.py b/tests/test_server.py index fe5afebdcd..e266c06a2c 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -38,7 +38,7 @@ from tests.http.server._base import test_disconnect from tests.server import ( FakeChannel, FakeSite, - get_clock, + ThreadedMemoryReactorClock, make_request, setup_test_homeserver, ) @@ -46,11 +46,12 @@ from tests.server import ( class JsonResourceTests(unittest.TestCase): def setUp(self) -> None: - reactor, clock = get_clock() - self.reactor = reactor + self.reactor = ThreadedMemoryReactorClock() + self.hs_clock = Clock(self.reactor) self.homeserver = setup_test_homeserver( self.addCleanup, - clock=clock, + federation_http_client=None, + clock=self.hs_clock, reactor=self.reactor, ) @@ -208,13 +209,7 @@ class JsonResourceTests(unittest.TestCase): class OptionsResourceTests(unittest.TestCase): def setUp(self) -> None: - reactor, clock = get_clock() - self.reactor = reactor - self.homeserver = setup_test_homeserver( - self.addCleanup, - clock=clock, - reactor=self.reactor, - ) + self.reactor = ThreadedMemoryReactorClock() class DummyResource(Resource): isLeaf = True @@ -247,7 +242,6 @@ class OptionsResourceTests(unittest.TestCase): "1.0", max_request_body_size=4096, reactor=self.reactor, - federation_agent=self.homeserver.get_federation_http_client().agent, ) # render the request and return the channel @@ -350,8 +344,7 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): await self.callback(request) def setUp(self) -> None: - reactor, _ = get_clock() - self.reactor = reactor + self.reactor = ThreadedMemoryReactorClock() def test_good_response(self) -> None: async def callback(request: SynapseRequest) -> None: @@ -469,9 +462,9 @@ class DirectServeJsonResourceCancellationTests(unittest.TestCase): """Tests for `DirectServeJsonResource` cancellation.""" def setUp(self) -> None: - reactor, clock = get_clock() - self.reactor = reactor - self.resource = CancellableDirectServeJsonResource(clock) + self.reactor = ThreadedMemoryReactorClock() + self.clock = Clock(self.reactor) + self.resource = CancellableDirectServeJsonResource(self.clock) self.site = FakeSite(self.resource, self.reactor) def test_cancellable_disconnect(self) -> None: @@ -503,9 +496,9 @@ class DirectServeHtmlResourceCancellationTests(unittest.TestCase): """Tests for `DirectServeHtmlResource` cancellation.""" def setUp(self) -> None: - reactor, clock = get_clock() - self.reactor = reactor - self.resource = CancellableDirectServeHtmlResource(clock) + self.reactor = ThreadedMemoryReactorClock() + self.clock = Clock(self.reactor) + self.resource = CancellableDirectServeHtmlResource(self.clock) self.site = FakeSite(self.resource, self.reactor) def test_cancellable_disconnect(self) -> None: diff --git a/tests/unittest.py b/tests/unittest.py index 334a95a917..c73195b32b 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -358,7 +358,6 @@ class HomeserverTestCase(TestCase): server_version_string="1", max_request_body_size=4096, reactor=self.reactor, - federation_agent=self.hs.get_federation_http_client().agent, ) from 
tests.rest.client.utils import RestHelper From 5e82b07d2c79df9623e5567f935e74395c2e5492 Mon Sep 17 00:00:00 2001 From: Shay Date: Mon, 10 Jul 2023 10:39:36 -0700 Subject: [PATCH 205/562] Drop debian buster (#15893) --- .github/workflows/twisted_trunk.yml | 6 +++++- changelog.d/15893.misc | 1 + docs/deprecation_policy.md | 2 +- docs/development/contributing_guide.md | 2 +- scripts-dev/build_debian_packages.py | 1 - 5 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15893.misc diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 55081f8133..f7a4ee7c13 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -96,7 +96,11 @@ jobs: if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest container: - image: matrixdotorg/sytest-synapse:buster + # We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version. + # This job is a canary to warn us about unreleased twisted changes that would cause problems for us if + # they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest + # version, assuming that any incompatibilities on newer versions would also be present on the oldest. + image: matrixdotorg/sytest-synapse:focal volumes: - ${{ github.workspace }}:/src diff --git a/changelog.d/15893.misc b/changelog.d/15893.misc new file mode 100644 index 0000000000..656d73b231 --- /dev/null +++ b/changelog.d/15893.misc @@ -0,0 +1 @@ +Drop Debian Buster since we no longer support Python 3.7. diff --git a/docs/deprecation_policy.md b/docs/deprecation_policy.md index 46c18d7d32..8403664850 100644 --- a/docs/deprecation_policy.md +++ b/docs/deprecation_policy.md @@ -23,7 +23,7 @@ people building from source should ensure they can fetch recent versions of Rust (e.g. by using [rustup](https://rustup.rs/)). The oldest supported version of SQLite is the version -[provided](https://packages.debian.org/buster/libsqlite3-0) by +[provided](https://packages.debian.org/bullseye/libsqlite3-0) by [Debian oldstable](https://wiki.debian.org/DebianOldStable). Context diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index f5ba55afb7..e9210b1776 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -322,7 +322,7 @@ The following command will let you run the integration test with the most common configuration: ```sh -$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:buster +$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal ``` (Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.) diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py index 4c9f134ddd..8fe10f2cb5 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py @@ -23,7 +23,6 @@ from typing import Collection, Optional, Sequence, Set # These are expanded inside the dockerfile to be a fully qualified image name. # e.g. 
docker.io/library/debian:bullseye DISTS = ( - "debian:buster", # oldstable: EOL 2022-08 "debian:bullseye", "debian:bookworm", "debian:sid", From 2328e90fbb65216ff84a08834d3cd99573bccdff Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 10 Jul 2023 17:23:11 -0500 Subject: [PATCH 206/562] Make the media `/upload` tracing less ambiguous (#15888) A lot of the functions in this space have the same name, like `store_file`, and we also do it multiple times for different reasons (main media repo, other storage providers, thumbnails, etc.), so it's good to differentiate them so your head doesn't explode. Follow-up to https://github.com/matrix-org/synapse/pull/15850 Tracing instrumentation to media `/upload` code paths to investigate https://github.com/matrix-org/synapse/issues/15841 --- changelog.d/15888.misc | 1 + synapse/media/media_storage.py | 72 +++++++++++++++++++------------ synapse/media/storage_provider.py | 23 +++++----- 3 files changed, 58 insertions(+), 38 deletions(-) create mode 100644 changelog.d/15888.misc diff --git a/changelog.d/15888.misc b/changelog.d/15888.misc new file mode 100644 index 0000000000..0e49ab23fe --- /dev/null +++ b/changelog.d/15888.misc @@ -0,0 +1 @@ +Add tracing to media `/upload` code paths. diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py index eebcbc48e8..a17ccb3d80 100644 --- a/synapse/media/media_storage.py +++ b/synapse/media/media_storage.py @@ -38,7 +38,7 @@ from twisted.protocols.basic import FileSender from synapse.api.errors import NotFoundError from synapse.logging.context import defer_to_thread, make_deferred_yieldable -from synapse.logging.opentracing import trace +from synapse.logging.opentracing import start_active_span, trace, trace_with_opname from synapse.util import Clock from synapse.util.file_consumer import BackgroundFileConsumer @@ -77,7 +77,7 @@ class MediaStorage: self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self.clock = hs.get_clock() - @trace + @trace_with_opname("MediaStorage.store_file") async def store_file(self, source: IO, file_info: FileInfo) -> str: """Write `source` to the on disk media store, and also any other configured storage providers @@ -91,18 +91,19 @@ class MediaStorage: """ with self.store_into_file(file_info) as (f, fname, finish_cb): - # Write to the main repository + # Write to the main media repository await self.write_to_file(source, f) + # Write to the other storage providers await finish_cb() return fname - @trace + @trace_with_opname("MediaStorage.write_to_file") async def write_to_file(self, source: IO, output: IO) -> None: """Asynchronously write the `source` to `output`.""" await defer_to_thread(self.reactor, _write_file_synchronously, source, output) - @trace + @trace_with_opname("MediaStorage.store_into_file") @contextlib.contextmanager def store_into_file( self, file_info: FileInfo ) -> @@ -117,9 +118,9 @@ class MediaStorage: fname can be used to read the contents from after upload, e.g. to generate thumbnails. - finish_cb must be called and waited on after the file has been - successfully been written to. Should not be called if there was an - error. + finish_cb must be called and waited on after the file has been successfully + written to. Should not be called if there was an error. Checks for spam and + stores the file into the configured storage providers. 
Args: file_info: Info about the file to store @@ -139,35 +140,48 @@ class MediaStorage: finished_called = [False] + main_media_repo_write_trace_scope = start_active_span( + "writing to main media repo" + ) + main_media_repo_write_trace_scope.__enter__() + try: with open(fname, "wb") as f: async def finish() -> None: - # Ensure that all writes have been flushed and close the - # file. - f.flush() - f.close() + # When someone calls finish, we assume they are done writing to the main media repo + main_media_repo_write_trace_scope.__exit__(None, None, None) - spam_check = await self._spam_checker_module_callbacks.check_media_file_for_spam( - ReadableFileWrapper(self.clock, fname), file_info - ) - if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: - logger.info("Blocking media due to spam checker") - # Note that we'll delete the stored media, due to the - # try/except below. The media also won't be stored in - # the DB. - # We currently ignore any additional field returned by - # the spam-check API. - raise SpamMediaException(errcode=spam_check[0]) + with start_active_span("writing to other storage providers"): + # Ensure that all writes have been flushed and close the + # file. + f.flush() + f.close() - for provider in self.storage_providers: - await provider.store_file(path, file_info) + spam_check = await self._spam_checker_module_callbacks.check_media_file_for_spam( + ReadableFileWrapper(self.clock, fname), file_info + ) + if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: + logger.info("Blocking media due to spam checker") + # Note that we'll delete the stored media, due to the + # try/except below. The media also won't be stored in + # the DB. + # We currently ignore any additional field returned by + # the spam-check API. + raise SpamMediaException(errcode=spam_check[0]) - finished_called[0] = True + for provider in self.storage_providers: + with start_active_span(str(provider)): + await provider.store_file(path, file_info) + + finished_called[0] = True yield f, fname, finish except Exception as e: try: + main_media_repo_write_trace_scope.__exit__( + type(e), None, e.__traceback__ + ) os.remove(fname) except Exception: pass @@ -175,7 +189,11 @@ class MediaStorage: raise e from None if not finished_called: - raise Exception("Finished callback not called") + exc = Exception("Finished callback not called") + main_media_repo_write_trace_scope.__exit__( + type(exc), None, exc.__traceback__ + ) + raise exc async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]: """Attempts to fetch media described by file_info from the local cache diff --git a/synapse/media/storage_provider.py b/synapse/media/storage_provider.py index 0aea3a7a0d..70a45cfd5b 100644 --- a/synapse/media/storage_provider.py +++ b/synapse/media/storage_provider.py @@ -20,7 +20,7 @@ from typing import TYPE_CHECKING, Callable, Optional from synapse.config._base import Config from synapse.logging.context import defer_to_thread, run_in_background -from synapse.logging.opentracing import trace +from synapse.logging.opentracing import start_active_span, trace_with_opname from synapse.util.async_helpers import maybe_awaitable from ._base import FileInfo, Responder @@ -87,7 +87,7 @@ class StorageProviderWrapper(StorageProvider): def __str__(self) -> str: return "StorageProviderWrapper[%s]" % (self.backend,) - @trace + @trace_with_opname("StorageProviderWrapper.store_file") async def store_file(self, path: str, file_info: FileInfo) -> None: if not file_info.server_name and not self.store_local: 
return None @@ -116,7 +116,7 @@ class StorageProviderWrapper(StorageProvider): run_in_background(store) - @trace + @trace_with_opname("StorageProviderWrapper.fetch") async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: if file_info.url_cache: # Files in the URL preview cache definitely aren't stored here, @@ -144,7 +144,7 @@ class FileStorageProviderBackend(StorageProvider): def __str__(self) -> str: return "FileStorageProviderBackend[%s]" % (self.base_directory,) - @trace + @trace_with_opname("FileStorageProviderBackend.store_file") async def store_file(self, path: str, file_info: FileInfo) -> None: """See StorageProvider.store_file""" @@ -156,14 +156,15 @@ class FileStorageProviderBackend(StorageProvider): # mypy needs help inferring the type of the second parameter, which is generic shutil_copyfile: Callable[[str, str], str] = shutil.copyfile - await defer_to_thread( - self.hs.get_reactor(), - shutil_copyfile, - primary_fname, - backup_fname, - ) + with start_active_span("shutil_copyfile"): + await defer_to_thread( + self.hs.get_reactor(), + shutil_copyfile, + primary_fname, + backup_fname, + ) - @trace + @trace_with_opname("FileStorageProviderBackend.fetch") async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: """See StorageProvider.fetch""" From b516d919995f3bf36045263376628ff0aa298095 Mon Sep 17 00:00:00 2001 From: Michael Telatynski <7t3chguy@gmail.com> Date: Tue, 11 Jul 2023 09:18:50 +0100 Subject: [PATCH 207/562] Add `Server` to Access-Control-Expose-Headers header (#15908) --- changelog.d/15908.misc | 1 + synapse/http/server.py | 2 +- tests/test_server.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15908.misc diff --git a/changelog.d/15908.misc b/changelog.d/15908.misc new file mode 100644 index 0000000000..3ab8674e03 --- /dev/null +++ b/changelog.d/15908.misc @@ -0,0 +1 @@ +Add `Server` to Access-Control-Expose-Headers header. 
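For context on the change below: `Access-Control-Expose-Headers` is the CORS response header that lists which other response headers cross-origin JavaScript is allowed to read, so adding `Server` lets web clients see which homeserver software answered. A minimal sketch of how one might check the new value against a running homeserver; `matrix.example.com` is a placeholder and the third-party `requests` package is assumed, neither comes from this patch:

```python
import requests

# Hypothetical homeserver URL; substitute a real deployment.
resp = requests.options("https://matrix.example.com/_matrix/client/versions")

# After this patch both header names should be listed, meaning a browser may
# expose them to cross-origin JavaScript.
exposed = resp.headers.get("Access-Control-Expose-Headers", "")
assert "Synapse-Trace-Id" in exposed
assert "Server" in exposed
```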
diff --git a/synapse/http/server.py b/synapse/http/server.py index 933172c873..e411ac7e62 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -910,7 +910,7 @@ def set_cors_headers(request: SynapseRequest) -> None: ) request.setHeader( b"Access-Control-Expose-Headers", - b"Synapse-Trace-Id", + b"Synapse-Trace-Id, Server", ) diff --git a/tests/test_server.py b/tests/test_server.py index e266c06a2c..dc491e06ed 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -268,7 +268,7 @@ class OptionsResourceTests(unittest.TestCase): ) self.assertEqual( channel.headers.getRawHeaders(b"Access-Control-Expose-Headers"), - [b"Synapse-Trace-Id"], + [b"Synapse-Trace-Id, Server"], ) def _check_cors_msc3886_headers(self, channel: FakeChannel) -> None: From 7c7bd9898b071385400ef2e083ae0b85c4935242 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 11 Jul 2023 10:28:11 +0100 Subject: [PATCH 208/562] 1.88.0rc1 --- CHANGES.md | 49 +++++++++++++++++++++++++++++++++++++++ changelog.d/15751.misc | 1 - changelog.d/15782.misc | 1 - changelog.d/15787.misc | 1 - changelog.d/15826.misc | 1 - changelog.d/15844.feature | 1 - changelog.d/15850.misc | 1 - changelog.d/15851.removal | 1 - changelog.d/15852.doc | 1 - changelog.d/15853.misc | 1 - changelog.d/15854.misc | 1 - changelog.d/15860.removal | 1 - changelog.d/15861.misc | 1 - changelog.d/15862.bugfix | 3 --- changelog.d/15872.doc | 1 - changelog.d/15874.misc | 1 - changelog.d/15876.bugfix | 1 - changelog.d/15888.misc | 1 - changelog.d/15892.misc | 1 - changelog.d/15893.misc | 1 - changelog.d/15906.misc | 1 - changelog.d/15907.misc | 1 - changelog.d/15908.misc | 1 - debian/changelog | 6 +++++ pyproject.toml | 2 +- 25 files changed, 56 insertions(+), 25 deletions(-) delete mode 100644 changelog.d/15751.misc delete mode 100644 changelog.d/15782.misc delete mode 100644 changelog.d/15787.misc delete mode 100644 changelog.d/15826.misc delete mode 100644 changelog.d/15844.feature delete mode 100644 changelog.d/15850.misc delete mode 100644 changelog.d/15851.removal delete mode 100644 changelog.d/15852.doc delete mode 100644 changelog.d/15853.misc delete mode 100644 changelog.d/15854.misc delete mode 100644 changelog.d/15860.removal delete mode 100644 changelog.d/15861.misc delete mode 100644 changelog.d/15862.bugfix delete mode 100644 changelog.d/15872.doc delete mode 100644 changelog.d/15874.misc delete mode 100644 changelog.d/15876.bugfix delete mode 100644 changelog.d/15888.misc delete mode 100644 changelog.d/15892.misc delete mode 100644 changelog.d/15893.misc delete mode 100644 changelog.d/15906.misc delete mode 100644 changelog.d/15907.misc delete mode 100644 changelog.d/15908.misc diff --git a/CHANGES.md b/CHANGES.md index 860e89ed99..f3ba4a454f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,52 @@ +# Synapse 1.88.0rc1 (2023-07-11) + +Note that the minimum supported version of Python is now 3.8, as Python 3.7 is now [end-of-life](https://devguide.python.org/versions/). + +### Features + +- Add `not_user_type` param to the [list accounts admin API](https://matrix-org.github.io/synapse/v1.88/admin_api/user_admin_api.html#list-accounts). ([\#15844](https://github.com/matrix-org/synapse/issues/15844)) + +### Bugfixes + +- Pin `pydantic` to `^=1.7.4` to avoid backwards-incompatible API changes from the 2.0.0 release. + Contributed by @PaarthShah. ([\#15862](https://github.com/matrix-org/synapse/issues/15862)) +- Correctly resize thumbnails with pillow version >=10. 
([\#15876](https://github.com/matrix-org/synapse/issues/15876)) + +### Improved Documentation + +- Fixed header levels on the [Admin API "Users"](https://matrix-org.github.io/synapse/v1.87/admin_api/user_admin_api.html) documentation page. Contributed by @sumnerevans at @beeper. ([\#15852](https://github.com/matrix-org/synapse/issues/15852)) +- Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options. ([\#15872](https://github.com/matrix-org/synapse/issues/15872)) + +### Deprecations and Removals + +- Remove support for Python 3.7 and hence for Debian Buster. ([\#15851](https://github.com/matrix-org/synapse/issues/15851), [\#15892](https://github.com/matrix-org/synapse/issues/15892), [\#15893](https://github.com/matrix-org/synapse/issues/15893)) +- Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options. ([\#15860](https://github.com/matrix-org/synapse/issues/15860)) + +### Internal Changes + +- Add foreign key constraint to `event_forward_extremities`. ([\#15751](https://github.com/matrix-org/synapse/issues/15751), [\#15907](https://github.com/matrix-org/synapse/issues/15907)) +- Add read/write style cross-worker locks. ([\#15782](https://github.com/matrix-org/synapse/issues/15782)) +- Stop writing to column `user_id` of tables `profiles` and `user_filters`. ([\#15787](https://github.com/matrix-org/synapse/issues/15787)) +- Use lower isolation level when cleaning old presence stream data to avoid serialization errors. ([\#15826](https://github.com/matrix-org/synapse/issues/15826)) +- Add tracing to media `/upload` code paths. ([\#15850](https://github.com/matrix-org/synapse/issues/15850), [\#15888](https://github.com/matrix-org/synapse/issues/15888)) +- Add a timeout that aborts any Postgres statement taking more than 1 hour. ([\#15853](https://github.com/matrix-org/synapse/issues/15853)) +- Fix the `devenv up` configuration which was ignoring the config overrides. ([\#15854](https://github.com/matrix-org/synapse/issues/15854)) +- Optimised cleanup of old entries in `device_lists_stream`. ([\#15861](https://github.com/matrix-org/synapse/issues/15861)) +- Update the Matrix clients link in the _It works! Synapse is running_ landing page. ([\#15874](https://github.com/matrix-org/synapse/issues/15874)) +- Fix building Synapse with the nightly Rust compiler. ([\#15906](https://github.com/matrix-org/synapse/issues/15906)) +- Add `Server` to Access-Control-Expose-Headers header. ([\#15908](https://github.com/matrix-org/synapse/issues/15908)) + +### Updates to locked dependencies + +* Bump authlib from 1.2.0 to 1.2.1. ([\#15864](https://github.com/matrix-org/synapse/issues/15864)) +* Bump importlib-metadata from 6.6.0 to 6.7.0. ([\#15865](https://github.com/matrix-org/synapse/issues/15865)) +* Bump lxml from 4.9.2 to 4.9.3. ([\#15897](https://github.com/matrix-org/synapse/issues/15897)) +* Bump regex from 1.8.4 to 1.9.1. ([\#15902](https://github.com/matrix-org/synapse/issues/15902)) +* Bump ruff from 0.0.275 to 0.0.277. ([\#15900](https://github.com/matrix-org/synapse/issues/15900)) +* Bump sentry-sdk from 1.25.1 to 1.26.0. ([\#15867](https://github.com/matrix-org/synapse/issues/15867)) +* Bump serde_json from 1.0.99 to 1.0.100. ([\#15901](https://github.com/matrix-org/synapse/issues/15901)) +* Bump types-pyopenssl from 23.2.0.0 to 23.2.0.1. 
([\#15866](https://github.com/matrix-org/synapse/issues/15866)) + # Synapse 1.87.0 (2023-07-04) Please note that this will be the last release of Synapse that is compatible with diff --git a/changelog.d/15751.misc b/changelog.d/15751.misc deleted file mode 100644 index e0ecea6c2f..0000000000 --- a/changelog.d/15751.misc +++ /dev/null @@ -1 +0,0 @@ -Add foreign key constraint to `event_forward_extremities`. diff --git a/changelog.d/15782.misc b/changelog.d/15782.misc deleted file mode 100644 index aae493b973..0000000000 --- a/changelog.d/15782.misc +++ /dev/null @@ -1 +0,0 @@ -Add read/write style cross-worker locks. diff --git a/changelog.d/15787.misc b/changelog.d/15787.misc deleted file mode 100644 index bd7536d36e..0000000000 --- a/changelog.d/15787.misc +++ /dev/null @@ -1 +0,0 @@ -Stop writing to column `user_id` of tables `profiles` and `user_filters`. diff --git a/changelog.d/15826.misc b/changelog.d/15826.misc deleted file mode 100644 index 88903f3f7c..0000000000 --- a/changelog.d/15826.misc +++ /dev/null @@ -1 +0,0 @@ -Use lower isolation level when cleaning old presence stream data to avoid serialization errors. diff --git a/changelog.d/15844.feature b/changelog.d/15844.feature deleted file mode 100644 index c220055d41..0000000000 --- a/changelog.d/15844.feature +++ /dev/null @@ -1 +0,0 @@ -Add `not_user_type` param to the list accounts admin API. diff --git a/changelog.d/15850.misc b/changelog.d/15850.misc deleted file mode 100644 index 0e49ab23fe..0000000000 --- a/changelog.d/15850.misc +++ /dev/null @@ -1 +0,0 @@ -Add tracing to media `/upload` code paths. diff --git a/changelog.d/15851.removal b/changelog.d/15851.removal deleted file mode 100644 index e08df4c136..0000000000 --- a/changelog.d/15851.removal +++ /dev/null @@ -1 +0,0 @@ -Remove support for Python 3.7. diff --git a/changelog.d/15852.doc b/changelog.d/15852.doc deleted file mode 100644 index 060b55d106..0000000000 --- a/changelog.d/15852.doc +++ /dev/null @@ -1 +0,0 @@ -Fixed header levels on the Admin API "Users" documentation page. Contributed by @sumnerevans at @beeper. diff --git a/changelog.d/15853.misc b/changelog.d/15853.misc deleted file mode 100644 index 3e9516b1ad..0000000000 --- a/changelog.d/15853.misc +++ /dev/null @@ -1 +0,0 @@ -Add a timeout that aborts any Postgres statement taking more than 1 hour. \ No newline at end of file diff --git a/changelog.d/15854.misc b/changelog.d/15854.misc deleted file mode 100644 index 8c940dd9c5..0000000000 --- a/changelog.d/15854.misc +++ /dev/null @@ -1 +0,0 @@ -Fix the `devenv up` configuration which was ignoring the config overrides. \ No newline at end of file diff --git a/changelog.d/15860.removal b/changelog.d/15860.removal deleted file mode 100644 index 1993bf0299..0000000000 --- a/changelog.d/15860.removal +++ /dev/null @@ -1 +0,0 @@ -Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options. diff --git a/changelog.d/15861.misc b/changelog.d/15861.misc deleted file mode 100644 index 6f320eab81..0000000000 --- a/changelog.d/15861.misc +++ /dev/null @@ -1 +0,0 @@ -Optimised cleanup of old entries in device_lists_stream. diff --git a/changelog.d/15862.bugfix b/changelog.d/15862.bugfix deleted file mode 100644 index 8eb6aa9a7f..0000000000 --- a/changelog.d/15862.bugfix +++ /dev/null @@ -1,3 +0,0 @@ -Pin `pydantic` to ^=1.7.4 to avoid backwards-incompatible API changes from the 2.0.0 release. -Resolves https://github.com/matrix-org/synapse/issues/15858. -Contributed by @PaarthShah. 
diff --git a/changelog.d/15872.doc b/changelog.d/15872.doc deleted file mode 100644 index 1993bf0299..0000000000 --- a/changelog.d/15872.doc +++ /dev/null @@ -1 +0,0 @@ -Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options. diff --git a/changelog.d/15874.misc b/changelog.d/15874.misc deleted file mode 100644 index 0d434bef5d..0000000000 --- a/changelog.d/15874.misc +++ /dev/null @@ -1 +0,0 @@ -Updated the link in home page from https://matrix.org/docs/projects/try-matrix-now.html#clients to https://matrix.org/ecosystem/clients/. diff --git a/changelog.d/15876.bugfix b/changelog.d/15876.bugfix deleted file mode 100644 index 9dbae04c4f..0000000000 --- a/changelog.d/15876.bugfix +++ /dev/null @@ -1 +0,0 @@ -Correctly resize thumbnails with pillow version >=10. diff --git a/changelog.d/15888.misc b/changelog.d/15888.misc deleted file mode 100644 index 0e49ab23fe..0000000000 --- a/changelog.d/15888.misc +++ /dev/null @@ -1 +0,0 @@ -Add tracing to media `/upload` code paths. diff --git a/changelog.d/15892.misc b/changelog.d/15892.misc deleted file mode 100644 index e5a123d218..0000000000 --- a/changelog.d/15892.misc +++ /dev/null @@ -1 +0,0 @@ -Stop running sytest on buster/python3.7. diff --git a/changelog.d/15893.misc b/changelog.d/15893.misc deleted file mode 100644 index 656d73b231..0000000000 --- a/changelog.d/15893.misc +++ /dev/null @@ -1 +0,0 @@ -Drop Debian Buster since we no longer support Python 3.7. diff --git a/changelog.d/15906.misc b/changelog.d/15906.misc deleted file mode 100644 index b721b88d56..0000000000 --- a/changelog.d/15906.misc +++ /dev/null @@ -1 +0,0 @@ -Fix building rust with nightly rust compiler. diff --git a/changelog.d/15907.misc b/changelog.d/15907.misc deleted file mode 100644 index e0ecea6c2f..0000000000 --- a/changelog.d/15907.misc +++ /dev/null @@ -1 +0,0 @@ -Add foreign key constraint to `event_forward_extremities`. diff --git a/changelog.d/15908.misc b/changelog.d/15908.misc deleted file mode 100644 index 3ab8674e03..0000000000 --- a/changelog.d/15908.misc +++ /dev/null @@ -1 +0,0 @@ -Add `Server` to Access-Control-Expose-Headers header. diff --git a/debian/changelog b/debian/changelog index 0d9216bee8..763edb8ec2 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.88.0~rc1) stable; urgency=medium + + * New Synapse release 1.88.0rc1. + + -- Synapse Packaging team Tue, 11 Jul 2023 10:20:19 +0100 + matrix-synapse-py3 (1.87.0) stable; urgency=medium * New Synapse release 1.87.0. diff --git a/pyproject.toml b/pyproject.toml index fc1b8c0dad..b61b8b17ac 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.87.0" +version = "1.88.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 4ccfa16081c042b935b58e63e06995af5d3e08f8 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 11 Jul 2023 10:34:09 +0100 Subject: [PATCH 209/562] Call out upgrade notes in README --- CHANGES.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index f3ba4a454f..d71d4124fa 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,10 @@ # Synapse 1.88.0rc1 (2023-07-11) -Note that the minimum supported version of Python is now 3.8, as Python 3.7 is now [end-of-life](https://devguide.python.org/versions/). 
+This release + - raises the minimum supported version of Python to 3.8, as Python 3.7 is now [end-of-life](https://devguide.python.org/versions/), and + - removes deprecated config options related to worker deployment. + +See [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.88/docs/upgrade.md#upgrading-to-v1880) for more information. ### Features @@ -19,8 +23,8 @@ Note that the minimum supported version of Python is now 3.8, as Python 3.7 is n ### Deprecations and Removals +- **Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options.** See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.88/docs/upgrade.md#removal-of-worker_replication_-settings) for more details. ([\#15860](https://github.com/matrix-org/synapse/issues/15860)) - Remove support for Python 3.7 and hence for Debian Buster. ([\#15851](https://github.com/matrix-org/synapse/issues/15851), [\#15892](https://github.com/matrix-org/synapse/issues/15892), [\#15893](https://github.com/matrix-org/synapse/issues/15893)) -- Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options. ([\#15860](https://github.com/matrix-org/synapse/issues/15860)) ### Internal Changes From 92014fbf7286afa9099d162b22feda868af820f8 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 11 Jul 2023 15:16:19 +0100 Subject: [PATCH 210/562] Don't build wheels for Python 3.7 (#15917) * Don't build wheels for CPython or PyPy 3.7 * Update pyproject.toml comments * Manually update the changelog --- CHANGES.md | 2 +- pyproject.toml | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index d71d4124fa..22d56a9a01 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -24,7 +24,7 @@ See [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.88 ### Deprecations and Removals - **Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options.** See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.88/docs/upgrade.md#removal-of-worker_replication_-settings) for more details. ([\#15860](https://github.com/matrix-org/synapse/issues/15860)) -- Remove support for Python 3.7 and hence for Debian Buster. ([\#15851](https://github.com/matrix-org/synapse/issues/15851), [\#15892](https://github.com/matrix-org/synapse/issues/15892), [\#15893](https://github.com/matrix-org/synapse/issues/15893)) +- Remove support for Python 3.7 and hence for Debian Buster. ([\#15851](https://github.com/matrix-org/synapse/issues/15851), [\#15892](https://github.com/matrix-org/synapse/issues/15892), [\#15893](https://github.com/matrix-org/synapse/issues/15893), [\#15917](https://github.com/matrix-org/synapse/pull/15917)) ### Internal Changes diff --git a/pyproject.toml b/pyproject.toml index b61b8b17ac..d56602b2df 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -373,7 +373,15 @@ build-backend = "poetry.core.masonry.api" [tool.cibuildwheel] # Skip unsupported platforms (by us or by Rust). -skip = "cp36* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64" +# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets. +# We skip: +# - CPython 3.6 and 3.7: EOLed +# - PyPy 3.7: we only support Python 3.8+ +# - musllinux i686: excluded to reduce number of wheels we build. +# c.f. 
https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677 +# - PyPy on Aarch64 and musllinux on aarch64: too slow to build. +# c.f. https://github.com/matrix-org/synapse/pull/14259 +skip = "cp36* cp37* pp37* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64" # We need a rust compiler before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal" From a4243183f0b500f9f30f2d24af19f30a99f65f63 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 11 Jul 2023 12:21:00 -0400 Subject: [PATCH 211/562] Add + as an allowed character for Matrix IDs (MSC4009) (#15911) --- changelog.d/15911.feature | 1 + synapse/config/experimental.py | 3 --- synapse/handlers/register.py | 9 ++------- synapse/handlers/saml.py | 4 ++-- synapse/handlers/sso.py | 6 ++---- synapse/types/__init__.py | 22 +++++----------------- tests/handlers/test_register.py | 11 +++++------ 7 files changed, 17 insertions(+), 39 deletions(-) create mode 100644 changelog.d/15911.feature diff --git a/changelog.d/15911.feature b/changelog.d/15911.feature new file mode 100644 index 0000000000..b24077c6c3 --- /dev/null +++ b/changelog.d/15911.feature @@ -0,0 +1 @@ +Allow `+` in Matrix IDs, per [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009). diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 8e0f5356b4..0970f22a75 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -382,9 +382,6 @@ class ExperimentalConfig(Config): # Check that none of the other config options conflict with MSC3861 when enabled self.msc3861.check_config_conflicts(self.root) - # MSC4009: E.164 Matrix IDs - self.msc4009_e164_mxids = experimental.get("msc4009_e164_mxids", False) - # MSC4010: Do not allow setting m.push_rules account data. 
self.msc4010_push_rules_account_data = experimental.get( "msc4010_push_rules_account_data", False diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index a2d3f03061..3a55056df5 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -143,15 +143,10 @@ class RegistrationHandler: assigned_user_id: Optional[str] = None, inhibit_user_in_use_error: bool = False, ) -> None: - if types.contains_invalid_mxid_characters( - localpart, self.hs.config.experimental.msc4009_e164_mxids - ): - extra_chars = ( - "=_-./+" if self.hs.config.experimental.msc4009_e164_mxids else "=_-./" - ) + if types.contains_invalid_mxid_characters(localpart): raise SynapseError( 400, - f"User ID can only contain characters a-z, 0-9, or '{extra_chars}'", + "User ID can only contain characters a-z, 0-9, or '=_-./+'", Codes.INVALID_USERNAME, ) diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py index 874860d461..6083c9f4b5 100644 --- a/synapse/handlers/saml.py +++ b/synapse/handlers/saml.py @@ -27,9 +27,9 @@ from synapse.http.servlet import parse_string from synapse.http.site import SynapseRequest from synapse.module_api import ModuleApi from synapse.types import ( + MXID_LOCALPART_ALLOWED_CHARACTERS, UserID, map_username_to_mxid_localpart, - mxid_localpart_allowed_characters, ) from synapse.util.iterutils import chunk_seq @@ -371,7 +371,7 @@ class SamlHandler: DOT_REPLACE_PATTERN = re.compile( - "[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),) + "[^%s]" % (re.escape("".join(MXID_LOCALPART_ALLOWED_CHARACTERS)),) ) diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index c3a51722bd..4d29328a74 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -225,8 +225,6 @@ class SsoHandler: self._consent_at_registration = hs.config.consent.user_consent_at_registration - self._e164_mxids = hs.config.experimental.msc4009_e164_mxids - def register_identity_provider(self, p: SsoIdentityProvider) -> None: p_id = p.idp_id assert p_id not in self._identity_providers @@ -713,7 +711,7 @@ class SsoHandler: # Since the localpart is provided via a potentially untrusted module, # ensure the MXID is valid before registering. if not attributes.localpart or contains_invalid_mxid_characters( - attributes.localpart, self._e164_mxids + attributes.localpart ): raise MappingException("localpart is invalid: %s" % (attributes.localpart,)) @@ -946,7 +944,7 @@ class SsoHandler: localpart, ) - if contains_invalid_mxid_characters(localpart, self._e164_mxids): + if contains_invalid_mxid_characters(localpart): raise SynapseError(400, "localpart is invalid: %s" % (localpart,)) user_id = UserID(localpart, self._server_name).to_string() user_infos = await self._store.get_users_by_id_case_insensitive(user_id) diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 095be070e0..fdfd465c8d 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -348,22 +348,15 @@ class EventID(DomainSpecificString): SIGIL = "$" -mxid_localpart_allowed_characters = set( - "_-./=" + string.ascii_lowercase + string.digits +MXID_LOCALPART_ALLOWED_CHARACTERS = set( + "_-./=+" + string.ascii_lowercase + string.digits ) -# MSC4007 adds the + to the allowed characters. -# -# TODO If this was accepted, update the SSO code to support this, see the callers -# of map_username_to_mxid_localpart. -extended_mxid_localpart_allowed_characters = mxid_localpart_allowed_characters | {"+"} # Guest user IDs are purely numeric. 
GUEST_USER_ID_PATTERN = re.compile(r"^\d+$") -def contains_invalid_mxid_characters( - localpart: str, use_extended_character_set: bool -) -> bool: +def contains_invalid_mxid_characters(localpart: str) -> bool: """Check for characters not allowed in an mxid or groupid localpart Args: @@ -374,12 +367,7 @@ def contains_invalid_mxid_characters( Returns: True if there are any naughty characters """ - allowed_characters = ( - extended_mxid_localpart_allowed_characters - if use_extended_character_set - else mxid_localpart_allowed_characters - ) - return any(c not in allowed_characters for c in localpart) + return any(c not in MXID_LOCALPART_ALLOWED_CHARACTERS for c in localpart) UPPER_CASE_PATTERN = re.compile(b"[A-Z_]") @@ -396,7 +384,7 @@ UPPER_CASE_PATTERN = re.compile(b"[A-Z_]") # bytes rather than strings # NON_MXID_CHARACTER_PATTERN = re.compile( - ("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters - {"="})),)).encode( + ("[^%s]" % (re.escape("".join(MXID_LOCALPART_ALLOWED_CHARACTERS - {"="})),)).encode( "ascii" ) ) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 8d8584609b..54eeec228e 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -587,17 +587,16 @@ class RegistrationTestCase(unittest.HomeserverTestCase): self.assertFalse(self.get_success(d)) def test_invalid_user_id(self) -> None: - invalid_user_id = "+abcd" + invalid_user_id = "^abcd" self.get_failure( self.handler.register_user(localpart=invalid_user_id), SynapseError ) - @override_config({"experimental_features": {"msc4009_e164_mxids": True}}) - def text_extended_user_ids(self) -> None: - """+ should be allowed according to MSC4009.""" - valid_user_id = "+1234" + def test_special_chars(self) -> None: + """Ensure that characters which are allowed in Matrix IDs work.""" + valid_user_id = "a1234_-./=+" user_id = self.get_success(self.handler.register_user(localpart=valid_user_id)) - self.assertEqual(user_id, valid_user_id) + self.assertEqual(user_id, f"@{valid_user_id}:test") def test_invalid_user_id_length(self) -> None: invalid_user_id = "x" * 256 From 224ef0b669fdd85925d66deb38ba1b51c5aaa1bd Mon Sep 17 00:00:00 2001 From: Jason Little Date: Tue, 11 Jul 2023 13:08:06 -0500 Subject: [PATCH 212/562] Unix Sockets for HTTP Replication (#15708) Unix socket support for `federation` and `client` Listeners has existed now for a little while(since [1.81.0](https://github.com/matrix-org/synapse/pull/15353)), but there was one last hold out before it could be complete: HTTP Replication communication. This should finish it up. The Listeners would have always worked, but would have had no way to be talked to/at. 
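As a rough illustration of what this enables (a sketch, not code from this patch): on the client side, Twisted treats a Unix socket as just another endpoint type, which is why an instance-map entry can carry either a host/port pair or a socket path. `endpoint_for` and `location` below are hypothetical names standing in for the `InstanceTcpLocationConfig`/`InstanceUnixLocationConfig` handling added further down.

```python
from twisted.internet import reactor
from twisted.internet.endpoints import HostnameEndpoint, UNIXClientEndpoint


def endpoint_for(location):
    # A Unix-socket location is identified by having a `path` attribute,
    # mirroring the two config shapes this patch introduces.
    if hasattr(location, "path"):
        return UNIXClientEndpoint(reactor, location.path)
    # Otherwise fall back to an ordinary TCP connection.
    return HostnameEndpoint(reactor, location.host, location.port)
```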
--------- Co-authored-by: Eric Eastwood Co-authored-by: Olivier Wilkinson (reivilibre) Co-authored-by: Eric Eastwood --- changelog.d/15708.feature | 1 + docker/conf-workers/nginx.conf.j2 | 4 + docker/conf-workers/shared.yaml.j2 | 3 + docker/conf-workers/supervisord.conf.j2 | 4 + docker/conf-workers/worker.yaml.j2 | 4 + docker/conf/homeserver.yaml | 10 +- docker/configure_workers_and_start.py | 104 +++++++++++++----- docs/development/contributing_guide.md | 1 + .../configuration/config_documentation.md | 52 ++++++++- docs/workers.md | 9 +- scripts-dev/complement.sh | 4 + synapse/config/workers.py | 24 +++- synapse/http/replicationagent.py | 47 +++++--- synapse/logging/opentracing.py | 6 +- tests/replication/_base.py | 7 +- tests/server.py | 32 +++++- 16 files changed, 260 insertions(+), 52 deletions(-) create mode 100644 changelog.d/15708.feature diff --git a/changelog.d/15708.feature b/changelog.d/15708.feature new file mode 100644 index 0000000000..06a6c959ab --- /dev/null +++ b/changelog.d/15708.feature @@ -0,0 +1 @@ +Add Unix Socket support for HTTP Replication Listeners. Document and provide usage instructions for utilizing Unix sockets in Synapse. Contributed by Jason Little. diff --git a/docker/conf-workers/nginx.conf.j2 b/docker/conf-workers/nginx.conf.j2 index 967fc65e79..d1e02af723 100644 --- a/docker/conf-workers/nginx.conf.j2 +++ b/docker/conf-workers/nginx.conf.j2 @@ -35,7 +35,11 @@ server { # Send all other traffic to the main process location ~* ^(\\/_matrix|\\/_synapse) { +{% if using_unix_sockets %} + proxy_pass http://unix:/run/main_public.sock; +{% else %} proxy_pass http://localhost:8080; +{% endif %} proxy_set_header X-Forwarded-For $remote_addr; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header Host $host; diff --git a/docker/conf-workers/shared.yaml.j2 b/docker/conf-workers/shared.yaml.j2 index 92d25386dc..1dfc60ad11 100644 --- a/docker/conf-workers/shared.yaml.j2 +++ b/docker/conf-workers/shared.yaml.j2 @@ -6,6 +6,9 @@ {% if enable_redis %} redis: enabled: true + {% if using_unix_sockets %} + path: /tmp/redis.sock + {% endif %} {% endif %} {% if appservice_registrations is not none %} diff --git a/docker/conf-workers/supervisord.conf.j2 b/docker/conf-workers/supervisord.conf.j2 index 9f1e03cfc0..da93358051 100644 --- a/docker/conf-workers/supervisord.conf.j2 +++ b/docker/conf-workers/supervisord.conf.j2 @@ -19,7 +19,11 @@ username=www-data autorestart=true [program:redis] +{% if using_unix_sockets %} +command=/usr/local/bin/prefix-log /usr/local/bin/redis-server --unixsocket /tmp/redis.sock +{% else %} command=/usr/local/bin/prefix-log /usr/local/bin/redis-server +{% endif %} priority=1 stdout_logfile=/dev/stdout stdout_logfile_maxbytes=0 diff --git a/docker/conf-workers/worker.yaml.j2 b/docker/conf-workers/worker.yaml.j2 index 44c6e413cf..29ec74b4ea 100644 --- a/docker/conf-workers/worker.yaml.j2 +++ b/docker/conf-workers/worker.yaml.j2 @@ -8,7 +8,11 @@ worker_name: "{{ name }}" worker_listeners: - type: http +{% if using_unix_sockets %} + path: "/run/worker.{{ port }}" +{% else %} port: {{ port }} +{% endif %} {% if listener_resources %} resources: - names: diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml index f10f78a48c..c46b955d63 100644 --- a/docker/conf/homeserver.yaml +++ b/docker/conf/homeserver.yaml @@ -36,12 +36,17 @@ listeners: # Allow configuring in case we want to reverse proxy 8008 # using another process in the same container +{% if SYNAPSE_USE_UNIX_SOCKET %} + # Unix sockets don't care about TLS or IP addresses or 
ports + - path: '/run/main_public.sock' + type: http +{% else %} - port: {{ SYNAPSE_HTTP_PORT or 8008 }} tls: false bind_addresses: ['::'] type: http x_forwarded: false - +{% endif %} resources: - names: [client] compress: true @@ -57,8 +62,11 @@ database: user: "{{ POSTGRES_USER or "synapse" }}" password: "{{ POSTGRES_PASSWORD }}" database: "{{ POSTGRES_DB or "synapse" }}" +{% if not SYNAPSE_USE_UNIX_SOCKET %} +{# Synapse will use a default unix socket for Postgres when host/port is not specified (behavior from `psycopg2`). #} host: "{{ POSTGRES_HOST or "db" }}" port: "{{ POSTGRES_PORT or "5432" }}" +{% endif %} cp_min: 5 cp_max: 10 {% else %} diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 62fb88daab..dc824038b5 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -74,6 +74,9 @@ MAIN_PROCESS_HTTP_LISTENER_PORT = 8080 MAIN_PROCESS_INSTANCE_NAME = "main" MAIN_PROCESS_LOCALHOST_ADDRESS = "127.0.0.1" MAIN_PROCESS_REPLICATION_PORT = 9093 +# Obviously, these would only be used with the UNIX socket option +MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH = "/run/main_public.sock" +MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH = "/run/main_private.sock" # A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced # during processing with the name of the worker. @@ -407,11 +410,15 @@ def add_worker_roles_to_shared_config( ) # Map of stream writer instance names to host/ports combos - instance_map[worker_name] = { - "host": "localhost", - "port": worker_port, - } - + if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False): + instance_map[worker_name] = { + "path": f"/run/worker.{worker_port}", + } + else: + instance_map[worker_name] = { + "host": "localhost", + "port": worker_port, + } # Update the list of stream writers. It's convenient that the name of the worker # type is the same as the stream to write. Iterate over the whole list in case there # is more than one. @@ -423,10 +430,15 @@ def add_worker_roles_to_shared_config( # Map of stream writer instance names to host/ports combos # For now, all stream writers need http replication ports - instance_map[worker_name] = { - "host": "localhost", - "port": worker_port, - } + if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False): + instance_map[worker_name] = { + "path": f"/run/worker.{worker_port}", + } + else: + instance_map[worker_name] = { + "host": "localhost", + "port": worker_port, + } def merge_worker_template_configs( @@ -718,17 +730,29 @@ def generate_worker_files( # Note that yaml cares about indentation, so care should be taken to insert lines # into files at the correct indentation below. + # Convenience helper for if using unix sockets instead of host:port + using_unix_sockets = environ.get("SYNAPSE_USE_UNIX_SOCKET", False) # First read the original config file and extract the listeners block. Then we'll # add another listener for replication. Later we'll write out the result to the # shared config file. 
- listeners = [ - { - "port": MAIN_PROCESS_REPLICATION_PORT, - "bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS, - "type": "http", - "resources": [{"names": ["replication"]}], - } - ] + listeners: List[Any] + if using_unix_sockets: + listeners = [ + { + "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH, + "type": "http", + "resources": [{"names": ["replication"]}], + } + ] + else: + listeners = [ + { + "port": MAIN_PROCESS_REPLICATION_PORT, + "bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS, + "type": "http", + "resources": [{"names": ["replication"]}], + } + ] with open(config_path) as file_stream: original_config = yaml.safe_load(file_stream) original_listeners = original_config.get("listeners") @@ -769,7 +793,17 @@ def generate_worker_files( # A list of internal endpoints to healthcheck, starting with the main process # which exists even if no workers do. - healthcheck_urls = ["http://localhost:8080/health"] + # This list ends up being part of the command line to curl, (curl added support for + # Unix sockets in version 7.40). + if using_unix_sockets: + healthcheck_urls = [ + f"--unix-socket {MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH} " + # The scheme and hostname from the following URL are ignored. + # The only thing that matters is the path `/health` + "http://localhost/health" + ] + else: + healthcheck_urls = ["http://localhost:8080/health"] # Get the set of all worker types that we have configured all_worker_types_in_use = set(chain(*requested_worker_types.values())) @@ -806,8 +840,12 @@ def generate_worker_files( # given worker_type needs to stay assigned and not be replaced. worker_config["shared_extra_conf"].update(shared_config) shared_config = worker_config["shared_extra_conf"] - - healthcheck_urls.append("http://localhost:%d/health" % (worker_port,)) + if using_unix_sockets: + healthcheck_urls.append( + f"--unix-socket /run/worker.{worker_port} http://localhost/health" + ) + else: + healthcheck_urls.append("http://localhost:%d/health" % (worker_port,)) # Update the shared config with sharding-related options if necessary add_worker_roles_to_shared_config( @@ -826,6 +864,7 @@ def generate_worker_files( "/conf/workers/{name}.yaml".format(name=worker_name), **worker_config, worker_log_config_filepath=log_config_filepath, + using_unix_sockets=using_unix_sockets, ) # Save this worker's port number to the correct nginx upstreams @@ -846,8 +885,13 @@ def generate_worker_files( nginx_upstream_config = "" for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items(): body = "" - for port in upstream_worker_ports: - body += f" server localhost:{port};\n" + if using_unix_sockets: + for port in upstream_worker_ports: + body += f" server unix:/run/worker.{port};\n" + + else: + for port in upstream_worker_ports: + body += f" server localhost:{port};\n" # Add to the list of configured upstreams nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format( @@ -877,10 +921,15 @@ def generate_worker_files( # If there are workers, add the main process to the instance_map too. 
if workers_in_use: instance_map = shared_config.setdefault("instance_map", {}) - instance_map[MAIN_PROCESS_INSTANCE_NAME] = { - "host": MAIN_PROCESS_LOCALHOST_ADDRESS, - "port": MAIN_PROCESS_REPLICATION_PORT, - } + if using_unix_sockets: + instance_map[MAIN_PROCESS_INSTANCE_NAME] = { + "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH, + } + else: + instance_map[MAIN_PROCESS_INSTANCE_NAME] = { + "host": MAIN_PROCESS_LOCALHOST_ADDRESS, + "port": MAIN_PROCESS_REPLICATION_PORT, + } # Shared homeserver config convert( @@ -890,6 +939,7 @@ def generate_worker_files( appservice_registrations=appservice_registrations, enable_redis=workers_in_use, workers_in_use=workers_in_use, + using_unix_sockets=using_unix_sockets, ) # Nginx config @@ -900,6 +950,7 @@ def generate_worker_files( upstream_directives=nginx_upstream_config, tls_cert_path=os.environ.get("SYNAPSE_TLS_CERT"), tls_key_path=os.environ.get("SYNAPSE_TLS_KEY"), + using_unix_sockets=using_unix_sockets, ) # Supervisord config @@ -909,6 +960,7 @@ def generate_worker_files( "/etc/supervisor/supervisord.conf", main_config_path=config_path, enable_redis=workers_in_use, + using_unix_sockets=using_unix_sockets, ) convert( diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index e9210b1776..698687b91f 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -370,6 +370,7 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data See the [worker documentation](../workers.md) for additional information on workers. - Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one. - Passing `PODMAN=1` will use the [podman](https://podman.io/) container runtime, instead of docker. +- Passing `UNIX_SOCKETS=1` will utilise Unix socket functionality for Synapse, Redis, and Postgres(when applicable). To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g: ```sh diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index ff59cbccc1..d9286e83bc 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -462,6 +462,20 @@ See the docs [request log format](../administration/request_log.md). * `additional_resources`: Only valid for an 'http' listener. A map of additional endpoints which should be loaded via dynamic modules. +Unix socket support (_Added in Synapse 1.88.0_): +* `path`: A path and filename for a Unix socket. Make sure it is located in a + directory with read and write permissions, and that it already exists (the directory + will not be created). Defaults to `None`. + * **Note**: The use of both `path` and `port` options for the same `listener` is not + compatible. + * The `x_forwarded` option defaults to true when using Unix sockets and can be omitted. + * Other options that would not make sense to use with a UNIX socket, such as + `bind_addresses` and `tls` will be ignored and can be removed. +* `mode`: The file permissions to set on the UNIX socket. Defaults to `666` +* **Note:** Must be set as `type: http` (does not support `metrics` and `manhole`). + Also make sure that `metrics` is not included in `resources` -> `names` + + Valid resource names are: * `client`: the client-server API (/_matrix/client), and the synapse admin API (/_synapse/admin). Also implies `media` and `static`. 
@@ -474,7 +488,7 @@ Valid resource names are: * `media`: the media API (/_matrix/media). -* `metrics`: the metrics interface. See [here](../../metrics-howto.md). +* `metrics`: the metrics interface. See [here](../../metrics-howto.md). (Not compatible with Unix sockets) * `openid`: OpenID authentication. See [here](../../openid.md). @@ -533,6 +547,22 @@ listeners: bind_addresses: ['::1', '127.0.0.1'] type: manhole ``` +Example configuration #3: +```yaml +listeners: + # Unix socket listener: Ideal for Synapse deployments behind a reverse proxy, offering + # lightweight interprocess communication without TCP/IP overhead, avoiding port + # conflicts, and providing enhanced security through system file permissions. + # + # Note that x_forwarded will default to true when using a UNIX socket. Please see + # https://matrix-org.github.io/synapse/latest/reverse_proxy.html. + # + - path: /var/run/synapse/main_public.sock + type: http + resources: + - names: [client, federation] +``` + --- ### `manhole_settings` @@ -3949,6 +3979,14 @@ instance_map: host: localhost port: 8034 ``` +Example configuration #2 (for UNIX sockets): +```yaml +instance_map: + main: + path: /var/run/synapse/main_replication.sock + worker1: + path: /var/run/synapse/worker1_replication.sock +``` --- ### `stream_writers` @@ -4108,6 +4146,18 @@ worker_listeners: resources: - names: [client, federation] ``` +Example configuration #2 (using UNIX sockets with a `replication` listener): +```yaml +worker_listeners: + - type: http + path: /var/run/synapse/worker_public.sock + resources: + - names: [client, federation] + - type: http + path: /var/run/synapse/worker_replication.sock + resources: + - names: [replication] +``` --- ### `worker_manhole` diff --git a/docs/workers.md b/docs/workers.md index 828f082e75..735cd3f18d 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -95,9 +95,12 @@ for the main process * Secondly, you need to enable [redis-based replication](usage/configuration/config_documentation.md#redis) * You will need to add an [`instance_map`](usage/configuration/config_documentation.md#instance_map) -with the `main` process defined, as well as the relevant connection information from -it's HTTP `replication` listener (defined in step 1 above). Note that the `host` defined -is the address the worker needs to look for the `main` process at, not necessarily the same address that is bound to. +with the `main` process defined, as well as the relevant connection information from +its HTTP `replication` listener (defined in step 1 above). + * Note that the `host` defined is the address the worker needs to look for the `main` + process at, not necessarily the same address that is bound to. + * If you are using Unix sockets for the `replication` resource, make sure to + use a `path` to the socket file instead of a `port`. * Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret) can be used to authenticate HTTP traffic between workers. 
For example: diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 24b83cfeb6..fea76cb5af 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -253,6 +253,10 @@ if [[ -n "$ASYNCIO_REACTOR" ]]; then export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true fi +if [[ -n "$UNIX_SOCKETS" ]]; then + # Enable full on Unix socket mode for Synapse, Redis and Postgresql + export PASS_SYNAPSE_USE_UNIX_SOCKET=1 +fi if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then # Set the log level to what is desired diff --git a/synapse/config/workers.py b/synapse/config/workers.py index ccfe75eaf3..e55ca12a36 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -94,7 +94,7 @@ class ConfigModel(BaseModel): allow_mutation = False -class InstanceLocationConfig(ConfigModel): +class InstanceTcpLocationConfig(ConfigModel): """The host and port to talk to an instance via HTTP replication.""" host: StrictStr @@ -110,6 +110,23 @@ class InstanceLocationConfig(ConfigModel): return f"{self.host}:{self.port}" +class InstanceUnixLocationConfig(ConfigModel): + """The socket file to talk to an instance via HTTP replication.""" + + path: StrictStr + + def scheme(self) -> str: + """Hardcode a retrievable scheme""" + return "unix" + + def netloc(self) -> str: + """Nicely format the address location data""" + return f"{self.path}" + + +InstanceLocationConfig = Union[InstanceTcpLocationConfig, InstanceUnixLocationConfig] + + @attr.s class WriterLocations: """Specifies the instances that write various streams. @@ -270,9 +287,12 @@ class WorkerConfig(Config): % MAIN_PROCESS_INSTANCE_MAP_NAME ) + # type-ignore: the expression `Union[A, B]` is not a Type[Union[A, B]] currently self.instance_map: Dict[ str, InstanceLocationConfig - ] = parse_and_validate_mapping(instance_map, InstanceLocationConfig) + ] = parse_and_validate_mapping( + instance_map, InstanceLocationConfig # type: ignore[arg-type] + ) # Map from type of streams to source, c.f. WriterLocations. writers = config.get("stream_writers") or {} diff --git a/synapse/http/replicationagent.py b/synapse/http/replicationagent.py index d6ba6f0e57..3ba2f22dfd 100644 --- a/synapse/http/replicationagent.py +++ b/synapse/http/replicationagent.py @@ -18,7 +18,11 @@ from typing import Dict, Optional from zope.interface import implementer from twisted.internet import defer -from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS +from twisted.internet.endpoints import ( + HostnameEndpoint, + UNIXClientEndpoint, + wrapClientTLS, +) from twisted.internet.interfaces import IStreamClientEndpoint from twisted.python.failure import Failure from twisted.web.client import URI, HTTPConnectionPool, _AgentBase @@ -32,7 +36,11 @@ from twisted.web.iweb import ( IResponse, ) -from synapse.config.workers import InstanceLocationConfig +from synapse.config.workers import ( + InstanceLocationConfig, + InstanceTcpLocationConfig, + InstanceUnixLocationConfig, +) from synapse.types import ISynapseReactor logger = logging.getLogger(__name__) @@ -40,7 +48,7 @@ logger = logging.getLogger(__name__) @implementer(IAgentEndpointFactory) class ReplicationEndpointFactory: - """Connect to a given TCP socket""" + """Connect to a given TCP or UNIX socket""" def __init__( self, @@ -64,24 +72,27 @@ class ReplicationEndpointFactory: # The given URI has a special scheme and includes the worker name. The # actual connection details are pulled from the instance map. 
        worker_name = uri.netloc.decode("utf-8")
-        scheme = self.instance_map[worker_name].scheme()
+        location_config = self.instance_map[worker_name]
+        scheme = location_config.scheme()

-        if scheme in ("http", "https"):
+        if isinstance(location_config, InstanceTcpLocationConfig):
            endpoint = HostnameEndpoint(
                self.reactor,
-                self.instance_map[worker_name].host,
-                self.instance_map[worker_name].port,
+                location_config.host,
+                location_config.port,
            )
            if scheme == "https":
                endpoint = wrapClientTLS(
                    # The 'port' argument below isn't actually used by the function
                    self.context_factory.creatorForNetloc(
-                        self.instance_map[worker_name].host.encode("utf-8"),
-                        self.instance_map[worker_name].port,
+                        location_config.host.encode("utf-8"),
+                        location_config.port,
                    ),
                    endpoint,
                )
            return endpoint
+        elif isinstance(location_config, InstanceUnixLocationConfig):
+            return UNIXClientEndpoint(self.reactor, location_config.path)
        else:
            raise SchemeNotSupported(f"Unsupported scheme: {scheme}")

@@ -138,13 +149,16 @@ class ReplicationAgent(_AgentBase):
        An existing connection from the connection pool may be used or a new
        one may be created.

-        Currently, HTTP and HTTPS schemes are supported in uri.
+        Currently, HTTP, HTTPS and UNIX schemes are supported in uri.

        This is copied from twisted.web.client.Agent, except:

-        * It uses a different pool key (combining the host & port).
-        * It does not call _ensureValidURI(...) since it breaks on some
-          UNIX paths.
+        * It uses a different pool key (combining the scheme with either host & port or
+          socket path).
+        * It does not call _ensureValidURI(...) as the strictness of IDNA2008 is not
+          required when using a worker's name as a 'hostname' for Synapse HTTP
+          Replication machinery. Specifically, this allows a range of ASCII characters
+          such as '+' and '_' in hostnames/worker names.

        See: twisted.web.iweb.IAgent.request
        """
@@ -154,9 +168,12 @@ class ReplicationAgent(_AgentBase):
        except SchemeNotSupported:
            return defer.fail(Failure())

+        worker_name = parsedURI.netloc.decode("utf-8")
+        key_scheme = self._endpointFactory.instance_map[worker_name].scheme()
+        key_netloc = self._endpointFactory.instance_map[worker_name].netloc()
        # This sets the Pool key to be:
-        #   (http(s), <host:port>)
-        key = (parsedURI.scheme, parsedURI.netloc)
+        #   (http(s), <host:port>) or (unix, <socket path>)
+        key = (key_scheme, key_netloc)

        # _requestWithEndpoint comes from _AgentBase class
        return self._requestWithEndpoint(
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index 75217e3f45..be910128aa 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -1070,7 +1070,7 @@ def trace_servlet(
        tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
        tags.HTTP_METHOD: request.get_method(),
        tags.HTTP_URL: request.get_redacted_uri(),
-        tags.PEER_HOST_IPV6: request.getClientAddress().host,
+        tags.PEER_HOST_IPV6: request.get_client_ip_if_available(),
    }

    request_name = request.request_metrics.name
@@ -1091,9 +1091,11 @@ def trace_servlet(
            # with JsonResource).
            scope.span.set_operation_name(request.request_metrics.name)

+            # Mypy seems to think that start_context.tag below can be Optional[str], but
+            # that doesn't appear to be correct and works in practice.
request_tags[ SynapseTags.REQUEST_TAG - ] = request.request_metrics.start_context.tag + ] = request.request_metrics.start_context.tag # type: ignore[assignment] # set the tags *after* the servlet completes, in case it decided to # prioritise the span (tags will get dropped on unprioritised spans) diff --git a/tests/replication/_base.py b/tests/replication/_base.py index eb9b1f1cd9..39aadb9ed5 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -22,6 +22,7 @@ from twisted.test.proto_helpers import MemoryReactor from twisted.web.resource import Resource from synapse.app.generic_worker import GenericWorkerServer +from synapse.config.workers import InstanceTcpLocationConfig, InstanceUnixLocationConfig from synapse.http.site import SynapseRequest, SynapseSite from synapse.replication.http import ReplicationRestResource from synapse.replication.tcp.client import ReplicationDataHandler @@ -339,7 +340,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): # `_handle_http_replication_attempt` like we do with the master HS. instance_name = worker_hs.get_instance_name() instance_loc = worker_hs.config.worker.instance_map.get(instance_name) - if instance_loc: + if instance_loc and isinstance(instance_loc, InstanceTcpLocationConfig): # Ensure the host is one that has a fake DNS entry. if instance_loc.host not in self.reactor.lookups: raise Exception( @@ -360,6 +361,10 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): instance_loc.port, lambda: self._handle_http_replication_attempt(worker_hs, port), ) + elif instance_loc and isinstance(instance_loc, InstanceUnixLocationConfig): + raise Exception( + "Unix sockets are not supported for unit tests at this time." + ) store = worker_hs.get_datastores().main store.db_pool._db_pool = self.database_pool._db_pool diff --git a/tests/server.py b/tests/server.py index a12c3e3b9a..c84a524e8c 100644 --- a/tests/server.py +++ b/tests/server.py @@ -53,6 +53,7 @@ from twisted.internet.interfaces import ( IConnector, IConsumer, IHostnameResolver, + IListeningPort, IProducer, IProtocol, IPullProducer, @@ -62,7 +63,7 @@ from twisted.internet.interfaces import ( IResolverSimple, ITransport, ) -from twisted.internet.protocol import ClientFactory, DatagramProtocol +from twisted.internet.protocol import ClientFactory, DatagramProtocol, Factory from twisted.python import threadpool from twisted.python.failure import Failure from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock @@ -523,6 +524,35 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): """ self._tcp_callbacks[(host, port)] = callback + def connectUNIX( + self, + address: str, + factory: ClientFactory, + timeout: float = 30, + checkPID: int = 0, + ) -> IConnector: + """ + Unix sockets aren't supported for unit tests yet. Make it obvious to any + developer trying it out that they will need to do some work before being able + to use it in tests. + """ + raise Exception("Unix sockets are not implemented for tests yet, sorry.") + + def listenUNIX( + self, + address: str, + factory: Factory, + backlog: int = 50, + mode: int = 0o666, + wantPID: int = 0, + ) -> IListeningPort: + """ + Unix sockets aren't supported for unit tests yet. Make it obvious to any + developer trying it out that they will need to do some work before being able + to use it in tests. 
+ """ + raise Exception("Unix sockets are not implemented for tests, sorry") + def connectTCP( self, host: str, From d7fc87d9736007300df8c1c8433a13542cb420dc Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 11 Jul 2023 15:32:50 -0500 Subject: [PATCH 213/562] Bump Unix sockets intro version (#15924) https://github.com/matrix-org/synapse/pull/15708 didn't quite make the cut for `1.88.0` this morning. --- changelog.d/15924.feature | 1 + docs/usage/configuration/config_documentation.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15924.feature diff --git a/changelog.d/15924.feature b/changelog.d/15924.feature new file mode 100644 index 0000000000..06a6c959ab --- /dev/null +++ b/changelog.d/15924.feature @@ -0,0 +1 @@ +Add Unix Socket support for HTTP Replication Listeners. Document and provide usage instructions for utilizing Unix sockets in Synapse. Contributed by Jason Little. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index d9286e83bc..22cd1772dc 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -462,7 +462,7 @@ See the docs [request log format](../administration/request_log.md). * `additional_resources`: Only valid for an 'http' listener. A map of additional endpoints which should be loaded via dynamic modules. -Unix socket support (_Added in Synapse 1.88.0_): +Unix socket support (_Added in Synapse 1.89.0_): * `path`: A path and filename for a Unix socket. Make sure it is located in a directory with read and write permissions, and that it already exists (the directory will not be created). Defaults to `None`. From ae391db777af0146ad8b50fa5bcbd7cc39c0d886 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 11 Jul 2023 17:12:41 -0500 Subject: [PATCH 214/562] Better warning in logs when we fail to fetch an alias (#15922) **Before:** ``` Error retrieving alias ``` **After:** ``` Error retrieving alias #foo:bar -> 401 Unauthorized ``` *Spawning from creating the [manual testing strategy for the outbound federation proxy](https://github.com/matrix-org/synapse/pull/15773).* --- changelog.d/15922.misc | 1 + synapse/handlers/directory.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15922.misc diff --git a/changelog.d/15922.misc b/changelog.d/15922.misc new file mode 100644 index 0000000000..93fc644877 --- /dev/null +++ b/changelog.d/15922.misc @@ -0,0 +1 @@ +Add details to warning in log when we fail to fetch an alias. diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 1e0623c7f8..623a4e7b1d 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -277,7 +277,9 @@ class DirectoryHandler: except RequestSendFailed: raise SynapseError(502, "Failed to fetch alias") except CodeMessageException as e: - logging.warning("Error retrieving alias") + logging.warning( + "Error retrieving alias %s -> %s %s", room_alias, e.code, e.msg + ) if e.code == 404: fed_result = None else: From 0371a354cf812593c724402c0dcfdab7257613ae Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 11 Jul 2023 17:13:54 -0500 Subject: [PATCH 215/562] Better clarify how to run a worker instance (pass both configs) (#15921) Previously, if you just followed the instructions per the docs, you just ran into an error: ```sh $ poetry run synapse_worker --config-path homeserver_generic_worker1.yaml Missing mandatory `server_name` config option. 
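$ # (Illustrative fix, with assumed filenames: pass the homeserver config as
$ # well, as the documentation change below describes.)
$ poetry run synapse_worker --config-path homeserver.yaml --config-path homeserver_generic_worker1.yaml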
```
---
 changelog.d/15921.doc | 1 +
 docs/workers.md | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/15921.doc

diff --git a/changelog.d/15921.doc b/changelog.d/15921.doc
new file mode 100644
index 0000000000..02f34c73d5
--- /dev/null
+++ b/changelog.d/15921.doc
@@ -0,0 +1 @@
+Better clarify how to run a worker instance (pass both configs).
diff --git a/docs/workers.md b/docs/workers.md
index 735cd3f18d..cf9c0add82 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -177,11 +177,11 @@ The following applies to Synapse installations that have been installed from sou
You can start the main Synapse process with Poetry by running the following command:
```console
-poetry run synapse_homeserver -c [your homeserver.yaml]
+poetry run synapse_homeserver --config-path [your homeserver.yaml]
```

For worker setups, you can run the following command
```console
-poetry run synapse_worker -c [your worker.yaml]
+poetry run synapse_worker --config-path [your homeserver.yaml] --config-path [your worker.yaml]
```
## Available worker applications

From 3bdb9b07fd19a57f563b84fb02bfcbaa2ef3083b Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Tue, 11 Jul 2023 17:15:06 -0500
Subject: [PATCH 216/562] Make it more obvious which Python version runs on a
 given Linux distribution (#15909)

Make it more obvious which Python version runs on a given Linux distribution
so when we end up dropping support for a given Python version, we can more
easily find the reference to the Python version and remove any references for
the distribution.

We don't want to be running tests or building packages on a distribution
that no longer has a supported Python version.

This way, we can avoid another situation like when we dropped support for
Python 3.7 but forgot to drop the Debian Buster references everywhere
(https://github.com/matrix-org/synapse/pull/15893)
---
 changelog.d/15909.misc | 1 +
 scripts-dev/build_debian_packages.py | 18 +++++++++++-------
 2 files changed, 12 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/15909.misc

diff --git a/changelog.d/15909.misc b/changelog.d/15909.misc
new file mode 100644
index 0000000000..ba36a97442
--- /dev/null
+++ b/changelog.d/15909.misc
@@ -0,0 +1 @@
+Document which Python version runs on a given Linux distribution so we can more easily clean up later.
diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py
index 8fe10f2cb5..1954835474 100755
--- a/scripts-dev/build_debian_packages.py
+++ b/scripts-dev/build_debian_packages.py
@@ -22,14 +22,18 @@ from typing import Collection, Optional, Sequence, Set

# These are expanded inside the dockerfile to be a fully qualified image name.
# e.g. docker.io/library/debian:bullseye
+#
+# If an EOL is forced by a Python version and we're dropping support for it, make sure
+# to remove references to the distribution across Synapse (search for "bullseye" for
+# example)
DISTS = (
-    "debian:bullseye",
-    "debian:bookworm",
-    "debian:sid",
-    "ubuntu:focal",  # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
-    "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04)
-    "ubuntu:kinetic",  # 22.10 (EOL 2023-07-20)
-    "ubuntu:lunar",  # 23.04 (EOL 2024-01)
+    "debian:bullseye",  # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)
+    "debian:bookworm",  # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
+    "debian:sid",  # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
+    "ubuntu:focal",  # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
+    "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
+    "ubuntu:kinetic",  # 22.10 (EOL 2023-07-20) (our EOL forced by Python 3.10 is 2026-10-04)
+    "ubuntu:lunar",  # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
)

DESC = """\

From 8eb7bb975eed0250aa8be5e8fb70c586cbff6b37 Mon Sep 17 00:00:00 2001
From: Mathieu Velten
Date: Wed, 12 Jul 2023 11:09:13 +0200
Subject: [PATCH 217/562] Mark get_user_in_directory private since only used
 in tests (#15884)

---
 changelog.d/15884.misc | 1 +
 .../storage/databases/main/user_directory.py | 9 +--------
 tests/handlers/test_user_directory.py | 18 +++++++++---------
 tests/rest/admin/test_user.py | 6 +++---
 4 files changed, 14 insertions(+), 20 deletions(-)
 create mode 100644 changelog.d/15884.misc

diff --git a/changelog.d/15884.misc b/changelog.d/15884.misc
new file mode 100644
index 0000000000..8e73a9a6cd
--- /dev/null
+++ b/changelog.d/15884.misc
@@ -0,0 +1 @@
+Mark `get_user_in_directory` private since it is only used in tests. Also remove the cache from it.
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index b0a06baf4f..924022c95c 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -62,7 +62,6 @@ from synapse.types import (
    get_domain_from_id,
    get_localpart_from_id,
)
-from synapse.util.caches.descriptors import cached

logger = logging.getLogger(__name__)

@@ -771,9 +770,6 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
            # This should be unreachable.
raise Exception("Unrecognized database engine") - for p in profiles: - txn.call_after(self.get_user_in_directory.invalidate, (p.user_id,)) - async def add_users_who_share_private_room( self, room_id: str, user_id_tuples: Iterable[Tuple[str, str]] ) -> None: @@ -831,14 +827,12 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): txn.execute(f"{truncate} user_directory_search") txn.execute(f"{truncate} users_in_public_rooms") txn.execute(f"{truncate} users_who_share_private_rooms") - txn.call_after(self.get_user_in_directory.invalidate_all) await self.db_pool.runInteraction( "delete_all_from_user_dir", _delete_all_from_user_dir_txn ) - @cached() - async def get_user_in_directory(self, user_id: str) -> Optional[Mapping[str, str]]: + async def _get_user_in_directory(self, user_id: str) -> Optional[Mapping[str, str]]: return await self.db_pool.simple_select_one( table="user_directory", keyvalues={"user_id": user_id}, @@ -900,7 +894,6 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): table="users_who_share_private_rooms", keyvalues={"other_user_id": user_id}, ) - txn.call_after(self.get_user_in_directory.invalidate, (user_id,)) await self.db_pool.runInteraction( "remove_from_user_dir", _remove_from_user_dir_txn diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 15a7dc6818..9785dd698b 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -356,7 +356,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): support_user_id, ProfileInfo("I love support me", None) ) ) - profile = self.get_success(self.store.get_user_in_directory(support_user_id)) + profile = self.get_success(self.store._get_user_in_directory(support_user_id)) self.assertIsNone(profile) display_name = "display_name" @@ -364,7 +364,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): self.get_success( self.handler.handle_local_profile_change(regular_user_id, profile_info) ) - profile = self.get_success(self.store.get_user_in_directory(regular_user_id)) + profile = self.get_success(self.store._get_user_in_directory(regular_user_id)) assert profile is not None self.assertTrue(profile["display_name"] == display_name) @@ -383,7 +383,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): ) # profile is in directory - profile = self.get_success(self.store.get_user_in_directory(r_user_id)) + profile = self.get_success(self.store._get_user_in_directory(r_user_id)) assert profile is not None self.assertTrue(profile["display_name"] == display_name) @@ -392,7 +392,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): self.get_success(self.handler.handle_local_user_deactivated(r_user_id)) # profile is not in directory - profile = self.get_success(self.store.get_user_in_directory(r_user_id)) + profile = self.get_success(self.store._get_user_in_directory(r_user_id)) self.assertIsNone(profile) # update profile after deactivation @@ -401,7 +401,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): ) # profile is furthermore not in directory - profile = self.get_success(self.store.get_user_in_directory(r_user_id)) + profile = self.get_success(self.store._get_user_in_directory(r_user_id)) self.assertIsNone(profile) def test_handle_local_profile_change_with_appservice_user(self) -> None: @@ -411,7 +411,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): ) # profile is not in directory - profile = self.get_success(self.store.get_user_in_directory(as_user_id)) + profile = 
self.get_success(self.store._get_user_in_directory(as_user_id)) self.assertIsNone(profile) # update profile @@ -421,13 +421,13 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): ) # profile is still not in directory - profile = self.get_success(self.store.get_user_in_directory(as_user_id)) + profile = self.get_success(self.store._get_user_in_directory(as_user_id)) self.assertIsNone(profile) def test_handle_local_profile_change_with_appservice_sender(self) -> None: # profile is not in directory profile = self.get_success( - self.store.get_user_in_directory(self.appservice.sender) + self.store._get_user_in_directory(self.appservice.sender) ) self.assertIsNone(profile) @@ -441,7 +441,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): # profile is still not in directory profile = self.get_success( - self.store.get_user_in_directory(self.appservice.sender) + self.store._get_user_in_directory(self.appservice.sender) ) self.assertIsNone(profile) diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index a17a1bb1d8..6f7b4bf642 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -2472,7 +2472,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): """ # is in user directory - profile = self.get_success(self.store.get_user_in_directory(self.other_user)) + profile = self.get_success(self.store._get_user_in_directory(self.other_user)) assert profile is not None self.assertTrue(profile["display_name"] == "User") @@ -2489,7 +2489,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertTrue(channel.json_body["deactivated"]) # is not in user directory - profile = self.get_success(self.store.get_user_in_directory(self.other_user)) + profile = self.get_success(self.store._get_user_in_directory(self.other_user)) self.assertIsNone(profile) # Set new displayname user @@ -2506,7 +2506,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual("Foobar", channel.json_body["displayname"]) # is not in user directory - profile = self.get_success(self.store.get_user_in_directory(self.other_user)) + profile = self.get_success(self.store._get_user_in_directory(self.other_user)) self.assertIsNone(profile) def test_reactivate_user(self) -> None: From 36c6b92bfc6570b7b8f3d0416ec4a47a3b7846d3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 12 Jul 2023 12:02:11 +0100 Subject: [PATCH 218/562] Fix push for invites received over federation (#15820) --- changelog.d/15820.bugfix | 1 + synapse/push/push_tools.py | 37 ++++++++++++++++++++++++++++++++++++- 2 files changed, 37 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15820.bugfix diff --git a/changelog.d/15820.bugfix b/changelog.d/15820.bugfix new file mode 100644 index 0000000000..d259d32061 --- /dev/null +++ b/changelog.d/15820.bugfix @@ -0,0 +1 @@ +Fix long-standing bug where remote invites weren't correctly pushed. diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py index 7ee07e4bee..a94a6e97c1 100644 --- a/synapse/push/push_tools.py +++ b/synapse/push/push_tools.py @@ -13,6 +13,7 @@ # limitations under the License. 
from typing import Dict

+from synapse.api.constants import EventTypes, Membership
from synapse.events import EventBase
from synapse.push.presentable_names import calculate_room_name, name_from_member_event
from synapse.storage.controllers import StorageControllers
@@ -49,7 +50,41 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -
async def get_context_for_event(
    storage: StorageControllers, ev: EventBase, user_id: str
) -> Dict[str, str]:
-    ctx = {}
+    ctx: Dict[str, str] = {}
+
+    if ev.internal_metadata.outlier:
+        # We don't have state for outliers, so we can't compute the context
+        # except for invites and knocks. (Such events are known as 'out-of-band
+        # memberships' for the user).
+        if ev.type != EventTypes.Member:
+            return ctx
+
+        # We might be able to pull out the display name for the sender straight
+        # from the membership event
+        event_display_name = ev.content.get("displayname")
+        if event_display_name and ev.state_key == ev.sender:
+            ctx["sender_display_name"] = event_display_name
+
+        room_state = []
+        if ev.content.get("membership") == Membership.INVITE:
+            room_state = ev.unsigned.get("invite_room_state", [])
+        elif ev.content.get("membership") == Membership.KNOCK:
+            room_state = ev.unsigned.get("knock_room_state", [])
+
+        # Ideally we'd reuse the logic in `calculate_room_name`, but that gets
+        # complicated to handle partial events vs pulling events from the DB.
+        for state_dict in room_state:
+            type_tuple = (state_dict["type"], state_dict.get("state_key"))
+            if type_tuple == (EventTypes.Member, ev.sender):
+                display_name = state_dict["content"].get("displayname")
+                if display_name:
+                    ctx["sender_display_name"] = display_name
+            elif type_tuple == (EventTypes.Name, ""):
+                room_name = state_dict["content"].get("name")
+                if room_name:
+                    ctx["name"] = room_name
+
+        return ctx

    room_state_ids = await storage.state.get_state_ids_for_event(ev.event_id)

From 5bdf01fccdee521390a03ea5a148eded7d0ad426 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Wed, 12 Jul 2023 08:39:25 -0400
Subject: [PATCH 219/562] Fix running with an empty experimental features
 section. (#15925)

---
 changelog.d/15925.bugfix | 1 +
 synapse/config/auth.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/15925.bugfix

diff --git a/changelog.d/15925.bugfix b/changelog.d/15925.bugfix
new file mode 100644
index 0000000000..e3ef783576
--- /dev/null
+++ b/changelog.d/15925.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in 1.86.0 where Synapse failed to start with an empty `experimental_features` configuration setting.
diff --git a/synapse/config/auth.py b/synapse/config/auth.py
index c7ab428f28..3b4c77f572 100644
--- a/synapse/config/auth.py
+++ b/synapse/config/auth.py
@@ -31,7 +31,7 @@ class AuthConfig(Config):

        # The default value of password_config.enabled is True, unless msc3861 is enabled.
        msc3861_enabled = (
-            config.get("experimental_features", {})
+            (config.get("experimental_features") or {})
            .get("msc3861", {})
            .get("enabled", False)
        )

From 204b66c203564a019f1ecb4fb3909bfb375ce615 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Wed, 12 Jul 2023 10:30:05 -0400
Subject: [PATCH 220/562] Remove unneeded __init__. (#15926)

Remove an __init__ which only calls super() without changing the input
arguments.
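As a minimal illustration (generic class names, not the actual servlet types):
an `__init__` of this shape is pure boilerplate, because Python already falls
back to the parent's `__init__` when a subclass does not define one.

```python
class Base:
    def __init__(self, name: str) -> None:
        self.name = name


class Child(Base):
    # Redundant: identical parameters and nothing but a super() call, so
    # deleting this method leaves Child("x") behaving exactly the same.
    def __init__(self, name: str) -> None:
        super().__init__(name)
```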
--- changelog.d/15926.misc | 1 + synapse/federation/transport/server/federation.py | 9 --------- 2 files changed, 1 insertion(+), 9 deletions(-) create mode 100644 changelog.d/15926.misc diff --git a/changelog.d/15926.misc b/changelog.d/15926.misc new file mode 100644 index 0000000000..bf4c0fa5d0 --- /dev/null +++ b/changelog.d/15926.misc @@ -0,0 +1 @@ +Remove unneeded `__init__`. diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index 3a744e25be..3248953b48 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -432,15 +432,6 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet): PREFIX = FEDERATION_V2_PREFIX - def __init__( - self, - hs: "HomeServer", - authenticator: Authenticator, - ratelimiter: FederationRateLimiter, - server_name: str, - ): - super().__init__(hs, authenticator, ratelimiter, server_name) - async def on_PUT( self, origin: str, From 2cacd0849a02d43f88b6c15ee862398159ab827c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 13 Jul 2023 11:21:28 +0100 Subject: [PATCH 221/562] Bump types-pillow from 9.5.0.4 to 10.0.0.1 (#15932) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index b903fdc9ae..07a0ed5134 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1081,6 +1081,8 @@ files = [ {file = "lxml-4.9.3-cp27-cp27m-macosx_11_0_x86_64.whl", hash = "sha256:b0a545b46b526d418eb91754565ba5b63b1c0b12f9bd2f808c852d9b4b2f9b5c"}, {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:075b731ddd9e7f68ad24c635374211376aa05a281673ede86cbe1d1b3455279d"}, {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1e224d5755dba2f4a9498e150c43792392ac9b5380aa1b845f98a1618c94eeef"}, + {file = "lxml-4.9.3-cp27-cp27m-win32.whl", hash = "sha256:2c74524e179f2ad6d2a4f7caf70e2d96639c0954c943ad601a9e146c76408ed7"}, + {file = "lxml-4.9.3-cp27-cp27m-win_amd64.whl", hash = "sha256:4f1026bc732b6a7f96369f7bfe1a4f2290fb34dce00d8644bc3036fb351a4ca1"}, {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0781a98ff5e6586926293e59480b64ddd46282953203c76ae15dbbbf302e8bb"}, {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cef2502e7e8a96fe5ad686d60b49e1ab03e438bd9123987994528febd569868e"}, {file = "lxml-4.9.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b86164d2cff4d3aaa1f04a14685cbc072efd0b4f99ca5708b2ad1b9b5988a991"}, @@ -2904,13 +2906,13 @@ files = [ [[package]] name = "types-pillow" -version = "9.5.0.4" +version = "10.0.0.1" description = "Typing stubs for Pillow" optional = false python-versions = "*" files = [ - {file = "types-Pillow-9.5.0.4.tar.gz", hash = "sha256:f1b6af47abd151847ee25911ffeba784899bc7dc7f9eba8ca6a5aac522b012ef"}, - {file = "types_Pillow-9.5.0.4-py3-none-any.whl", hash = "sha256:69427d9fa4320ff6e30f00fb9c0dd71185dc0a16de4757774220104759483466"}, + {file = "types-Pillow-10.0.0.1.tar.gz", hash = "sha256:834a07a04504f8bf37936679bc6a5802945e7644d0727460c0c4d4307967e2a3"}, + {file = "types_Pillow-10.0.0.1-py3-none-any.whl", hash = "sha256:be576b67418f1cb3b93794cf7946581be1009a33a10085b3c132eb0875a819b4"}, ] [[package]] From 20ae617d1417f8dd52e20b3a20cb01b4c2fd87c9 Mon Sep 17 00:00:00 2001 From: 
Patrick Cloke Date: Thu, 13 Jul 2023 07:23:56 -0400 Subject: [PATCH 222/562] Stop accepting 'user' parameter for application service registration. (#15928) This is unspecced, but has existed for a very long time. --- changelog.d/15928.removal | 1 + docs/upgrade.md | 10 ++++++++++ synapse/rest/client/register.py | 12 ++++-------- 3 files changed, 15 insertions(+), 8 deletions(-) create mode 100644 changelog.d/15928.removal diff --git a/changelog.d/15928.removal b/changelog.d/15928.removal new file mode 100644 index 0000000000..5563213d31 --- /dev/null +++ b/changelog.d/15928.removal @@ -0,0 +1 @@ +Remove support for calling the `/register` endpoint with an unspecced `user` property for application services. diff --git a/docs/upgrade.md b/docs/upgrade.md index b94d13c4da..5dde6c769e 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -88,6 +88,16 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.89.0 + +## Removal of unspecced `user` property for `/register` + +Application services can no longer call `/register` with a `user` property to create new users. +The standard `username` property should be used instead. See the +[Application Service specification](https://spec.matrix.org/v1.7/application-service-api/#server-admin-style-permissions) +for more information. + + # Upgrading to v1.88.0 ## Minimum supported Python version diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index d59669f0b6..77e3b91b79 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -462,9 +462,9 @@ class RegisterRestServlet(RestServlet): # the auth layer will store these in sessions. desired_username = None if "username" in body: - if not isinstance(body["username"], str) or len(body["username"]) > 512: - raise SynapseError(400, "Invalid username") desired_username = body["username"] + if not isinstance(desired_username, str) or len(desired_username) > 512: + raise SynapseError(400, "Invalid username") # fork off as soon as possible for ASes which have completely # different registration flows to normal users @@ -477,11 +477,6 @@ class RegisterRestServlet(RestServlet): "Appservice token must be provided when using a type of m.login.application_service", ) - # Set the desired user according to the AS API (which uses the - # 'user' key not 'username'). Since this is a new addition, we'll - # fallback to 'username' if they gave one. - desired_username = body.get("user", desired_username) - # XXX we should check that desired_username is valid. Currently # we give appservices carte blanche for any insanity in mxids, # because the IRC bridges rely on being able to register stupid @@ -489,7 +484,8 @@ class RegisterRestServlet(RestServlet): access_token = self.auth.get_access_token_from_request(request) - if not isinstance(desired_username, str): + # Desired username is either a string or None. 
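+        # (The legacy appservice `user` key is no longer read here; only the
+        # standard `username` key is accepted.)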
+        if desired_username is None:
            raise SynapseError(400, "Desired Username is missing or not a string")

        result = await self._do_appservice_registration(

From 8d3656b994173b899ce2be18a2600273aa4e7d32 Mon Sep 17 00:00:00 2001
From: Will Hunt
Date: Fri, 14 Jul 2023 13:32:13 +0100
Subject: [PATCH 223/562] Document that you cannot log in as yourself on
 /_synapse/admin/v1/users/<user_id>/login (#15938)

---
 changelog.d/15938.doc | 1 +
 docs/admin_api/user_admin_api.md | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/15938.doc

diff --git a/changelog.d/15938.doc b/changelog.d/15938.doc
new file mode 100644
index 0000000000..8d99e5f4ea
--- /dev/null
+++ b/changelog.d/15938.doc
@@ -0,0 +1 @@
+Improve the documentation for the login as a user admin API.
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index 23f465e98d..ac4f635099 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -732,7 +732,8 @@ POST /_synapse/admin/v1/users/<user_id>/login

An optional `valid_until_ms` field can be specified in the request body as an
integer timestamp that specifies when the token should expire. By default tokens
-do not expire.
+do not expire. Note that this API does not allow a user to log in as themselves
+(to create more tokens).

A response body like the following is returned:

From cba2df20b5e3127767223cf077454cb66cd013fd Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Sat, 15 Jul 2023 21:37:59 +0100
Subject: [PATCH 224/562] Bump cryptography from 41.0.1 to 41.0.2 (#15943)

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 poetry.lock | 44 ++++++++++++++++++++++++--------------------
 1 file changed, 24 insertions(+), 20 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index 07a0ed5134..dacdff2603 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -460,30 +460,34 @@ files = [

[[package]]
name = "cryptography"
-version = "41.0.1"
+version = "41.0.2"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-41.0.1-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:f73bff05db2a3e5974a6fd248af2566134d8981fd7ab012e5dd4ddb1d9a70699"}, - {file = "cryptography-41.0.1-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:1a5472d40c8f8e91ff7a3d8ac6dfa363d8e3138b961529c996f3e2df0c7a411a"}, - {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fa01527046ca5facdf973eef2535a27fec4cb651e4daec4d043ef63f6ecd4ca"}, - {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b46e37db3cc267b4dea1f56da7346c9727e1209aa98487179ee8ebed09d21e43"}, - {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d198820aba55660b4d74f7b5fd1f17db3aa5eb3e6893b0a41b75e84e4f9e0e4b"}, - {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:948224d76c4b6457349d47c0c98657557f429b4e93057cf5a2f71d603e2fc3a3"}, - {file = "cryptography-41.0.1-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:059e348f9a3c1950937e1b5d7ba1f8e968508ab181e75fc32b879452f08356db"}, - {file = "cryptography-41.0.1-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:b4ceb5324b998ce2003bc17d519080b4ec8d5b7b70794cbd2836101406a9be31"}, - {file = "cryptography-41.0.1-cp37-abi3-win32.whl", hash = "sha256:8f4ab7021127a9b4323537300a2acfb450124b2def3756f64dc3a3d2160ee4b5"}, - {file = "cryptography-41.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:1fee5aacc7367487b4e22484d3c7e547992ed726d14864ee33c0176ae43b0d7c"}, - {file = "cryptography-41.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9a6c7a3c87d595608a39980ebaa04d5a37f94024c9f24eb7d10262b92f739ddb"}, - {file = "cryptography-41.0.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5d092fdfedaec4cbbffbf98cddc915ba145313a6fdaab83c6e67f4e6c218e6f3"}, - {file = "cryptography-41.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a8e6c2de6fbbcc5e14fd27fb24414507cb3333198ea9ab1258d916f00bc3039"}, - {file = "cryptography-41.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:cb33ccf15e89f7ed89b235cff9d49e2e62c6c981a6061c9c8bb47ed7951190bc"}, - {file = "cryptography-41.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5f0ff6e18d13a3de56f609dd1fd11470918f770c6bd5d00d632076c727d35485"}, - {file = "cryptography-41.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7bfc55a5eae8b86a287747053140ba221afc65eb06207bedf6e019b8934b477c"}, - {file = "cryptography-41.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:eb8163f5e549a22888c18b0d53d6bb62a20510060a22fd5a995ec8a05268df8a"}, - {file = "cryptography-41.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8dde71c4169ec5ccc1087bb7521d54251c016f126f922ab2dfe6649170a3b8c5"}, - {file = "cryptography-41.0.1.tar.gz", hash = "sha256:d34579085401d3f49762d2f7d6634d6b6c2ae1242202e860f4d26b046e3a1006"}, + {file = "cryptography-41.0.2-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:01f1d9e537f9a15b037d5d9ee442b8c22e3ae11ce65ea1f3316a41c78756b711"}, + {file = "cryptography-41.0.2-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:079347de771f9282fbfe0e0236c716686950c19dee1b76240ab09ce1624d76d7"}, + {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:439c3cc4c0d42fa999b83ded80a9a1fb54d53c58d6e59234cfe97f241e6c781d"}, + {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:f14ad275364c8b4e525d018f6716537ae7b6d369c094805cae45300847e0894f"}, + {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:84609ade00a6ec59a89729e87a503c6e36af98ddcd566d5f3be52e29ba993182"}, + {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:49c3222bb8f8e800aead2e376cbef687bc9e3cb9b58b29a261210456a7783d83"}, + {file = "cryptography-41.0.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:d73f419a56d74fef257955f51b18d046f3506270a5fd2ac5febbfa259d6c0fa5"}, + {file = "cryptography-41.0.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:2a034bf7d9ca894720f2ec1d8b7b5832d7e363571828037f9e0c4f18c1b58a58"}, + {file = "cryptography-41.0.2-cp37-abi3-win32.whl", hash = "sha256:d124682c7a23c9764e54ca9ab5b308b14b18eba02722b8659fb238546de83a76"}, + {file = "cryptography-41.0.2-cp37-abi3-win_amd64.whl", hash = "sha256:9c3fe6534d59d071ee82081ca3d71eed3210f76ebd0361798c74abc2bcf347d4"}, + {file = "cryptography-41.0.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a719399b99377b218dac6cf547b6ec54e6ef20207b6165126a280b0ce97e0d2a"}, + {file = "cryptography-41.0.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:182be4171f9332b6741ee818ec27daff9fb00349f706629f5cbf417bd50e66fd"}, + {file = "cryptography-41.0.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:7a9a3bced53b7f09da251685224d6a260c3cb291768f54954e28f03ef14e3766"}, + {file = "cryptography-41.0.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f0dc40e6f7aa37af01aba07277d3d64d5a03dc66d682097541ec4da03cc140ee"}, + {file = "cryptography-41.0.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:674b669d5daa64206c38e507808aae49904c988fa0a71c935e7006a3e1e83831"}, + {file = "cryptography-41.0.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7af244b012711a26196450d34f483357e42aeddb04128885d95a69bd8b14b69b"}, + {file = "cryptography-41.0.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9b6d717393dbae53d4e52684ef4f022444fc1cce3c48c38cb74fca29e1f08eaa"}, + {file = "cryptography-41.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:192255f539d7a89f2102d07d7375b1e0a81f7478925b3bc2e0549ebf739dae0e"}, + {file = "cryptography-41.0.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f772610fe364372de33d76edcd313636a25684edb94cee53fd790195f5989d14"}, + {file = "cryptography-41.0.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:b332cba64d99a70c1e0836902720887fb4529ea49ea7f5462cf6640e095e11d2"}, + {file = "cryptography-41.0.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9a6673c1828db6270b76b22cc696f40cde9043eb90373da5c2f8f2158957f42f"}, + {file = "cryptography-41.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:342f3767e25876751e14f8459ad85e77e660537ca0a066e10e75df9c9e9099f0"}, + {file = "cryptography-41.0.2.tar.gz", hash = "sha256:7d230bf856164de164ecb615ccc14c7fc6de6906ddd5b491f3af90d3514c925c"}, ] [package.dependencies] From 85e0541db1c20d0e75facf9c0cd30abf0d0b4ddd Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 17 Jul 2023 09:36:12 +0100 Subject: [PATCH 225/562] Pin the rust version in `flake.nix`, and bump to 1.70.0 to fix installing `ruff` (#15940) --- changelog.d/15940.misc | 1 + flake.lock | 96 +++++++++++++++++++++++++++--------------- flake.nix | 33 ++++++++++----- 3 files changed, 87 insertions(+), 43 deletions(-) create mode 100644 changelog.d/15940.misc diff --git a/changelog.d/15940.misc b/changelog.d/15940.misc 
new file mode 100644 index 0000000000..eac008eb3e --- /dev/null +++ b/changelog.d/15940.misc @@ -0,0 +1 @@ +Unbreak the nix development environment by pinning the Rust version to 1.70.0. \ No newline at end of file diff --git a/flake.lock b/flake.lock index d1c933e9aa..eb5a65e445 100644 --- a/flake.lock +++ b/flake.lock @@ -22,27 +22,6 @@ "type": "github" } }, - "fenix": { - "inputs": { - "nixpkgs": [ - "nixpkgs" - ], - "rust-analyzer-src": "rust-analyzer-src" - }, - "locked": { - "lastModified": 1682490133, - "narHash": "sha256-tR2Qx0uuk97WySpSSk4rGS/oH7xb5LykbjATcw1vw1I=", - "owner": "nix-community", - "repo": "fenix", - "rev": "4e9412753ab75ef0e038a5fe54a062fb44c27c6a", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "fenix", - "type": "github" - } - }, "flake-compat": { "flake": false, "locked": { @@ -74,6 +53,24 @@ "type": "github" } }, + "flake-utils_2": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1681202837, + "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "cfacdce06f30d2b68473a46042957675eebb3401", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, "gitignore": { "inputs": { "nixpkgs": [ @@ -200,6 +197,22 @@ "type": "github" } }, + "nixpkgs_3": { + "locked": { + "lastModified": 1681358109, + "narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, "pre-commit-hooks": { "inputs": { "flake-compat": [ @@ -231,25 +244,27 @@ "root": { "inputs": { "devenv": "devenv", - "fenix": "fenix", "nixpkgs": "nixpkgs_2", - "systems": "systems" + "rust-overlay": "rust-overlay", + "systems": "systems_2" } }, - "rust-analyzer-src": { - "flake": false, + "rust-overlay": { + "inputs": { + "flake-utils": "flake-utils_2", + "nixpkgs": "nixpkgs_3" + }, "locked": { - "lastModified": 1682426789, - "narHash": "sha256-UqnLmJESRZE0tTEaGbRAw05Hm19TWIPA+R3meqi5I4w=", - "owner": "rust-lang", - "repo": "rust-analyzer", - "rev": "943d2a8a1ca15e8b28a1f51f5a5c135e3728da04", + "lastModified": 1689302058, + "narHash": "sha256-yD74lcHTrw4niXcE9goJLbzsgyce48rQQoy5jK5ZK40=", + "owner": "oxalica", + "repo": "rust-overlay", + "rev": "7b8dbbf4c67ed05a9bf3d9e658c12d4108bc24c8", "type": "github" }, "original": { - "owner": "rust-lang", - "ref": "nightly", - "repo": "rust-analyzer", + "owner": "oxalica", + "repo": "rust-overlay", "type": "github" } }, @@ -267,6 +282,21 @@ "repo": "default", "type": "github" } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } } }, "root": "root", diff --git a/flake.nix b/flake.nix index bb42c9ff9b..bacb70f478 100644 --- a/flake.nix +++ b/flake.nix @@ -46,20 +46,20 @@ systems.url = "github:nix-systems/default"; # A development environment manager built on Nix. See https://devenv.sh. devenv.url = "github:cachix/devenv/main"; - # Rust toolchains and rust-analyzer nightly. 
- fenix = { - url = "github:nix-community/fenix"; - inputs.nixpkgs.follows = "nixpkgs"; - }; + # Rust toolchain. + rust-overlay.url = "github:oxalica/rust-overlay"; }; - outputs = { self, nixpkgs, devenv, systems, ... } @ inputs: + outputs = { self, nixpkgs, devenv, systems, rust-overlay, ... } @ inputs: let forEachSystem = nixpkgs.lib.genAttrs (import systems); in { devShells = forEachSystem (system: let - pkgs = nixpkgs.legacyPackages.${system}; + overlays = [ (import rust-overlay) ]; + pkgs = import nixpkgs { + inherit system overlays; + }; in { # Everything is configured via devenv - a Nix module for creating declarative # developer environments. See https://devenv.sh/reference/options/ for a list @@ -76,6 +76,20 @@ # Configure packages to install. # Search for package names at https://search.nixos.org/packages?channel=unstable packages = with pkgs; [ + # The rust toolchain and related tools. + # This will install the "default" profile of rust components. + # https://rust-lang.github.io/rustup/concepts/profiles.html + # + # NOTE: We currently need to set the Rust version unnecessarily high + # in order to work around https://github.com/matrix-org/synapse/issues/15939 + (rust-bin.stable."1.70.0".default.override { + # Additionally install the "rust-src" extension to allow diving into the + # Rust source code in an IDE (rust-analyzer will also make use of it). + extensions = [ "rust-src" ]; + }) + # The rust-analyzer language server implementation. + rust-analyzer + # Native dependencies for running Synapse. icu libffi @@ -124,12 +138,11 @@ # Install dependencies for the additional programming languages # involved with Synapse development. # - # * Rust is used for developing and running Synapse. # * Golang is needed to run the Complement test suite. # * Perl is needed to run the SyTest test suite. + # * Rust is used for developing and running Synapse. + # It is installed manually with `packages` above. languages.go.enable = true; - languages.rust.enable = true; - languages.rust.version = "stable"; languages.perl.enable = true; # Postgres is needed to run Synapse with postgres support and From d2f46ae37085af47f05b0c3a728365ab395c1957 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Jul 2023 10:30:10 +0100 Subject: [PATCH 226/562] Bump prometheus-client from 0.17.0 to 0.17.1 (#15945) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index dacdff2603..dd584a0c55 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1734,13 +1734,13 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytes [[package]] name = "prometheus-client" -version = "0.17.0" +version = "0.17.1" description = "Python client for the Prometheus monitoring system." 
optional = false python-versions = ">=3.6" files = [ - {file = "prometheus_client-0.17.0-py3-none-any.whl", hash = "sha256:a77b708cf083f4d1a3fb3ce5c95b4afa32b9c521ae363354a4a910204ea095ce"}, - {file = "prometheus_client-0.17.0.tar.gz", hash = "sha256:9c3b26f1535945e85b8934fb374678d263137b78ef85f305b1156c7c881cd11b"}, + {file = "prometheus_client-0.17.1-py3-none-any.whl", hash = "sha256:e537f37160f6807b8202a6fc4764cdd19bac5480ddd3e0d463c3002b34462101"}, + {file = "prometheus_client-0.17.1.tar.gz", hash = "sha256:21e674f39831ae3f8acde238afd9a27a37d0d2fb5a28ea094f0ce25d2cbf2091"}, ] [package.extras] From 6396527015e995b7bf0990357d8c5e2c3a4bc842 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Jul 2023 10:30:46 +0100 Subject: [PATCH 227/562] Bump pydantic from 1.10.10 to 1.10.11 (#15946) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 74 ++++++++++++++++++++++++++--------------------------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/poetry.lock b/poetry.lock index dd584a0c55..8ce11440ad 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1833,47 +1833,47 @@ files = [ [[package]] name = "pydantic" -version = "1.10.10" +version = "1.10.11" description = "Data validation and settings management using python type hints" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:adad1ee4ab9888f12dac2529276704e719efcf472e38df7813f5284db699b4ec"}, - {file = "pydantic-1.10.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7a7db03339893feef2092ff7b1afc9497beed15ebd4af84c3042a74abce02d48"}, - {file = "pydantic-1.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67b3714b97ff84b2689654851c2426389bcabfac9080617bcf4306c69db606f6"}, - {file = "pydantic-1.10.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edfdf0a5abc5c9bf2052ebaec20e67abd52e92d257e4f2d30e02c354ed3e6030"}, - {file = "pydantic-1.10.10-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20a3b30fd255eeeb63caa9483502ba96b7795ce5bf895c6a179b3d909d9f53a6"}, - {file = "pydantic-1.10.10-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:db4c7f7e60ca6f7d6c1785070f3e5771fcb9b2d88546e334d2f2c3934d949028"}, - {file = "pydantic-1.10.10-cp310-cp310-win_amd64.whl", hash = "sha256:a2d5be50ac4a0976817144c7d653e34df2f9436d15555189f5b6f61161d64183"}, - {file = "pydantic-1.10.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:566a04ba755e8f701b074ffb134ddb4d429f75d5dced3fbd829a527aafe74c71"}, - {file = "pydantic-1.10.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f79db3652ed743309f116ba863dae0c974a41b688242482638b892246b7db21d"}, - {file = "pydantic-1.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c62376890b819bebe3c717a9ac841a532988372b7e600e76f75c9f7c128219d5"}, - {file = "pydantic-1.10.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4870f13a4fafd5bc3e93cff3169222534fad867918b188e83ee0496452978437"}, - {file = "pydantic-1.10.10-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:990027e77cda6072a566e433b6962ca3b96b4f3ae8bd54748e9d62a58284d9d7"}, - {file = "pydantic-1.10.10-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8c40964596809eb616d94f9c7944511f620a1103d63d5510440ed2908fc410af"}, - {file = "pydantic-1.10.10-cp311-cp311-win_amd64.whl", 
hash = "sha256:ea9eebc2ebcba3717e77cdeee3f6203ffc0e78db5f7482c68b1293e8cc156e5e"}, - {file = "pydantic-1.10.10-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:762aa598f79b4cac2f275d13336b2dd8662febee2a9c450a49a2ab3bec4b385f"}, - {file = "pydantic-1.10.10-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dab5219659f95e357d98d70577b361383057fb4414cfdb587014a5f5c595f7b"}, - {file = "pydantic-1.10.10-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3d4ee957a727ccb5a36f1b0a6dbd9fad5dedd2a41eada99a8df55c12896e18d"}, - {file = "pydantic-1.10.10-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b69f9138dec566962ec65623c9d57bee44412d2fc71065a5f3ebb3820bdeee96"}, - {file = "pydantic-1.10.10-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7aa75d1bd9cc275cf9782f50f60cddaf74cbaae19b6ada2a28e737edac420312"}, - {file = "pydantic-1.10.10-cp37-cp37m-win_amd64.whl", hash = "sha256:9f62a727f5c590c78c2d12fda302d1895141b767c6488fe623098f8792255fe5"}, - {file = "pydantic-1.10.10-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aac218feb4af73db8417ca7518fb3bade4534fcca6e3fb00f84966811dd94450"}, - {file = "pydantic-1.10.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:88546dc10a40b5b52cae87d64666787aeb2878f9a9b37825aedc2f362e7ae1da"}, - {file = "pydantic-1.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c41bbaae89e32fc582448e71974de738c055aef5ab474fb25692981a08df808a"}, - {file = "pydantic-1.10.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b71bd504d1573b0b722ae536e8ffb796bedeef978979d076bf206e77dcc55a5"}, - {file = "pydantic-1.10.10-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e088e3865a2270ecbc369924cd7d9fbc565667d9158e7f304e4097ebb9cf98dd"}, - {file = "pydantic-1.10.10-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3403a090db45d4027d2344859d86eb797484dfda0706cf87af79ace6a35274ef"}, - {file = "pydantic-1.10.10-cp38-cp38-win_amd64.whl", hash = "sha256:e0014e29637125f4997c174dd6167407162d7af0da73414a9340461ea8573252"}, - {file = "pydantic-1.10.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9965e49c6905840e526e5429b09e4c154355b6ecc0a2f05492eda2928190311d"}, - {file = "pydantic-1.10.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:748d10ab6089c5d196e1c8be9de48274f71457b01e59736f7a09c9dc34f51887"}, - {file = "pydantic-1.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86936c383f7c38fd26d35107eb669c85d8f46dfceae873264d9bab46fe1c7dde"}, - {file = "pydantic-1.10.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a26841be620309a9697f5b1ffc47dce74909e350c5315ccdac7a853484d468a"}, - {file = "pydantic-1.10.10-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:409b810f387610cc7405ab2fa6f62bdf7ea485311845a242ebc0bd0496e7e5ac"}, - {file = "pydantic-1.10.10-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ce937a2a2c020bcad1c9fde02892392a1123de6dda906ddba62bfe8f3e5989a2"}, - {file = "pydantic-1.10.10-cp39-cp39-win_amd64.whl", hash = "sha256:37ebddef68370e6f26243acc94de56d291e01227a67b2ace26ea3543cf53dd5f"}, - {file = "pydantic-1.10.10-py3-none-any.whl", hash = "sha256:a5939ec826f7faec434e2d406ff5e4eaf1716eb1f247d68cd3d0b3612f7b4c8a"}, - {file = "pydantic-1.10.10.tar.gz", hash = "sha256:3b8d5bd97886f9eb59260594207c9f57dce14a6f869c6ceea90188715d29921a"}, + {file = "pydantic-1.10.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:ff44c5e89315b15ff1f7fdaf9853770b810936d6b01a7bcecaa227d2f8fe444f"}, + {file = "pydantic-1.10.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a6c098d4ab5e2d5b3984d3cb2527e2d6099d3de85630c8934efcfdc348a9760e"}, + {file = "pydantic-1.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16928fdc9cb273c6af00d9d5045434c39afba5f42325fb990add2c241402d151"}, + {file = "pydantic-1.10.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0588788a9a85f3e5e9ebca14211a496409cb3deca5b6971ff37c556d581854e7"}, + {file = "pydantic-1.10.11-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e9baf78b31da2dc3d3f346ef18e58ec5f12f5aaa17ac517e2ffd026a92a87588"}, + {file = "pydantic-1.10.11-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:373c0840f5c2b5b1ccadd9286782852b901055998136287828731868027a724f"}, + {file = "pydantic-1.10.11-cp310-cp310-win_amd64.whl", hash = "sha256:c3339a46bbe6013ef7bdd2844679bfe500347ac5742cd4019a88312aa58a9847"}, + {file = "pydantic-1.10.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:08a6c32e1c3809fbc49debb96bf833164f3438b3696abf0fbeceb417d123e6eb"}, + {file = "pydantic-1.10.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a451ccab49971af043ec4e0d207cbc8cbe53dbf148ef9f19599024076fe9c25b"}, + {file = "pydantic-1.10.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b02d24f7b2b365fed586ed73582c20f353a4c50e4be9ba2c57ab96f8091ddae"}, + {file = "pydantic-1.10.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f34739a89260dfa420aa3cbd069fbcc794b25bbe5c0a214f8fb29e363484b66"}, + {file = "pydantic-1.10.11-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e297897eb4bebde985f72a46a7552a7556a3dd11e7f76acda0c1093e3dbcf216"}, + {file = "pydantic-1.10.11-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d185819a7a059550ecb85d5134e7d40f2565f3dd94cfd870132c5f91a89cf58c"}, + {file = "pydantic-1.10.11-cp311-cp311-win_amd64.whl", hash = "sha256:4400015f15c9b464c9db2d5d951b6a780102cfa5870f2c036d37c23b56f7fc1b"}, + {file = "pydantic-1.10.11-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2417de68290434461a266271fc57274a138510dca19982336639484c73a07af6"}, + {file = "pydantic-1.10.11-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:331c031ba1554b974c98679bd0780d89670d6fd6f53f5d70b10bdc9addee1713"}, + {file = "pydantic-1.10.11-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8268a735a14c308923e8958363e3a3404f6834bb98c11f5ab43251a4e410170c"}, + {file = "pydantic-1.10.11-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:44e51ba599c3ef227e168424e220cd3e544288c57829520dc90ea9cb190c3248"}, + {file = "pydantic-1.10.11-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d7781f1d13b19700b7949c5a639c764a077cbbdd4322ed505b449d3ca8edcb36"}, + {file = "pydantic-1.10.11-cp37-cp37m-win_amd64.whl", hash = "sha256:7522a7666157aa22b812ce14c827574ddccc94f361237ca6ea8bb0d5c38f1629"}, + {file = "pydantic-1.10.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc64eab9b19cd794a380179ac0e6752335e9555d214cfcb755820333c0784cb3"}, + {file = "pydantic-1.10.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8dc77064471780262b6a68fe67e013298d130414d5aaf9b562c33987dbd2cf4f"}, + {file = "pydantic-1.10.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:fe429898f2c9dd209bd0632a606bddc06f8bce081bbd03d1c775a45886e2c1cb"}, + {file = "pydantic-1.10.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:192c608ad002a748e4a0bed2ddbcd98f9b56df50a7c24d9a931a8c5dd053bd3d"}, + {file = "pydantic-1.10.11-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ef55392ec4bb5721f4ded1096241e4b7151ba6d50a50a80a2526c854f42e6a2f"}, + {file = "pydantic-1.10.11-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:41e0bb6efe86281623abbeeb0be64eab740c865388ee934cd3e6a358784aca6e"}, + {file = "pydantic-1.10.11-cp38-cp38-win_amd64.whl", hash = "sha256:265a60da42f9f27e0b1014eab8acd3e53bd0bad5c5b4884e98a55f8f596b2c19"}, + {file = "pydantic-1.10.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:469adf96c8e2c2bbfa655fc7735a2a82f4c543d9fee97bd113a7fb509bf5e622"}, + {file = "pydantic-1.10.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e6cbfbd010b14c8a905a7b10f9fe090068d1744d46f9e0c021db28daeb8b6de1"}, + {file = "pydantic-1.10.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abade85268cc92dff86d6effcd917893130f0ff516f3d637f50dadc22ae93999"}, + {file = "pydantic-1.10.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9738b0f2e6c70f44ee0de53f2089d6002b10c33264abee07bdb5c7f03038303"}, + {file = "pydantic-1.10.11-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:787cf23e5a0cde753f2eabac1b2e73ae3844eb873fd1f5bdbff3048d8dbb7604"}, + {file = "pydantic-1.10.11-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:174899023337b9fc685ac8adaa7b047050616136ccd30e9070627c1aaab53a13"}, + {file = "pydantic-1.10.11-cp39-cp39-win_amd64.whl", hash = "sha256:1954f8778489a04b245a1e7b8b22a9d3ea8ef49337285693cf6959e4b757535e"}, + {file = "pydantic-1.10.11-py3-none-any.whl", hash = "sha256:008c5e266c8aada206d0627a011504e14268a62091450210eda7c07fabe6963e"}, + {file = "pydantic-1.10.11.tar.gz", hash = "sha256:f66d479cf7eb331372c470614be6511eae96f1f120344c25f3f9bb59fb1b5528"}, ] [package.dependencies] From b0e66721a5f90863ea372b76ea3281f3cdfc710a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Jul 2023 10:33:47 +0100 Subject: [PATCH 228/562] Bump typing-extensions from 4.5.0 to 4.7.1 (#15947) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 8ce11440ad..14f190613d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2993,13 +2993,13 @@ files = [ [[package]] name = "typing-extensions" -version = "4.5.0" +version = "4.7.1" description = "Backported and Experimental Type Hints for Python 3.7+" optional = false python-versions = ">=3.7" files = [ - {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"}, - {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"}, + {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, + {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, ] [[package]] From 0d522b58a61864ea48148c24447baaef00d8ddef Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Jul 2023 
10:39:51 +0100 Subject: [PATCH 229/562] Bump jsonschema from 4.17.3 to 4.18.3 (#15948) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 186 ++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 144 insertions(+), 42 deletions(-) diff --git a/poetry.lock b/poetry.lock index 14f190613d..27c8b103e5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -973,25 +973,42 @@ i18n = ["Babel (>=2.7)"] [[package]] name = "jsonschema" -version = "4.17.3" +version = "4.18.3" description = "An implementation of JSON Schema validation for Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"}, - {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"}, + {file = "jsonschema-4.18.3-py3-none-any.whl", hash = "sha256:aab78b34c2de001c6b692232f08c21a97b436fe18e0b817bf0511046924fceef"}, + {file = "jsonschema-4.18.3.tar.gz", hash = "sha256:64b7104d72efe856bea49ca4af37a14a9eba31b40bb7238179f3803130fd34d9"}, ] [package.dependencies] -attrs = ">=17.4.0" +attrs = ">=22.2.0" importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} +jsonschema-specifications = ">=2023.03.6" pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} -pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2" +referencing = ">=0.28.4" +rpds-py = ">=0.7.1" [package.extras] format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] +[[package]] +name = "jsonschema-specifications" +version = "2023.6.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema_specifications-2023.6.1-py3-none-any.whl", hash = "sha256:3d2b82663aff01815f744bb5c7887e2121a63399b49b104a3c96145474d091d7"}, + {file = "jsonschema_specifications-2023.6.1.tar.gz", hash = "sha256:ca1c4dd059a9e7b34101cf5b3ab7ff1d18b139f35950d598d629837ef66e8f28"}, +] + +[package.dependencies] +importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} +referencing = ">=0.28.0" + [[package]] name = "keyring" version = "23.13.1" @@ -2014,42 +2031,6 @@ cryptography = ">=38.0.0,<40.0.0 || >40.0.0,<40.0.1 || >40.0.1,<42" docs = ["sphinx (!=5.2.0,!=5.2.0.post0)", "sphinx-rtd-theme"] test = ["flaky", "pretend", "pytest (>=3.0.1)"] -[[package]] -name = "pyrsistent" -version = "0.19.3" -description = "Persistent/Functional/Immutable data structures" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pyrsistent-0.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:20460ac0ea439a3e79caa1dbd560344b64ed75e85d8703943e0b66c2a6150e4a"}, - {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c18264cb84b5e68e7085a43723f9e4c1fd1d935ab240ce02c0324a8e01ccb64"}, - {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b774f9288dda8d425adb6544e5903f1fb6c273ab3128a355c6b972b7df39dcf"}, - {file = "pyrsistent-0.19.3-cp310-cp310-win32.whl", hash = 
"sha256:5a474fb80f5e0d6c9394d8db0fc19e90fa540b82ee52dba7d246a7791712f74a"}, - {file = "pyrsistent-0.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:49c32f216c17148695ca0e02a5c521e28a4ee6c5089f97e34fe24163113722da"}, - {file = "pyrsistent-0.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9"}, - {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab2204234c0ecd8b9368dbd6a53e83c3d4f3cab10ecaf6d0e772f456c442393"}, - {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e42296a09e83028b3476f7073fcb69ffebac0e66dbbfd1bd847d61f74db30f19"}, - {file = "pyrsistent-0.19.3-cp311-cp311-win32.whl", hash = "sha256:64220c429e42a7150f4bfd280f6f4bb2850f95956bde93c6fda1b70507af6ef3"}, - {file = "pyrsistent-0.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:016ad1afadf318eb7911baa24b049909f7f3bb2c5b1ed7b6a8f21db21ea3faa8"}, - {file = "pyrsistent-0.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4db1bd596fefd66b296a3d5d943c94f4fac5bcd13e99bffe2ba6a759d959a28"}, - {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeda827381f5e5d65cced3024126529ddc4289d944f75e090572c77ceb19adbf"}, - {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42ac0b2f44607eb92ae88609eda931a4f0dfa03038c44c772e07f43e738bcac9"}, - {file = "pyrsistent-0.19.3-cp37-cp37m-win32.whl", hash = "sha256:e8f2b814a3dc6225964fa03d8582c6e0b6650d68a232df41e3cc1b66a5d2f8d1"}, - {file = "pyrsistent-0.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9bb60a40a0ab9aba40a59f68214eed5a29c6274c83b2cc206a359c4a89fa41b"}, - {file = "pyrsistent-0.19.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a2471f3f8693101975b1ff85ffd19bb7ca7dd7c38f8a81701f67d6b4f97b87d8"}, - {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc5d149f31706762c1f8bda2e8c4f8fead6e80312e3692619a75301d3dbb819a"}, - {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3311cb4237a341aa52ab8448c27e3a9931e2ee09561ad150ba94e4cfd3fc888c"}, - {file = "pyrsistent-0.19.3-cp38-cp38-win32.whl", hash = "sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c"}, - {file = "pyrsistent-0.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:c147257a92374fde8498491f53ffa8f4822cd70c0d85037e09028e478cababb7"}, - {file = "pyrsistent-0.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b735e538f74ec31378f5a1e3886a26d2ca6351106b4dfde376a26fc32a044edc"}, - {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99abb85579e2165bd8522f0c0138864da97847875ecbd45f3e7e2af569bfc6f2"}, - {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a8cb235fa6d3fd7aae6a4f1429bbb1fec1577d978098da1252f0489937786f3"}, - {file = "pyrsistent-0.19.3-cp39-cp39-win32.whl", hash = "sha256:c74bed51f9b41c48366a286395c67f4e894374306b197e62810e0fdaf2364da2"}, - {file = "pyrsistent-0.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:878433581fc23e906d947a6814336eee031a00e6defba224234169ae3d3d6a98"}, - {file = "pyrsistent-0.19.3-py3-none-any.whl", hash = "sha256:ccf0d6bd208f8111179f0c26fdf84ed7c3891982f2edaeae7422575f47e66b64"}, - 
{file = "pyrsistent-0.19.3.tar.gz", hash = "sha256:1a2994773706bbb4995c31a97bc94f1418314923bd1048c6d964837040376440"}, -] - [[package]] name = "pysaml2" version = "7.3.1" @@ -2178,6 +2159,21 @@ Pygments = ">=2.5.1" [package.extras] md = ["cmarkgfm (>=0.8.0)"] +[[package]] +name = "referencing" +version = "0.29.1" +description = "JSON Referencing + Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "referencing-0.29.1-py3-none-any.whl", hash = "sha256:d3c8f323ee1480095da44d55917cfb8278d73d6b4d5f677e3e40eb21314ac67f"}, + {file = "referencing-0.29.1.tar.gz", hash = "sha256:90cb53782d550ba28d2166ef3f55731f38397def8832baac5d45235f1995e35e"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + [[package]] name = "requests" version = "2.31.0" @@ -2246,6 +2242,112 @@ typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9 [package.extras] jupyter = ["ipywidgets (>=7.5.1,<9)"] +[[package]] +name = "rpds-py" +version = "0.8.10" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "rpds_py-0.8.10-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:93d06cccae15b3836247319eee7b6f1fdcd6c10dabb4e6d350d27bd0bdca2711"}, + {file = "rpds_py-0.8.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3816a890a6a9e9f1de250afa12ca71c9a7a62f2b715a29af6aaee3aea112c181"}, + {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7c6304b894546b5a6bdc0fe15761fa53fe87d28527a7142dae8de3c663853e1"}, + {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ad3bfb44c8840fb4be719dc58e229f435e227fbfbe133dc33f34981ff622a8f8"}, + {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:14f1c356712f66653b777ecd8819804781b23dbbac4eade4366b94944c9e78ad"}, + {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:82bb361cae4d0a627006dadd69dc2f36b7ad5dc1367af9d02e296ec565248b5b"}, + {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2e3c4f2a8e3da47f850d7ea0d7d56720f0f091d66add889056098c4b2fd576c"}, + {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15a90d0ac11b4499171067ae40a220d1ca3cb685ec0acc356d8f3800e07e4cb8"}, + {file = "rpds_py-0.8.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:70bb9c8004b97b4ef7ae56a2aa56dfaa74734a0987c78e7e85f00004ab9bf2d0"}, + {file = "rpds_py-0.8.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d64f9f88d5203274a002b54442cafc9c7a1abff2a238f3e767b70aadf919b451"}, + {file = "rpds_py-0.8.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ccbbd276642788c4376fbe8d4e6c50f0fb4972ce09ecb051509062915891cbf0"}, + {file = "rpds_py-0.8.10-cp310-none-win32.whl", hash = "sha256:fafc0049add8043ad07ab5382ee80d80ed7e3699847f26c9a5cf4d3714d96a84"}, + {file = "rpds_py-0.8.10-cp310-none-win_amd64.whl", hash = "sha256:915031002c86a5add7c6fd4beb601b2415e8a1c956590a5f91d825858e92fe6e"}, + {file = "rpds_py-0.8.10-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:84eb541a44f7a18f07a6bfc48b95240739e93defe1fdfb4f2a295f37837945d7"}, + {file = "rpds_py-0.8.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f59996d0550894affaad8743e97b9b9c98f638b221fac12909210ec3d9294786"}, + {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:f9adb5664b78fcfcd830000416c8cc69853ef43cb084d645b3f1f0296edd9bae"}, + {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f96f3f98fbff7af29e9edf9a6584f3c1382e7788783d07ba3721790625caa43e"}, + {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:376b8de737401050bd12810003d207e824380be58810c031f10ec563ff6aef3d"}, + {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d1c2bc319428d50b3e0fa6b673ab8cc7fa2755a92898db3a594cbc4eeb6d1f7"}, + {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73a1e48430f418f0ac3dfd87860e4cc0d33ad6c0f589099a298cb53724db1169"}, + {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:134ec8f14ca7dbc6d9ae34dac632cdd60939fe3734b5d287a69683c037c51acb"}, + {file = "rpds_py-0.8.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4b519bac7c09444dd85280fd60f28c6dde4389c88dddf4279ba9b630aca3bbbe"}, + {file = "rpds_py-0.8.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9cd57981d9fab04fc74438d82460f057a2419974d69a96b06a440822d693b3c0"}, + {file = "rpds_py-0.8.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:69d089c026f6a8b9d64a06ff67dc3be196707b699d7f6ca930c25f00cf5e30d8"}, + {file = "rpds_py-0.8.10-cp311-none-win32.whl", hash = "sha256:220bdcad2d2936f674650d304e20ac480a3ce88a40fe56cd084b5780f1d104d9"}, + {file = "rpds_py-0.8.10-cp311-none-win_amd64.whl", hash = "sha256:6c6a0225b8501d881b32ebf3f5807a08ad3685b5eb5f0a6bfffd3a6e039b2055"}, + {file = "rpds_py-0.8.10-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:e3d0cd3dff0e7638a7b5390f3a53057c4e347f4ef122ee84ed93fc2fb7ea4aa2"}, + {file = "rpds_py-0.8.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d77dff3a5aa5eedcc3da0ebd10ff8e4969bc9541aa3333a8d41715b429e99f47"}, + {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41c89a366eae49ad9e65ed443a8f94aee762931a1e3723749d72aeac80f5ef2f"}, + {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3793c21494bad1373da517001d0849eea322e9a049a0e4789e50d8d1329df8e7"}, + {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:805a5f3f05d186c5d50de2e26f765ba7896d0cc1ac5b14ffc36fae36df5d2f10"}, + {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b01b39ad5411563031ea3977bbbc7324d82b088e802339e6296f082f78f6115c"}, + {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3f1e860be21f3e83011116a65e7310486300e08d9a3028e73e8d13bb6c77292"}, + {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a13c8e56c46474cd5958d525ce6a9996727a83d9335684e41f5192c83deb6c58"}, + {file = "rpds_py-0.8.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:93d99f957a300d7a4ced41615c45aeb0343bb8f067c42b770b505de67a132346"}, + {file = "rpds_py-0.8.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:148b0b38d719c0760e31ce9285a9872972bdd7774969a4154f40c980e5beaca7"}, + {file = "rpds_py-0.8.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3cc5e5b5514796f45f03a568981971b12a3570f3de2e76114f7dc18d4b60a3c4"}, + {file = "rpds_py-0.8.10-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:e8e24b210a4deb5a7744971f8f77393005bae7f873568e37dfd9effe808be7f7"}, + {file = 
"rpds_py-0.8.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b41941583adce4242af003d2a8337b066ba6148ca435f295f31ac6d9e4ea2722"}, + {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c490204e16bca4f835dba8467869fe7295cdeaa096e4c5a7af97f3454a97991"}, + {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ee45cd1d84beed6cbebc839fd85c2e70a3a1325c8cfd16b62c96e2ffb565eca"}, + {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a8ca409f1252e1220bf09c57290b76cae2f14723746215a1e0506472ebd7bdf"}, + {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96b293c0498c70162effb13100624c5863797d99df75f2f647438bd10cbf73e4"}, + {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4627520a02fccbd324b33c7a83e5d7906ec746e1083a9ac93c41ac7d15548c7"}, + {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e39d7ab0c18ac99955b36cd19f43926450baba21e3250f053e0704d6ffd76873"}, + {file = "rpds_py-0.8.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ba9f1d1ebe4b63801977cec7401f2d41e888128ae40b5441270d43140efcad52"}, + {file = "rpds_py-0.8.10-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:802f42200d8caf7f25bbb2a6464cbd83e69d600151b7e3b49f49a47fa56b0a38"}, + {file = "rpds_py-0.8.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d19db6ba816e7f59fc806c690918da80a7d186f00247048cd833acdab9b4847b"}, + {file = "rpds_py-0.8.10-cp38-none-win32.whl", hash = "sha256:7947e6e2c2ad68b1c12ee797d15e5f8d0db36331200b0346871492784083b0c6"}, + {file = "rpds_py-0.8.10-cp38-none-win_amd64.whl", hash = "sha256:fa326b3505d5784436d9433b7980171ab2375535d93dd63fbcd20af2b5ca1bb6"}, + {file = "rpds_py-0.8.10-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:7b38a9ac96eeb6613e7f312cd0014de64c3f07000e8bf0004ad6ec153bac46f8"}, + {file = "rpds_py-0.8.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c4d42e83ddbf3445e6514f0aff96dca511421ed0392d9977d3990d9f1ba6753c"}, + {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b21575031478609db6dbd1f0465e739fe0e7f424a8e7e87610a6c7f68b4eb16"}, + {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:574868858a7ff6011192c023a5289158ed20e3f3b94b54f97210a773f2f22921"}, + {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae40f4a70a1f40939d66ecbaf8e7edc144fded190c4a45898a8cfe19d8fc85ea"}, + {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37f7ee4dc86db7af3bac6d2a2cedbecb8e57ce4ed081f6464510e537589f8b1e"}, + {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:695f642a3a5dbd4ad2ffbbacf784716ecd87f1b7a460843b9ddf965ccaeafff4"}, + {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f43ab4cb04bde6109eb2555528a64dfd8a265cc6a9920a67dcbde13ef53a46c8"}, + {file = "rpds_py-0.8.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a11ab0d97be374efd04f640c04fe5c2d3dabc6dfb998954ea946ee3aec97056d"}, + {file = "rpds_py-0.8.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:92cf5b3ee60eef41f41e1a2cabca466846fb22f37fc580ffbcb934d1bcab225a"}, + {file = "rpds_py-0.8.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:ceaac0c603bf5ac2f505a78b2dcab78d3e6b706be6596c8364b64cc613d208d2"}, + {file = "rpds_py-0.8.10-cp39-none-win32.whl", hash = "sha256:dd4f16e57c12c0ae17606c53d1b57d8d1c8792efe3f065a37cb3341340599d49"}, + {file = "rpds_py-0.8.10-cp39-none-win_amd64.whl", hash = "sha256:c03a435d26c3999c2a8642cecad5d1c4d10c961817536af52035f6f4ee2f5dd0"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:0da53292edafecba5e1d8c1218f99babf2ed0bf1c791d83c0ab5c29b57223068"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7d20a8ed227683401cc508e7be58cba90cc97f784ea8b039c8cd01111e6043e0"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97cab733d303252f7c2f7052bf021a3469d764fc2b65e6dbef5af3cbf89d4892"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8c398fda6df361a30935ab4c4bccb7f7a3daef2964ca237f607c90e9f3fdf66f"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2eb4b08c45f8f8d8254cdbfacd3fc5d6b415d64487fb30d7380b0d0569837bf1"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7dfb1cbb895810fa2b892b68153c17716c6abaa22c7dc2b2f6dcf3364932a1c"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89c92b74e8bf6f53a6f4995fd52f4bd510c12f103ee62c99e22bc9e05d45583c"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e9c0683cb35a9b5881b41bc01d5568ffc667910d9dbc632a1fba4e7d59e98773"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:0eeb2731708207d0fe2619afe6c4dc8cb9798f7de052da891de5f19c0006c315"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:7495010b658ec5b52835f21d8c8b1a7e52e194c50f095d4223c0b96c3da704b1"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:c72ebc22e70e04126158c46ba56b85372bc4d54d00d296be060b0db1671638a4"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2cd3045e7f6375dda64ed7db1c5136826facb0159ea982f77d9cf6125025bd34"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:2418cf17d653d24ffb8b75e81f9f60b7ba1b009a23298a433a4720b2a0a17017"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a2edf8173ac0c7a19da21bc68818be1321998528b5e3f748d6ee90c0ba2a1fd"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f29b8c55fd3a2bc48e485e37c4e2df3317f43b5cc6c4b6631c33726f52ffbb3"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a7d20c1cf8d7b3960c5072c265ec47b3f72a0c608a9a6ee0103189b4f28d531"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:521fc8861a86ae54359edf53a15a05fabc10593cea7b3357574132f8427a5e5a"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5c191713e98e7c28800233f039a32a42c1a4f9a001a8a0f2448b07391881036"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:083df0fafe199371206111583c686c985dddaf95ab3ee8e7b24f1fda54515d09"}, + {file = 
"rpds_py-0.8.10-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:ed41f3f49507936a6fe7003985ea2574daccfef999775525d79eb67344e23767"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:2614c2732bf45de5c7f9e9e54e18bc78693fa2f635ae58d2895b7965e470378c"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:c60528671d9d467009a6ec284582179f6b88651e83367d0ab54cb739021cd7de"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ee744fca8d1ea822480a2a4e7c5f2e1950745477143668f0b523769426060f29"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a38b9f526d0d6cbdaa37808c400e3d9f9473ac4ff64d33d9163fd05d243dbd9b"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60e0e86e870350e03b3e25f9b1dd2c6cc72d2b5f24e070249418320a6f9097b7"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f53f55a8852f0e49b0fc76f2412045d6ad9d5772251dea8f55ea45021616e7d5"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c493365d3fad241d52f096e4995475a60a80f4eba4d3ff89b713bc65c2ca9615"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:300eb606e6b94a7a26f11c8cc8ee59e295c6649bd927f91e1dbd37a4c89430b6"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a665f6f1a87614d1c3039baf44109094926dedf785e346d8b0a728e9cabd27a"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:927d784648211447201d4c6f1babddb7971abad922b32257ab74de2f2750fad0"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:c200b30dd573afa83847bed7e3041aa36a8145221bf0cfdfaa62d974d720805c"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:08166467258fd0240a1256fce272f689f2360227ee41c72aeea103e9e4f63d2b"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:996cc95830de9bc22b183661d95559ec6b3cd900ad7bc9154c4cbf5be0c9b734"}, + {file = "rpds_py-0.8.10.tar.gz", hash = "sha256:13e643ce8ad502a0263397362fb887594b49cf84bf518d6038c16f235f2bcea4"}, +] + [[package]] name = "ruff" version = "0.0.277" From 1768dd3c274829bc147a96258c11bc75cbf762bd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Jul 2023 10:45:46 +0100 Subject: [PATCH 230/562] Bump serde_json from 1.0.100 to 1.0.103 (#15950) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d6fa96ea77..a3ff932cd8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -352,9 +352,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.100" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f1e14e89be7aa4c4b78bdbdc9eb5bf8517829a600ae8eaa39a6e1d960b5185c" +checksum = "d03b412469450d4404fe8499a268edd7f8b79fecb074b0d812ad64ca21f4031b" dependencies = [ "itoa", "ryu", From 43ee5d5bac043c8d695b8c60e34a734432778cdf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Jul 2023 10:46:26 +0100 Subject: [PATCH 231/562] Bump pyo3-log from 0.8.2 to 0.8.3 
(#15951) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a3ff932cd8..ee2409e785 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -229,9 +229,9 @@ dependencies = [ [[package]] name = "pyo3-log" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c94ff6535a6bae58d7d0b85e60d4c53f7f84d0d0aa35d6a28c3f3e70bfe51444" +checksum = "f47b0777feb17f61eea78667d61103758b243a871edc09a7786500a50467b605" dependencies = [ "arc-swap", "log", From c692283751799758afcd036bb386f843ce002979 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Jul 2023 13:20:34 +0100 Subject: [PATCH 232/562] Bump anyhow from 1.0.71 to 1.0.72 (#15949) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ee2409e785..2264e67245 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.71" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" +checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" [[package]] name = "arc-swap" From 1c802de626de3293049206cb788af15cbc8ea17f Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 18 Jul 2023 03:49:21 -0500 Subject: [PATCH 233/562] Re-introduce the outbound federation proxy (#15913) Allow configuring the set of workers to proxy outbound federation traffic through (`outbound_federation_restricted_to`). This is useful when you have a worker setup with `federation_sender` instances responsible for sending outbound federation requests and want to make sure *all* outbound federation traffic goes through those instances. Before this change, the generic workers would still contact federation themselves for things like profile lookups, backfill, etc. This PR allows you to set stricter access controls (e.g. a firewall) for all workers and only allow the `federation_sender` instances to contact the outside world.
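In outline, the routing decision this patch introduces boils down to the sketch below. This is a simplified, self-contained model of the pieces added in the diff (the worker names are made up for illustration), not the verbatim implementation:

```python
from typing import List, Optional


class OutboundFederationRestrictedTo:
    """Simplified stand-in for the config class added in synapse/config/workers.py."""

    def __init__(self, instances: Optional[List[str]]) -> None:
        self.instances = instances

    def __contains__(self, instance: str) -> bool:
        # If the option is unset, every instance may talk to federation directly.
        return self.instances is None or instance in self.instances


def choose_agent(instance_name: str, restricted_to: OutboundFederationRestrictedTo) -> str:
    # Mirrors the branch in MatrixFederationHttpClient.__init__ below: instances
    # named in the restriction talk to federation directly, all other workers
    # send their requests through the proxy instead.
    if instance_name in restricted_to:
        return "direct (MatrixFederationAgent)"
    return "proxied (ProxyAgent, via matrix-federation:// URIs)"


restricted = OutboundFederationRestrictedTo(["federation_sender1"])
assert choose_agent("federation_sender1", restricted) == "direct (MatrixFederationAgent)"
assert choose_agent("worker1", restricted) == "proxied (ProxyAgent, via matrix-federation:// URIs)"
```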
--- changelog.d/15913.feature | 1 + .../configuration/config_documentation.md | 33 +- docs/workers.md | 24 ++ synapse/api/errors.py | 7 + synapse/app/_base.py | 2 + synapse/app/generic_worker.py | 1 + synapse/app/homeserver.py | 1 + synapse/config/workers.py | 45 ++- synapse/http/client.py | 7 +- synapse/http/connectproxyclient.py | 20 +- synapse/http/matrixfederationclient.py | 142 ++++++++- synapse/http/proxy.py | 283 +++++++++++++++++ synapse/http/proxyagent.py | 141 ++++++++- synapse/http/server.py | 55 ++-- synapse/http/site.py | 27 +- tests/app/test_openid_listener.py | 8 +- tests/handlers/test_device.py | 3 +- tests/handlers/test_federation.py | 2 +- tests/handlers/test_presence.py | 1 - tests/handlers/test_typing.py | 10 + tests/http/test_matrixfederationclient.py | 284 +++++++++++++++++- tests/http/test_proxy.py | 53 ++++ tests/http/test_proxyagent.py | 4 +- tests/replication/_base.py | 3 +- .../test_federation_sender_shard.py | 22 +- tests/rest/client/test_presence.py | 1 - tests/rest/client/test_rooms.py | 2 - tests/storage/test_e2e_room_keys.py | 2 +- tests/storage/test_purge.py | 2 +- tests/storage/test_rollback_worker.py | 4 +- tests/test_server.py | 33 +- tests/unittest.py | 1 + 32 files changed, 1128 insertions(+), 96 deletions(-) create mode 100644 changelog.d/15913.feature create mode 100644 synapse/http/proxy.py create mode 100644 tests/http/test_proxy.py diff --git a/changelog.d/15913.feature b/changelog.d/15913.feature new file mode 100644 index 0000000000..0d77fae2dc --- /dev/null +++ b/changelog.d/15913.feature @@ -0,0 +1 @@ +Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 22cd1772dc..4e6fcd085a 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3960,13 +3960,14 @@ federation_sender_instances: --- ### `instance_map` -When using workers this should be a map from [`worker_name`](#worker_name) to the -HTTP replication listener of the worker, if configured, and to the main process. -Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs -a HTTP replication listener, and that listener should be included in the `instance_map`. -The main process also needs an entry on the `instance_map`, and it should be listed under -`main` **if even one other worker exists**. Ensure the port matches with what is declared -inside the `listener` block for a `replication` listener. +When using workers this should be a map from [`worker_name`](#worker_name) to the HTTP +replication listener of the worker, if configured, and to the main process. Each worker +declared under [`stream_writers`](../../workers.md#stream-writers) and +[`outbound_federation_restricted_to`](#outbound_federation_restricted_to) needs a HTTP +replication listener, and that listener should be included in the `instance_map`. The +main process also needs an entry on the `instance_map`, and it should be listed under +`main` **if even one other worker exists**. Ensure the port matches with what is +declared inside the `listener` block for a `replication` listener. Example configuration: @@ -4004,6 +4005,24 @@ stream_writers: typing: worker1 ``` --- +### `outbound_federation_restricted_to` + +When using workers, you can restrict outbound federation traffic to only go through a +specific subset of workers. 
Any worker specified here must also be in the +[`instance_map`](#instance_map). +[`worker_replication_secret`](#worker_replication_secret) must also be configured to +authorize inter-worker communication. + +```yaml +outbound_federation_restricted_to: + - federation_sender1 + - federation_sender2 +``` + +Also see the [worker +documentation](../../workers.md#restrict-outbound-federation-traffic-to-a-specific-set-of-workers) +for more info. +--- ### `run_background_tasks_on` The [worker](../../workers.md#background-tasks) that is used to run diff --git a/docs/workers.md b/docs/workers.md index cf9c0add82..24bd22724e 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -531,6 +531,30 @@ the stream writer for the `presence` stream: ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/ +#### Restrict outbound federation traffic to a specific set of workers + +The +[`outbound_federation_restricted_to`](usage/configuration/config_documentation.md#outbound_federation_restricted_to) +configuration is useful to make sure outbound federation traffic only goes through a +specified subset of workers. This allows you to set stricter access controls (like a +firewall) for all workers and only allow the `federation_sender` workers to contact the +outside world. + +```yaml +instance_map: + main: + host: localhost + port: 8030 + federation_sender1: + host: localhost + port: 8034 + +outbound_federation_restricted_to: + - federation_sender1 + +worker_replication_secret: "secret_secret" +``` + #### Background tasks There is also support for moving background tasks to a separate diff --git a/synapse/api/errors.py b/synapse/api/errors.py index af894243f8..3546aaf7c3 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -217,6 +217,13 @@ class InvalidAPICallError(SynapseError): super().__init__(HTTPStatus.BAD_REQUEST, msg, Codes.BAD_JSON) +class InvalidProxyCredentialsError(SynapseError): + """Error raised when the proxy credentials are invalid.""" + + def __init__(self, msg: str, errcode: str = Codes.UNKNOWN): + super().__init__(401, msg, errcode) + + class ProxiedRequestError(SynapseError): """An error from a general matrix endpoint, eg. from a proxied Matrix API call.
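For context, the new `InvalidProxyCredentialsError` is what the proxy resource (added later in this patch as `synapse/http/proxy.py`) raises for bad or missing `Proxy-Authorization` credentials, and its `_send_error_response` turns that into a JSON error body. A rough standalone sketch of that mapping, using simplified stand-ins rather than the real Synapse classes:

```python
import json
from typing import Any, Dict, Tuple


class InvalidProxyCredentialsError(Exception):
    """Stand-in for the new error class above (a 401 plus a Matrix errcode)."""

    def __init__(self, msg: str, errcode: str = "M_UNKNOWN") -> None:
        super().__init__(msg)
        self.code = 401
        self.errcode = errcode
        self.msg = msg


def proxy_error_response(e: Exception) -> Tuple[int, Dict[str, Any]]:
    # Bad or missing credentials surface as a 401 with the specific errcode;
    # any other proxying failure is reported as a generic 502.
    if isinstance(e, InvalidProxyCredentialsError):
        return e.code, {"errcode": e.errcode, "err": e.msg}
    return 502, {"errcode": "M_UNKNOWN", "err": f"Error when proxying request: {e}"}


status, body = proxy_error_response(
    InvalidProxyCredentialsError("Missing Proxy-Authorization header.", "M_MISSING_TOKEN")
)
# Prints: 401 {"errcode": "M_MISSING_TOKEN", "err": "Missing Proxy-Authorization header."}
print(status, json.dumps(body))
```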
diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 936b1b0430..a94b57a671 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -386,6 +386,7 @@ def listen_unix( def listen_http( + hs: "HomeServer", listener_config: ListenerConfig, root_resource: Resource, version_string: str, @@ -406,6 +407,7 @@ def listen_http( version_string, max_request_body_size=max_request_body_size, reactor=reactor, + hs=hs, ) if isinstance(listener_config, TCPListenerConfig): diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 7406c3948c..dc79efcc14 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -221,6 +221,7 @@ class GenericWorkerServer(HomeServer): root_resource = create_resource_tree(resources, OptionsResource()) _base.listen_http( + self, listener_config, root_resource, self.version_string, diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 84236ac299..f188c7265a 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -139,6 +139,7 @@ class SynapseHomeServer(HomeServer): root_resource = OptionsResource() ports = listen_http( + self, listener_config, create_resource_tree(resources, root_resource), self.version_string, diff --git a/synapse/config/workers.py b/synapse/config/workers.py index e55ca12a36..6567fb6bb0 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -15,7 +15,7 @@ import argparse import logging -from typing import Any, Dict, List, Union +from typing import Any, Dict, List, Optional, Union import attr from pydantic import BaseModel, Extra, StrictBool, StrictInt, StrictStr @@ -171,6 +171,27 @@ class WriterLocations: ) +@attr.s(auto_attribs=True) +class OutboundFederationRestrictedTo: + """Whether we limit outbound federation to a certain set of instances. + + Attributes: + instances: optional list of instances that can make outbound federation + requests. If None then all instances can make federation requests. + locations: list of instance locations to connect to proxy via. + """ + + instances: Optional[List[str]] + locations: List[InstanceLocationConfig] = attr.Factory(list) + + def __contains__(self, instance: str) -> bool: + # It feels a bit dirty to return `True` if `instances` is `None`, but it makes + # sense in downstream usage in the sense that if + # `outbound_federation_restricted_to` is not configured, then any instance can + # talk to federation (no restrictions so always return `True`). + return self.instances is None or instance in self.instances + + class WorkerConfig(Config): """The workers are processes run separately to the main synapse process. They have their own pid_file and listener configuration. They use the @@ -385,6 +406,28 @@ class WorkerConfig(Config): new_option_name="update_user_directory_from_worker", ) + outbound_federation_restricted_to = config.get( + "outbound_federation_restricted_to", None + ) + self.outbound_federation_restricted_to = OutboundFederationRestrictedTo( + outbound_federation_restricted_to + ) + if outbound_federation_restricted_to: + if not self.worker_replication_secret: + raise ConfigError( + "`worker_replication_secret` must be configured when using `outbound_federation_restricted_to`." + ) + + for instance in outbound_federation_restricted_to: + if instance not in self.instance_map: + raise ConfigError( + "Instance %r is configured in 'outbound_federation_restricted_to' but does not appear in `instance_map` config." 
+ % (instance,) + ) + self.outbound_federation_restricted_to.locations.append( + self.instance_map[instance] + ) + def _should_this_worker_perform_duty( self, config: Dict[str, Any], diff --git a/synapse/http/client.py b/synapse/http/client.py index 09ea93e10d..ca2cdbc6e2 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -1037,7 +1037,12 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol): if reason.check(ResponseDone): self.deferred.callback(self.length) elif reason.check(PotentialDataLoss): - # stolen from https://github.com/twisted/treq/pull/49/files + # This applies to requests which don't set `Content-Length` or a + # `Transfer-Encoding` in the response because in this case the end of the + # response is indicated by the connection being closed, an event which may + # also be due to a transient network problem or other error. But since this + # behavior is expected of some servers (like YouTube), let's ignore it. + # Stolen from https://github.com/twisted/treq/pull/49/files # http://twistedmatrix.com/trac/ticket/4840 self.deferred.callback(self.length) else: diff --git a/synapse/http/connectproxyclient.py b/synapse/http/connectproxyclient.py index 23a60af171..636efc33e8 100644 --- a/synapse/http/connectproxyclient.py +++ b/synapse/http/connectproxyclient.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import abc import base64 import logging from typing import Optional, Union @@ -39,8 +40,14 @@ class ProxyConnectError(ConnectError): pass -@attr.s(auto_attribs=True) class ProxyCredentials: + @abc.abstractmethod + def as_proxy_authorization_value(self) -> bytes: + raise NotImplementedError() + + +@attr.s(auto_attribs=True) +class BasicProxyCredentials(ProxyCredentials): username_password: bytes def as_proxy_authorization_value(self) -> bytes: @@ -55,6 +62,17 @@ class ProxyCredentials: return b"Basic " + base64.encodebytes(self.username_password) +@attr.s(auto_attribs=True) +class BearerProxyCredentials(ProxyCredentials): + access_token: bytes + + def as_proxy_authorization_value(self) -> bytes: + """ + Return the value for a Proxy-Authorization header (i.e. 'Bearer xxx'). 
+ """ + return b"Bearer " + self.access_token + + @implementer(IStreamClientEndpoint) class HTTPConnectProxyEndpoint: """An Endpoint implementation which will send a CONNECT request to an http proxy diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index cc4e258b0f..583c03447c 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -50,7 +50,7 @@ from twisted.internet.interfaces import IReactorTime from twisted.internet.task import Cooperator from twisted.web.client import ResponseFailed from twisted.web.http_headers import Headers -from twisted.web.iweb import IBodyProducer, IResponse +from twisted.web.iweb import IAgent, IBodyProducer, IResponse import synapse.metrics import synapse.util.retryutils @@ -71,7 +71,9 @@ from synapse.http.client import ( encode_query_args, read_body_with_max_size, ) +from synapse.http.connectproxyclient import BearerProxyCredentials from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent +from synapse.http.proxyagent import ProxyAgent from synapse.http.types import QueryParams from synapse.logging import opentracing from synapse.logging.context import make_deferred_yieldable, run_in_background @@ -393,17 +395,41 @@ class MatrixFederationHttpClient: if hs.config.server.user_agent_suffix: user_agent = "%s %s" % (user_agent, hs.config.server.user_agent_suffix) - federation_agent = MatrixFederationAgent( - self.reactor, - tls_client_options_factory, - user_agent.encode("ascii"), - hs.config.server.federation_ip_range_allowlist, - hs.config.server.federation_ip_range_blocklist, + outbound_federation_restricted_to = ( + hs.config.worker.outbound_federation_restricted_to ) + if hs.get_instance_name() in outbound_federation_restricted_to: + # Talk to federation directly + federation_agent: IAgent = MatrixFederationAgent( + self.reactor, + tls_client_options_factory, + user_agent.encode("ascii"), + hs.config.server.federation_ip_range_allowlist, + hs.config.server.federation_ip_range_blocklist, + ) + else: + proxy_authorization_secret = hs.config.worker.worker_replication_secret + assert ( + proxy_authorization_secret is not None + ), "`worker_replication_secret` must be set when using `outbound_federation_restricted_to` (used to authenticate requests across workers)" + federation_proxy_credentials = BearerProxyCredentials( + proxy_authorization_secret.encode("ascii") + ) + + # We need to talk to federation via the proxy via one of the configured + # locations + federation_proxy_locations = outbound_federation_restricted_to.locations + federation_agent = ProxyAgent( + self.reactor, + self.reactor, + tls_client_options_factory, + federation_proxy_locations=federation_proxy_locations, + federation_proxy_credentials=federation_proxy_credentials, + ) # Use a BlocklistingAgentWrapper to prevent circumventing the IP # blocking via IP literals in server names - self.agent = BlocklistingAgentWrapper( + self.agent: IAgent = BlocklistingAgentWrapper( federation_agent, ip_blocklist=hs.config.server.federation_ip_range_blocklist, ) @@ -412,7 +438,6 @@ class MatrixFederationHttpClient: self._store = hs.get_datastores().main self.version_string_bytes = hs.version_string.encode("ascii") self.default_timeout_seconds = hs.config.federation.client_timeout_ms / 1000 - self.max_long_retry_delay_seconds = ( hs.config.federation.max_long_retry_delay_ms / 1000 ) @@ -1131,6 +1156,101 @@ class MatrixFederationHttpClient: Succeeds when we get a 2xx HTTP response. 
The result will be the decoded JSON body. + Raises: + HttpResponseException: If we get an HTTP response code >= 300 + (except 429). + NotRetryingDestination: If we are not yet ready to retry this + server. + FederationDeniedError: If this destination is not on our + federation whitelist + RequestSendFailed: If there were problems connecting to the + remote, due to e.g. DNS failures, connection timeouts etc. + """ + json_dict, _ = await self.get_json_with_headers( + destination=destination, + path=path, + args=args, + retry_on_dns_fail=retry_on_dns_fail, + timeout=timeout, + ignore_backoff=ignore_backoff, + try_trailing_slash_on_400=try_trailing_slash_on_400, + parser=parser, + ) + return json_dict + + @overload + async def get_json_with_headers( + self, + destination: str, + path: str, + args: Optional[QueryParams] = None, + retry_on_dns_fail: bool = True, + timeout: Optional[int] = None, + ignore_backoff: bool = False, + try_trailing_slash_on_400: bool = False, + parser: Literal[None] = None, + ) -> Tuple[JsonDict, Dict[bytes, List[bytes]]]: + ... + + @overload + async def get_json_with_headers( + self, + destination: str, + path: str, + args: Optional[QueryParams] = ..., + retry_on_dns_fail: bool = ..., + timeout: Optional[int] = ..., + ignore_backoff: bool = ..., + try_trailing_slash_on_400: bool = ..., + parser: ByteParser[T] = ..., + ) -> Tuple[T, Dict[bytes, List[bytes]]]: + ... + + async def get_json_with_headers( + self, + destination: str, + path: str, + args: Optional[QueryParams] = None, + retry_on_dns_fail: bool = True, + timeout: Optional[int] = None, + ignore_backoff: bool = False, + try_trailing_slash_on_400: bool = False, + parser: Optional[ByteParser[T]] = None, + ) -> Tuple[Union[JsonDict, T], Dict[bytes, List[bytes]]]: + """GETs some json from the given host homeserver and path + + Args: + destination: The remote server to send the HTTP request to. + + path: The HTTP path. + + args: A dictionary used to create query strings, defaults to + None. + + retry_on_dns_fail: true if the request should be retried on DNS failures + + timeout: number of milliseconds to wait for the response. + self._default_timeout (60s) by default. + + Note that we may make several attempts to send the request; this + timeout applies to the time spent waiting for response headers for + *each* attempt (including connection time) as well as the time spent + reading the response body after a 200 response. + + ignore_backoff: true to ignore the historical backoff data + and try the request anyway. + + try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED + response we should try appending a trailing slash to the end of + the request. Workaround for #3622 in Synapse <= v0.99.3. + + parser: The parser to use to decode the response. Defaults to + parsing as JSON. + + Returns: + Succeeds when we get a 2xx HTTP response. The result will be a tuple of the + decoded JSON body and a dict of the response headers. + Raises: HttpResponseException: If we get an HTTP response code >= 300 (except 429). 
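The relationship between the existing `get_json` and the new `get_json_with_headers` is simple delegation: the former now calls the latter and discards the headers. A self-contained sketch of that shape, with stub coroutines standing in for the real methods on `MatrixFederationHttpClient` (the canned response and destination are hypothetical):

```python
import asyncio
from typing import Any, Dict, List, Tuple


async def get_json_with_headers(
    destination: str, path: str
) -> Tuple[Dict[str, Any], Dict[bytes, List[bytes]]]:
    # Canned response standing in for a real federation round-trip. The headers
    # dict uses raw (bytes) names mapped to lists of raw values, as produced by
    # dict(response.headers.getAllRawHeaders()) in the diff.
    return {"server": {"name": "Synapse"}}, {b"Content-Type": [b"application/json"]}


async def get_json(destination: str, path: str) -> Dict[str, Any]:
    # `get_json` is now a thin wrapper that throws the headers away.
    json_dict, _ = await get_json_with_headers(destination, path)
    return json_dict


async def main() -> None:
    body, headers = await get_json_with_headers(
        "remote.example.com", "/_matrix/federation/v1/version"
    )
    assert headers[b"Content-Type"] == [b"application/json"]
    assert await get_json("remote.example.com", "/_matrix/federation/v1/version") == body


asyncio.run(main())
```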
@@ -1156,6 +1276,8 @@ class MatrixFederationHttpClient: timeout=timeout, ) + headers = dict(response.headers.getAllRawHeaders()) + if timeout is not None: _sec_timeout = timeout / 1000 else: @@ -1173,7 +1295,7 @@ class MatrixFederationHttpClient: parser=parser, ) - return body + return body, headers async def delete_json( self, diff --git a/synapse/http/proxy.py b/synapse/http/proxy.py new file mode 100644 index 0000000000..c9f51e51bc --- /dev/null +++ b/synapse/http/proxy.py @@ -0,0 +1,283 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import json +import logging +import urllib.parse +from typing import TYPE_CHECKING, Any, Optional, Set, Tuple, cast + +from twisted.internet import protocol +from twisted.internet.interfaces import ITCPTransport +from twisted.internet.protocol import connectionDone +from twisted.python import failure +from twisted.python.failure import Failure +from twisted.web.client import ResponseDone +from twisted.web.http_headers import Headers +from twisted.web.iweb import IResponse +from twisted.web.resource import IResource +from twisted.web.server import Request, Site + +from synapse.api.errors import Codes, InvalidProxyCredentialsError +from synapse.http import QuieterFileBodyProducer +from synapse.http.server import _AsyncResource +from synapse.logging.context import make_deferred_yieldable, run_in_background +from synapse.types import ISynapseReactor +from synapse.util.async_helpers import timeout_deferred + +if TYPE_CHECKING: + from synapse.http.site import SynapseRequest + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + +# "Hop-by-hop" headers (as opposed to "end-to-end" headers) as defined by RFC2616 +# section 13.5.1 and referenced in RFC9110 section 7.6.1. These are meant to only be +# consumed by the immediate recipient and not be forwarded on. +HOP_BY_HOP_HEADERS = { + "Connection", + "Keep-Alive", + "Proxy-Authenticate", + "Proxy-Authorization", + "TE", + "Trailers", + "Transfer-Encoding", + "Upgrade", +} + + +def parse_connection_header_value( + connection_header_value: Optional[bytes], +) -> Set[str]: + """ + Parse the `Connection` header to determine which headers we should not be copied + over from the remote response. + + As defined by RFC2616 section 14.10 and RFC9110 section 7.6.1 + + Example: `Connection: close, X-Foo, X-Bar` will return `{"Close", "X-Foo", "X-Bar"}` + + Even though "close" is a special directive, let's just treat it as just another + header for simplicity. If people want to check for this directive, they can simply + check for `"Close" in headers`. + + Args: + connection_header_value: The value of the `Connection` header. + + Returns: + The set of header names that should not be copied over from the remote response. + The keys are capitalized in canonical capitalization. 
+ """ + headers = Headers() + extra_headers_to_remove: Set[str] = set() + if connection_header_value: + extra_headers_to_remove = { + headers._canonicalNameCaps(connection_option.strip()).decode("ascii") + for connection_option in connection_header_value.split(b",") + } + + return extra_headers_to_remove + + +class ProxyResource(_AsyncResource): + """ + A stub resource that proxies any requests with a `matrix-federation://` scheme + through the given `federation_agent` to the remote homeserver and ferries back the + info. + """ + + isLeaf = True + + def __init__(self, reactor: ISynapseReactor, hs: "HomeServer"): + super().__init__(True) + + self.reactor = reactor + self.agent = hs.get_federation_http_client().agent + + self._proxy_authorization_secret = hs.config.worker.worker_replication_secret + + def _check_auth(self, request: Request) -> None: + # The `matrix-federation://` proxy functionality can only be used with auth. + # Protect homserver admins forgetting to configure a secret. + assert self._proxy_authorization_secret is not None + + # Get the authorization header. + auth_headers = request.requestHeaders.getRawHeaders(b"Proxy-Authorization") + + if not auth_headers: + raise InvalidProxyCredentialsError( + "Missing Proxy-Authorization header.", Codes.MISSING_TOKEN + ) + if len(auth_headers) > 1: + raise InvalidProxyCredentialsError( + "Too many Proxy-Authorization headers.", Codes.UNAUTHORIZED + ) + parts = auth_headers[0].split(b" ") + if parts[0] == b"Bearer" and len(parts) == 2: + received_secret = parts[1].decode("ascii") + if self._proxy_authorization_secret == received_secret: + # Success! + return + + raise InvalidProxyCredentialsError( + "Invalid Proxy-Authorization header.", Codes.UNAUTHORIZED + ) + + async def _async_render(self, request: "SynapseRequest") -> Tuple[int, Any]: + uri = urllib.parse.urlparse(request.uri) + assert uri.scheme == b"matrix-federation" + + # Check the authorization headers before handling the request. + self._check_auth(request) + + headers = Headers() + for header_name in (b"User-Agent", b"Authorization", b"Content-Type"): + header_value = request.getHeader(header_name) + if header_value: + headers.addRawHeader(header_name, header_value) + + request_deferred = run_in_background( + self.agent.request, + request.method, + request.uri, + headers=headers, + bodyProducer=QuieterFileBodyProducer(request.content), + ) + request_deferred = timeout_deferred( + request_deferred, + # This should be set longer than the timeout in `MatrixFederationHttpClient` + # so that it has enough time to complete and pass us the data before we give + # up. + timeout=90, + reactor=self.reactor, + ) + + response = await make_deferred_yieldable(request_deferred) + + return response.code, response + + def _send_response( + self, + request: "SynapseRequest", + code: int, + response_object: Any, + ) -> None: + response = cast(IResponse, response_object) + response_headers = cast(Headers, response.headers) + + request.setResponseCode(code) + + # The `Connection` header also defines which headers should not be copied over. + connection_header = response_headers.getRawHeaders(b"connection") + extra_headers_to_remove = parse_connection_header_value( + connection_header[0] if connection_header else None + ) + + # Copy headers. + for k, v in response_headers.getAllRawHeaders(): + # Do not copy over any hop-by-hop headers. These are meant to only be + # consumed by the immediate recipient and not be forwarded on. 
+ header_key = k.decode("ascii") + if ( + header_key in HOP_BY_HOP_HEADERS + or header_key in extra_headers_to_remove + ): + continue + + request.responseHeaders.setRawHeaders(k, v) + + response.deliverBody(_ProxyResponseBody(request)) + + def _send_error_response( + self, + f: failure.Failure, + request: "SynapseRequest", + ) -> None: + if isinstance(f.value, InvalidProxyCredentialsError): + error_response_code = f.value.code + error_response_json = {"errcode": f.value.errcode, "err": f.value.msg} + else: + error_response_code = 502 + error_response_json = { + "errcode": Codes.UNKNOWN, + "err": "ProxyResource: Error when proxying request: %s %s -> %s" + % ( + request.method.decode("ascii"), + request.uri.decode("ascii"), + f, + ), + } + + request.setResponseCode(error_response_code) + request.setHeader(b"Content-Type", b"application/json") + request.write((json.dumps(error_response_json)).encode()) + request.finish() + + +class _ProxyResponseBody(protocol.Protocol): + """ + A protocol that proxies the given remote response data back out to the given local + request. + """ + + transport: Optional[ITCPTransport] = None + + def __init__(self, request: "SynapseRequest") -> None: + self._request = request + + def dataReceived(self, data: bytes) -> None: + # Avoid sending response data to the local request that already disconnected + if self._request._disconnected and self.transport is not None: + # Close the connection (forcefully) since all the data will get + # discarded anyway. + self.transport.abortConnection() + return + + self._request.write(data) + + def connectionLost(self, reason: Failure = connectionDone) -> None: + # If the local request is already finished (successfully or failed), don't + # worry about sending anything back. + if self._request.finished: + return + + if reason.check(ResponseDone): + self._request.finish() + else: + # Abort the underlying request since our remote request also failed. + self._request.transport.abortConnection() + + +class ProxySite(Site): + """ + Proxies any requests with a `matrix-federation://` scheme through the given + `federation_agent`. Otherwise, behaves like a normal `Site`. + """ + + def __init__( + self, + resource: IResource, + reactor: ISynapseReactor, + hs: "HomeServer", + ): + super().__init__(resource, reactor=reactor) + + self._proxy_resource = ProxyResource(reactor, hs=hs) + + def getResourceFor(self, request: "SynapseRequest") -> IResource: + uri = urllib.parse.urlparse(request.uri) + if uri.scheme == b"matrix-federation": + return self._proxy_resource + + return super().getResourceFor(request) diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py index 7bdc4acae7..59ab8fad35 100644 --- a/synapse/http/proxyagent.py +++ b/synapse/http/proxyagent.py @@ -12,8 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging +import random import re -from typing import Any, Dict, Optional, Tuple +from typing import Any, Collection, Dict, List, Optional, Sequence, Tuple from urllib.parse import urlparse from urllib.request import ( # type: ignore[attr-defined] getproxies_environment, @@ -23,8 +24,17 @@ from urllib.request import ( # type: ignore[attr-defined] from zope.interface import implementer from twisted.internet import defer -from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS -from twisted.internet.interfaces import IReactorCore, IStreamClientEndpoint +from twisted.internet.endpoints import ( + HostnameEndpoint, + UNIXClientEndpoint, + wrapClientTLS, +) +from twisted.internet.interfaces import ( + IProtocol, + IProtocolFactory, + IReactorCore, + IStreamClientEndpoint, +) from twisted.python.failure import Failure from twisted.web.client import ( URI, @@ -36,8 +46,18 @@ from twisted.web.error import SchemeNotSupported from twisted.web.http_headers import Headers from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS, IResponse +from synapse.config.workers import ( + InstanceLocationConfig, + InstanceTcpLocationConfig, + InstanceUnixLocationConfig, +) from synapse.http import redact_uri -from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials +from synapse.http.connectproxyclient import ( + BasicProxyCredentials, + HTTPConnectProxyEndpoint, + ProxyCredentials, +) +from synapse.logging.context import run_in_background logger = logging.getLogger(__name__) @@ -74,6 +94,14 @@ class ProxyAgent(_AgentBase): use_proxy: Whether proxy settings should be discovered and used from conventional environment variables. + federation_proxy_locations: An optional list of locations to proxy outbound federation + traffic through (only requests that use the `matrix-federation://` scheme + will be proxied). + + federation_proxy_credentials: Required if `federation_proxy_locations` is set. The + credentials to use when proxying outbound federation traffic through another + worker. + Raises: ValueError if use_proxy is set and the environment variables contain an invalid proxy specification. 
@@ -89,6 +117,8 @@ class ProxyAgent(_AgentBase): bindAddress: Optional[bytes] = None, pool: Optional[HTTPConnectionPool] = None, use_proxy: bool = False, + federation_proxy_locations: Collection[InstanceLocationConfig] = (), + federation_proxy_credentials: Optional[ProxyCredentials] = None, ): contextFactory = contextFactory or BrowserLikePolicyForHTTPS() @@ -127,6 +157,47 @@ class ProxyAgent(_AgentBase): self._policy_for_https = contextFactory self._reactor = reactor + self._federation_proxy_endpoint: Optional[IStreamClientEndpoint] = None + self._federation_proxy_credentials: Optional[ProxyCredentials] = None + if federation_proxy_locations: + assert ( + federation_proxy_credentials is not None + ), "`federation_proxy_credentials` are required when using `federation_proxy_locations`" + + endpoints: List[IStreamClientEndpoint] = [] + for federation_proxy_location in federation_proxy_locations: + endpoint: IStreamClientEndpoint + if isinstance(federation_proxy_location, InstanceTcpLocationConfig): + endpoint = HostnameEndpoint( + self.proxy_reactor, + federation_proxy_location.host, + federation_proxy_location.port, + ) + if federation_proxy_location.tls: + tls_connection_creator = ( + self._policy_for_https.creatorForNetloc( + federation_proxy_location.host.encode("utf-8"), + federation_proxy_location.port, + ) + ) + endpoint = wrapClientTLS(tls_connection_creator, endpoint) + + elif isinstance(federation_proxy_location, InstanceUnixLocationConfig): + endpoint = UNIXClientEndpoint( + self.proxy_reactor, federation_proxy_location.path + ) + + else: + # It is supremely unlikely we ever hit this + raise SchemeNotSupported( + f"Unknown type of Endpoint requested, check {federation_proxy_location}" + ) + + endpoints.append(endpoint) + + self._federation_proxy_endpoint = _RandomSampleEndpoints(endpoints) + self._federation_proxy_credentials = federation_proxy_credentials + def request( self, method: bytes, @@ -214,6 +285,25 @@ class ProxyAgent(_AgentBase): parsed_uri.port, self.https_proxy_creds, ) + elif ( + parsed_uri.scheme == b"matrix-federation" + and self._federation_proxy_endpoint + ): + assert ( + self._federation_proxy_credentials is not None + ), "`federation_proxy_credentials` are required when using `federation_proxy_locations`" + + # Set a Proxy-Authorization header + if headers is None: + headers = Headers() + # We always need authentication for the outbound federation proxy + headers.addRawHeader( + b"Proxy-Authorization", + self._federation_proxy_credentials.as_proxy_authorization_value(), + ) + + endpoint = self._federation_proxy_endpoint + request_path = uri else: # not using a proxy endpoint = HostnameEndpoint( @@ -233,6 +323,11 @@ class ProxyAgent(_AgentBase): endpoint = wrapClientTLS(tls_connection_creator, endpoint) elif parsed_uri.scheme == b"http": pass + elif ( + parsed_uri.scheme == b"matrix-federation" + and self._federation_proxy_endpoint + ): + pass else: return defer.fail( Failure( @@ -334,6 +429,42 @@ def parse_proxy( credentials = None if url.username and url.password: - credentials = ProxyCredentials(b"".join([url.username, b":", url.password])) + credentials = BasicProxyCredentials( + b"".join([url.username, b":", url.password]) + ) return url.scheme, url.hostname, url.port or default_port, credentials + + +@implementer(IStreamClientEndpoint) +class _RandomSampleEndpoints: + """An endpoint that randomly iterates through a given list of endpoints at + each connection attempt. 
+ """ + + def __init__( + self, + endpoints: Sequence[IStreamClientEndpoint], + ) -> None: + assert endpoints + self._endpoints = endpoints + + def __repr__(self) -> str: + return f"<_RandomSampleEndpoints endpoints={self._endpoints}>" + + def connect( + self, protocol_factory: IProtocolFactory + ) -> "defer.Deferred[IProtocol]": + """Implements IStreamClientEndpoint interface""" + + return run_in_background(self._do_connect, protocol_factory) + + async def _do_connect(self, protocol_factory: IProtocolFactory) -> IProtocol: + failures: List[Failure] = [] + for endpoint in random.sample(self._endpoints, k=len(self._endpoints)): + try: + return await endpoint.connect(protocol_factory) + except Exception: + failures.append(Failure()) + + failures.pop().raiseException() diff --git a/synapse/http/server.py b/synapse/http/server.py index e411ac7e62..f592600880 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -18,6 +18,7 @@ import html import logging import types import urllib +import urllib.parse from http import HTTPStatus from http.client import FOUND from inspect import isawaitable @@ -65,7 +66,6 @@ from synapse.api.errors import ( UnrecognizedRequestError, ) from synapse.config.homeserver import HomeServerConfig -from synapse.http.site import SynapseRequest from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background from synapse.logging.opentracing import active_span, start_active_span, trace_servlet from synapse.util import json_encoder @@ -76,6 +76,7 @@ from synapse.util.iterutils import chunk_seq if TYPE_CHECKING: import opentracing + from synapse.http.site import SynapseRequest from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -102,7 +103,7 @@ HTTP_STATUS_REQUEST_CANCELLED = 499 def return_json_error( - f: failure.Failure, request: SynapseRequest, config: Optional[HomeServerConfig] + f: failure.Failure, request: "SynapseRequest", config: Optional[HomeServerConfig] ) -> None: """Sends a JSON error response to clients.""" @@ -220,8 +221,8 @@ def return_html_error( def wrap_async_request_handler( - h: Callable[["_AsyncResource", SynapseRequest], Awaitable[None]] -) -> Callable[["_AsyncResource", SynapseRequest], "defer.Deferred[None]"]: + h: Callable[["_AsyncResource", "SynapseRequest"], Awaitable[None]] +) -> Callable[["_AsyncResource", "SynapseRequest"], "defer.Deferred[None]"]: """Wraps an async request handler so that it calls request.processing. 
This helps ensure that work done by the request handler after the request is completed @@ -235,7 +236,7 @@ def wrap_async_request_handler( """ async def wrapped_async_request_handler( - self: "_AsyncResource", request: SynapseRequest + self: "_AsyncResource", request: "SynapseRequest" ) -> None: with request.processing(): await h(self, request) @@ -300,7 +301,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): self._extract_context = extract_context - def render(self, request: SynapseRequest) -> int: + def render(self, request: "SynapseRequest") -> int: """This gets called by twisted every time someone sends us a request.""" request.render_deferred = defer.ensureDeferred( self._async_render_wrapper(request) @@ -308,7 +309,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): return NOT_DONE_YET @wrap_async_request_handler - async def _async_render_wrapper(self, request: SynapseRequest) -> None: + async def _async_render_wrapper(self, request: "SynapseRequest") -> None: """This is a wrapper that delegates to `_async_render` and handles exceptions, return values, metrics, etc. """ @@ -326,9 +327,15 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): # of our stack, and thus gives us a sensible stack # trace. f = failure.Failure() + logger.exception( + "Error handling request", + exc_info=(f.type, f.value, f.getTracebackObject()), + ) self._send_error_response(f, request) - async def _async_render(self, request: SynapseRequest) -> Optional[Tuple[int, Any]]: + async def _async_render( + self, request: "SynapseRequest" + ) -> Optional[Tuple[int, Any]]: """Delegates to `_async_render_` methods, or returns a 400 if no appropriate method exists. Can be overridden in sub classes for different routing. @@ -358,7 +365,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): @abc.abstractmethod def _send_response( self, - request: SynapseRequest, + request: "SynapseRequest", code: int, response_object: Any, ) -> None: @@ -368,7 +375,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): def _send_error_response( self, f: failure.Failure, - request: SynapseRequest, + request: "SynapseRequest", ) -> None: raise NotImplementedError() @@ -384,7 +391,7 @@ class DirectServeJsonResource(_AsyncResource): def _send_response( self, - request: SynapseRequest, + request: "SynapseRequest", code: int, response_object: Any, ) -> None: @@ -401,7 +408,7 @@ class DirectServeJsonResource(_AsyncResource): def _send_error_response( self, f: failure.Failure, - request: SynapseRequest, + request: "SynapseRequest", ) -> None: """Implements _AsyncResource._send_error_response""" return_json_error(f, request, None) @@ -473,7 +480,7 @@ class JsonResource(DirectServeJsonResource): ) def _get_handler_for_request( - self, request: SynapseRequest + self, request: "SynapseRequest" ) -> Tuple[ServletCallback, str, Dict[str, str]]: """Finds a callback method to handle the given request. @@ -503,7 +510,7 @@ class JsonResource(DirectServeJsonResource): # Huh. No one wanted to handle that? Fiiiiiine. 
raise UnrecognizedRequestError(code=404) - async def _async_render(self, request: SynapseRequest) -> Tuple[int, Any]: + async def _async_render(self, request: "SynapseRequest") -> Tuple[int, Any]: callback, servlet_classname, group_dict = self._get_handler_for_request(request) request.is_render_cancellable = is_function_cancellable(callback) @@ -535,7 +542,7 @@ class JsonResource(DirectServeJsonResource): def _send_error_response( self, f: failure.Failure, - request: SynapseRequest, + request: "SynapseRequest", ) -> None: """Implements _AsyncResource._send_error_response""" return_json_error(f, request, self.hs.config) @@ -551,7 +558,7 @@ class DirectServeHtmlResource(_AsyncResource): def _send_response( self, - request: SynapseRequest, + request: "SynapseRequest", code: int, response_object: Any, ) -> None: @@ -565,7 +572,7 @@ class DirectServeHtmlResource(_AsyncResource): def _send_error_response( self, f: failure.Failure, - request: SynapseRequest, + request: "SynapseRequest", ) -> None: """Implements _AsyncResource._send_error_response""" return_html_error(f, request, self.ERROR_TEMPLATE) @@ -592,7 +599,7 @@ class UnrecognizedRequestResource(resource.Resource): errcode of M_UNRECOGNIZED. """ - def render(self, request: SynapseRequest) -> int: + def render(self, request: "SynapseRequest") -> int: f = failure.Failure(UnrecognizedRequestError(code=404)) return_json_error(f, request, None) # A response has already been sent but Twisted requires either NOT_DONE_YET @@ -622,7 +629,7 @@ class RootRedirect(resource.Resource): class OptionsResource(resource.Resource): """Responds to OPTION requests for itself and all children.""" - def render_OPTIONS(self, request: SynapseRequest) -> bytes: + def render_OPTIONS(self, request: "SynapseRequest") -> bytes: request.setResponseCode(204) request.setHeader(b"Content-Length", b"0") @@ -737,7 +744,7 @@ def _encode_json_bytes(json_object: object) -> bytes: def respond_with_json( - request: SynapseRequest, + request: "SynapseRequest", code: int, json_object: Any, send_cors: bool = False, @@ -787,7 +794,7 @@ def respond_with_json( def respond_with_json_bytes( - request: SynapseRequest, + request: "SynapseRequest", code: int, json_bytes: bytes, send_cors: bool = False, @@ -825,7 +832,7 @@ def respond_with_json_bytes( async def _async_write_json_to_request_in_thread( - request: SynapseRequest, + request: "SynapseRequest", json_encoder: Callable[[Any], bytes], json_object: Any, ) -> None: @@ -883,7 +890,7 @@ def _write_bytes_to_request(request: Request, bytes_to_write: bytes) -> None: _ByteProducer(request, bytes_generator) -def set_cors_headers(request: SynapseRequest) -> None: +def set_cors_headers(request: "SynapseRequest") -> None: """Set the CORS headers so that javascript running in a web browsers can use this API @@ -981,7 +988,7 @@ def set_clickjacking_protection_headers(request: Request) -> None: def respond_with_redirect( - request: SynapseRequest, url: bytes, statusCode: int = FOUND, cors: bool = False + request: "SynapseRequest", url: bytes, statusCode: int = FOUND, cors: bool = False ) -> None: """ Write a 302 (or other specified status code) response to the request, if it is still alive. 
diff --git a/synapse/http/site.py b/synapse/http/site.py index 5b5a7c1e59..a388d6cf7f 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -21,25 +21,29 @@ from zope.interface import implementer from twisted.internet.address import UNIXAddress from twisted.internet.defer import Deferred -from twisted.internet.interfaces import IAddress, IReactorTime +from twisted.internet.interfaces import IAddress from twisted.python.failure import Failure from twisted.web.http import HTTPChannel from twisted.web.resource import IResource, Resource -from twisted.web.server import Request, Site +from twisted.web.server import Request from synapse.config.server import ListenerConfig from synapse.http import get_request_user_agent, redact_uri +from synapse.http.proxy import ProxySite from synapse.http.request_metrics import RequestMetrics, requests_counter from synapse.logging.context import ( ContextRequest, LoggingContext, PreserveLoggingContext, ) -from synapse.types import Requester +from synapse.types import ISynapseReactor, Requester if TYPE_CHECKING: import opentracing + from synapse.server import HomeServer + + logger = logging.getLogger(__name__) _next_request_seq = 0 @@ -102,7 +106,7 @@ class SynapseRequest(Request): # A boolean indicating whether `render_deferred` should be cancelled if the # client disconnects early. Expected to be set by the coroutine started by # `Resource.render`, if rendering is asynchronous. - self.is_render_cancellable = False + self.is_render_cancellable: bool = False global _next_request_seq self.request_seq = _next_request_seq @@ -601,7 +605,7 @@ class _XForwardedForAddress: host: str -class SynapseSite(Site): +class SynapseSite(ProxySite): """ Synapse-specific twisted http Site @@ -623,7 +627,8 @@ class SynapseSite(Site): resource: IResource, server_version_string: str, max_request_body_size: int, - reactor: IReactorTime, + reactor: ISynapseReactor, + hs: "HomeServer", ): """ @@ -638,7 +643,11 @@ class SynapseSite(Site): dropping the connection reactor: reactor to be used to manage connection timeouts """ - Site.__init__(self, resource, reactor=reactor) + super().__init__( + resource=resource, + reactor=reactor, + hs=hs, + ) self.site_tag = site_tag self.reactor = reactor @@ -649,7 +658,9 @@ class SynapseSite(Site): request_id_header = config.http_options.request_id_header - self.experimental_cors_msc3886 = config.http_options.experimental_cors_msc3886 + self.experimental_cors_msc3886: bool = ( + config.http_options.experimental_cors_msc3886 + ) def request_factory(channel: HTTPChannel, queued: bool) -> Request: return request_class( diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 5a965f233b..21c5309740 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -31,9 +31,7 @@ from tests.unittest import HomeserverTestCase class FederationReaderOpenIDListenerTests(HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver( - federation_http_client=None, homeserver_to_use=GenericWorkerServer - ) + hs = self.setup_test_homeserver(homeserver_to_use=GenericWorkerServer) return hs def default_config(self) -> JsonDict: @@ -91,9 +89,7 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): @patch("synapse.app.homeserver.KeyResource", new=Mock()) class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = 
self.setup_test_homeserver( - federation_http_client=None, homeserver_to_use=SynapseHomeServer - ) + hs = self.setup_test_homeserver(homeserver_to_use=SynapseHomeServer) return hs @parameterized.expand( diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index ee48f9e546..66215af2b8 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -41,7 +41,6 @@ class DeviceTestCase(unittest.HomeserverTestCase): self.appservice_api = mock.Mock() hs = self.setup_test_homeserver( "server", - federation_http_client=None, application_service_api=self.appservice_api, ) handler = hs.get_device_handler() @@ -401,7 +400,7 @@ class DeviceTestCase(unittest.HomeserverTestCase): class DehydrationTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver("server", federation_http_client=None) + hs = self.setup_test_homeserver("server") handler = hs.get_device_handler() assert isinstance(handler, DeviceHandler) self.handler = handler diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index bf0862ed54..5f11d5df11 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -57,7 +57,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver(federation_http_client=None) + hs = self.setup_test_homeserver() self.handler = hs.get_federation_handler() self.store = hs.get_datastores().main return hs diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 19f5322317..fd66d573d2 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -993,7 +993,6 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver( "server", - federation_http_client=None, federation_sender=Mock(spec=FederationSender), ) return hs diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 94518a7196..5da1d95f0b 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -17,6 +17,8 @@ import json from typing import Dict, List, Set from unittest.mock import ANY, Mock, call +from netaddr import IPSet + from twisted.test.proto_helpers import MemoryReactor from twisted.web.resource import Resource @@ -24,6 +26,7 @@ from synapse.api.constants import EduTypes from synapse.api.errors import AuthError from synapse.federation.transport.server import TransportLayerServer from synapse.handlers.typing import TypingWriterHandler +from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent from synapse.server import HomeServer from synapse.types import JsonDict, Requester, UserID, create_requester from synapse.util import Clock @@ -76,6 +79,13 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): # we mock out the federation client too self.mock_federation_client = Mock(spec=["put_json"]) self.mock_federation_client.put_json.return_value = make_awaitable((200, "OK")) + self.mock_federation_client.agent = MatrixFederationAgent( + reactor, + tls_client_options_factory=None, + user_agent=b"SynapseInTrialTest/0.0.0", + ip_allowlist=None, + ip_blocklist=IPSet(), + ) # the tests assume that we are starting at unix time 1000 reactor.pump((1000,)) diff --git a/tests/http/test_matrixfederationclient.py 
b/tests/http/test_matrixfederationclient.py index b5f4a60fe5..ab94f3f67a 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -11,8 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Generator -from unittest.mock import Mock +from typing import Any, Dict, Generator +from unittest.mock import ANY, Mock, create_autospec from netaddr import IPSet from parameterized import parameterized @@ -21,10 +21,12 @@ from twisted.internet import defer from twisted.internet.defer import Deferred, TimeoutError from twisted.internet.error import ConnectingCancelledError, DNSLookupError from twisted.test.proto_helpers import MemoryReactor, StringTransport -from twisted.web.client import ResponseNeverReceived +from twisted.web.client import Agent, ResponseNeverReceived from twisted.web.http import HTTPChannel +from twisted.web.http_headers import Headers -from synapse.api.errors import RequestSendFailed +from synapse.api.errors import HttpResponseException, RequestSendFailed +from synapse.config._base import ConfigError from synapse.http.matrixfederationclient import ( ByteParser, MatrixFederationHttpClient, @@ -39,7 +41,9 @@ from synapse.logging.context import ( from synapse.server import HomeServer from synapse.util import Clock +from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.server import FakeTransport +from tests.test_utils import FakeResponse from tests.unittest import HomeserverTestCase, override_config @@ -658,3 +662,275 @@ class FederationClientTests(HomeserverTestCase): self.assertEqual(self.cl.max_short_retry_delay_seconds, 7) self.assertEqual(self.cl.max_long_retries, 20) self.assertEqual(self.cl.max_short_retries, 5) + + +class FederationClientProxyTests(BaseMultiWorkerStreamTestCase): + def default_config(self) -> Dict[str, Any]: + conf = super().default_config() + conf["instance_map"] = { + "main": {"host": "testserv", "port": 8765}, + "federation_sender": {"host": "testserv", "port": 1001}, + } + return conf + + @override_config( + { + "outbound_federation_restricted_to": ["federation_sender"], + "worker_replication_secret": "secret", + } + ) + def test_proxy_requests_through_federation_sender_worker(self) -> None: + """ + Test that all outbound federation requests go through the `federation_sender` + worker + """ + # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance + # so we can act like some remote server responding to requests + mock_client_on_federation_sender = Mock() + mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True) + mock_client_on_federation_sender.agent = mock_agent_on_federation_sender + + # Create the `federation_sender` worker + self.make_worker_hs( + "synapse.app.generic_worker", + {"worker_name": "federation_sender"}, + federation_http_client=mock_client_on_federation_sender, + ) + + # Fake `remoteserv:8008` responding to requests + mock_agent_on_federation_sender.request.side_effect = ( + lambda *args, **kwargs: defer.succeed( + FakeResponse.json( + payload={ + "foo": "bar", + } + ) + ) + ) + + # This federation request from the main process should be proxied through the + # `federation_sender` worker off to the remote server + test_request_from_main_process_d = defer.ensureDeferred( + self.hs.get_federation_http_client().get_json("remoteserv:8008", "foo/bar") + ) + + # Pump the reactor so our 
deferred goes through the motions + self.pump() + + # Make sure that the request was proxied through the `federation_sender` worker + mock_agent_on_federation_sender.request.assert_called_once_with( + b"GET", + b"matrix-federation://remoteserv:8008/foo/bar", + headers=ANY, + bodyProducer=ANY, + ) + + # Make sure the response is as expected back on the main worker + res = self.successResultOf(test_request_from_main_process_d) + self.assertEqual(res, {"foo": "bar"}) + + @override_config( + { + "outbound_federation_restricted_to": ["federation_sender"], + "worker_replication_secret": "secret", + } + ) + def test_proxy_request_with_network_error_through_federation_sender_worker( + self, + ) -> None: + """ + Test that when the outbound federation request fails with a network related + error, a sensible error makes its way back to the main process. + """ + # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance + # so we can act like some remote server responding to requests + mock_client_on_federation_sender = Mock() + mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True) + mock_client_on_federation_sender.agent = mock_agent_on_federation_sender + + # Create the `federation_sender` worker + self.make_worker_hs( + "synapse.app.generic_worker", + {"worker_name": "federation_sender"}, + federation_http_client=mock_client_on_federation_sender, + ) + + # Fake `remoteserv:8008` responding to requests + mock_agent_on_federation_sender.request.side_effect = ( + lambda *args, **kwargs: defer.fail(ResponseNeverReceived("fake error")) + ) + + # This federation request from the main process should be proxied through the + # `federation_sender` worker off to the remote server + test_request_from_main_process_d = defer.ensureDeferred( + self.hs.get_federation_http_client().get_json("remoteserv:8008", "foo/bar") + ) + + # Pump the reactor so our deferred goes through the motions. We pump with 10 + # seconds (0.1 * 100) so the `MatrixFederationHttpClient` runs out of retries + # and finally passes along the error response. 
+ self.pump(0.1)
+
+ # Make sure that the request was proxied through the `federation_sender` worker
+ mock_agent_on_federation_sender.request.assert_called_with(
+ b"GET",
+ b"matrix-federation://remoteserv:8008/foo/bar",
+ headers=ANY,
+ bodyProducer=ANY,
+ )
+
+ # Make sure we get some sort of error back on the main worker
+ failure_res = self.failureResultOf(test_request_from_main_process_d)
+ self.assertIsInstance(failure_res.value, RequestSendFailed)
+ self.assertIsInstance(failure_res.value.inner_exception, HttpResponseException)
+ self.assertEqual(failure_res.value.inner_exception.code, 502)
+
+ @override_config(
+ {
+ "outbound_federation_restricted_to": ["federation_sender"],
+ "worker_replication_secret": "secret",
+ }
+ )
+ def test_proxy_requests_and_discards_hop_by_hop_headers(self) -> None:
+ """
+ Test to make sure hop-by-hop headers and additional headers defined in the
+ `Connection` header are discarded when proxying requests
+ """
+ # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance
+ # so we can act like some remote server responding to requests
+ mock_client_on_federation_sender = Mock()
+ mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True)
+ mock_client_on_federation_sender.agent = mock_agent_on_federation_sender
+
+ # Create the `federation_sender` worker
+ self.make_worker_hs(
+ "synapse.app.generic_worker",
+ {"worker_name": "federation_sender"},
+ federation_http_client=mock_client_on_federation_sender,
+ )
+
+ # Fake `remoteserv:8008` responding to requests
+ mock_agent_on_federation_sender.request.side_effect = lambda *args, **kwargs: defer.succeed(
+ FakeResponse(
+ code=200,
+ body=b'{"foo": "bar"}',
+ headers=Headers(
+ {
+ "Content-Type": ["application/json"],
+ "Connection": ["close, X-Foo, X-Bar"],
+ # Should be removed because it's defined in the `Connection` header
+ "X-Foo": ["foo"],
+ "X-Bar": ["bar"],
+ # Should be removed because it's a hop-by-hop header
+ "Proxy-Authorization": "abcdef",
+ }
+ ),
+ )
+ )
+
+ # This federation request from the main process should be proxied through the
+ # `federation_sender` worker off to the remote server
+ test_request_from_main_process_d = defer.ensureDeferred(
+ self.hs.get_federation_http_client().get_json_with_headers(
+ "remoteserv:8008", "foo/bar"
+ )
+ )
+
+ # Pump the reactor so our deferred goes through the motions
+ self.pump()
+
+ # Make sure that the request was proxied through the `federation_sender` worker
+ mock_agent_on_federation_sender.request.assert_called_once_with(
+ b"GET",
+ b"matrix-federation://remoteserv:8008/foo/bar",
+ headers=ANY,
+ bodyProducer=ANY,
+ )
+
+ res, headers = self.successResultOf(test_request_from_main_process_d)
+ header_names = set(headers.keys())
+
+ # Make sure the response does not include the hop-by-hop headers
+ self.assertNotIn(b"X-Foo", header_names)
+ self.assertNotIn(b"X-Bar", header_names)
+ self.assertNotIn(b"Proxy-Authorization", header_names)
+ # Make sure the response is as expected back on the main worker
+ self.assertEqual(res, {"foo": "bar"})
+
+ @override_config(
+ {
+ "outbound_federation_restricted_to": ["federation_sender"],
+ # `worker_replication_secret` is set here so that the test setup is able to pass
+ # but the actual homeserver creation test is in the test body below
+ "worker_replication_secret": "secret",
+ }
+ )
+ def test_not_able_to_proxy_requests_through_federation_sender_worker_when_no_secret_configured(
+ self,
+ ) -> None:
+ """
+ Test that we aren't able to proxy any outbound
federation requests when
+ `worker_replication_secret` is not configured.
+ """
+ with self.assertRaises(ConfigError):
+ # Create the `federation_sender` worker
+ self.make_worker_hs(
+ "synapse.app.generic_worker",
+ {
+ "worker_name": "federation_sender",
+ # Test that we aren't able to proxy any outbound federation requests
+ # when `worker_replication_secret` is not configured.
+ "worker_replication_secret": None,
+ },
+ )
+
+ @override_config(
+ {
+ "outbound_federation_restricted_to": ["federation_sender"],
+ "worker_replication_secret": "secret",
+ }
+ )
+ def test_not_able_to_proxy_requests_through_federation_sender_worker_when_wrong_auth_given(
+ self,
+ ) -> None:
+ """
+ Test that we aren't able to proxy any outbound federation requests when the
+ wrong authorization is given.
+ """
+ # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance
+ # so we can act like some remote server responding to requests
+ mock_client_on_federation_sender = Mock()
+ mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True)
+ mock_client_on_federation_sender.agent = mock_agent_on_federation_sender
+
+ # Create the `federation_sender` worker
+ self.make_worker_hs(
+ "synapse.app.generic_worker",
+ {
+ "worker_name": "federation_sender",
+ # Test that we aren't able to proxy any outbound federation requests
+ # when `worker_replication_secret` is wrong.
+ "worker_replication_secret": "wrong",
+ },
+ federation_http_client=mock_client_on_federation_sender,
+ )
+
+ # This federation request from the main process should be proxied through the
+ # `federation_sender` worker but will fail here because it's using the wrong
+ # authorization.
+ test_request_from_main_process_d = defer.ensureDeferred(
+ self.hs.get_federation_http_client().get_json("remoteserv:8008", "foo/bar")
+ )
+
+ # Pump the reactor so our deferred goes through the motions. We pump with 10
+ # seconds (0.1 * 100) so the `MatrixFederationHttpClient` runs out of retries
+ # and finally passes along the error response.
+ self.pump(0.1)
+
+ # Make sure that the request was *NOT* proxied through the `federation_sender`
+ # worker
+ mock_agent_on_federation_sender.request.assert_not_called()
+
+ failure_res = self.failureResultOf(test_request_from_main_process_d)
+ self.assertIsInstance(failure_res.value, HttpResponseException)
+ self.assertEqual(failure_res.value.code, 401)
diff --git a/tests/http/test_proxy.py b/tests/http/test_proxy.py
new file mode 100644
index 0000000000..0dc9ba8e05
--- /dev/null
+++ b/tests/http/test_proxy.py
@@ -0,0 +1,53 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Set
+
+from parameterized import parameterized
+
+from synapse.http.proxy import parse_connection_header_value
+
+from tests.unittest import TestCase
+
+
+class ProxyTests(TestCase):
+ @parameterized.expand(
+ [
+ [b"close, X-Foo, X-Bar", {"Close", "X-Foo", "X-Bar"}],
+ # No whitespace
+ [b"close,X-Foo,X-Bar", {"Close", "X-Foo", "X-Bar"}],
+ # More whitespace
+ [b"close,  X-Foo,   X-Bar", {"Close", "X-Foo", "X-Bar"}],
+ # "close" directive not in the first position
+ [b"X-Foo, X-Bar, close", {"X-Foo", "X-Bar", "Close"}],
+ # Normalizes header capitalization
+ [b"keep-alive, x-fOo, x-bAr", {"Keep-Alive", "X-Foo", "X-Bar"}],
+ # Handles header names with whitespace
+ [
+ b"keep-alive, x foo, x bar",
+ {"Keep-Alive", "X foo", "X bar"},
+ ],
+ ]
+ )
+ def test_parse_connection_header_value(
+ self,
+ connection_header_value: bytes,
+ expected_extra_headers_to_remove: Set[str],
+ ) -> None:
+ """
+ Tests that the connection header value is parsed correctly
+ """
+ self.assertEqual(
+ expected_extra_headers_to_remove,
+ parse_connection_header_value(connection_header_value),
+ )
diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py
index e0ae5a88ff..8164b0b78e 100644
--- a/tests/http/test_proxyagent.py
+++ b/tests/http/test_proxyagent.py
@@ -33,7 +33,7 @@ from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol
from twisted.web.http import HTTPChannel
from synapse.http.client import BlocklistingReactorWrapper
-from synapse.http.connectproxyclient import ProxyCredentials
+from synapse.http.connectproxyclient import BasicProxyCredentials
from synapse.http.proxyagent import ProxyAgent, parse_proxy
from tests.http import (
@@ -205,7 +205,7 @@ class ProxyParserTests(TestCase):
"""
proxy_cred = None
if expected_credentials:
- proxy_cred = ProxyCredentials(expected_credentials)
+ proxy_cred = BasicProxyCredentials(expected_credentials)
self.assertEqual(
(
expected_scheme,
diff --git a/tests/replication/_base.py b/tests/replication/_base.py
index 39aadb9ed5..6712ac485d 100644
--- a/tests/replication/_base.py
+++ b/tests/replication/_base.py
@@ -70,10 +70,10 @@ class BaseStreamTestCase(unittest.HomeserverTestCase):
# Make a new HomeServer object for the worker
self.reactor.lookups["testserv"] = "1.2.3.4"
self.worker_hs = self.setup_test_homeserver(
- federation_http_client=None,
homeserver_to_use=GenericWorkerServer,
config=self._get_worker_hs_config(),
reactor=self.reactor,
+ federation_http_client=None,
)
# Since we use sqlite in memory databases we need to make sure the
@@ -385,6 +385,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
server_version_string="1",
max_request_body_size=8192,
reactor=self.reactor,
+ hs=worker_hs,
)
worker_hs.get_replication_command_handler().start_replication(worker_hs)
diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py
index 08703206a9..a324b4d31d 100644
--- a/tests/replication/test_federation_sender_shard.py
+++ b/tests/replication/test_federation_sender_shard.py
@@ -14,14 +14,18 @@ import logging
from unittest.mock import Mock
+from netaddr import IPSet
+
from synapse.api.constants import EventTypes, Membership
from synapse.events.builder import EventBuilderFactory
from synapse.handlers.typing import TypingWriterHandler
+from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.rest.admin import register_servlets_for_client_rest_resource
from synapse.rest.client import login, room
from
synapse.types import UserID, create_requester from tests.replication._base import BaseMultiWorkerStreamTestCase +from tests.server import get_clock from tests.test_utils import make_awaitable logger = logging.getLogger(__name__) @@ -41,13 +45,25 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): room.register_servlets, ] + def setUp(self) -> None: + super().setUp() + + reactor, _ = get_clock() + self.matrix_federation_agent = MatrixFederationAgent( + reactor, + tls_client_options_factory=None, + user_agent=b"SynapseInTrialTest/0.0.0", + ip_allowlist=None, + ip_blocklist=IPSet(), + ) + def test_send_event_single_sender(self) -> None: """Test that using a single federation sender worker correctly sends a new event. """ mock_client = Mock(spec=["put_json"]) mock_client.put_json.return_value = make_awaitable({}) - + mock_client.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", { @@ -78,6 +94,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): """ mock_client1 = Mock(spec=["put_json"]) mock_client1.put_json.return_value = make_awaitable({}) + mock_client1.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", { @@ -92,6 +109,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): mock_client2 = Mock(spec=["put_json"]) mock_client2.put_json.return_value = make_awaitable({}) + mock_client2.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", { @@ -145,6 +163,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): """ mock_client1 = Mock(spec=["put_json"]) mock_client1.put_json.return_value = make_awaitable({}) + mock_client1.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", { @@ -159,6 +178,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): mock_client2 = Mock(spec=["put_json"]) mock_client2.put_json.return_value = make_awaitable({}) + mock_client2.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", { diff --git a/tests/rest/client/test_presence.py b/tests/rest/client/test_presence.py index dcbb125a3b..e12098102b 100644 --- a/tests/rest/client/test_presence.py +++ b/tests/rest/client/test_presence.py @@ -40,7 +40,6 @@ class PresenceTestCase(unittest.HomeserverTestCase): hs = self.setup_test_homeserver( "red", - federation_http_client=None, federation_client=Mock(), presence_handler=self.presence_handler, ) diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index f1b4e1ad2f..d013e75d55 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -67,8 +67,6 @@ class RoomBase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.hs = self.setup_test_homeserver( "red", - federation_http_client=None, - federation_client=Mock(), ) self.hs.get_federation_handler = Mock() # type: ignore[assignment] diff --git a/tests/storage/test_e2e_room_keys.py b/tests/storage/test_e2e_room_keys.py index 9cb326d90a..f6df31aba4 100644 --- a/tests/storage/test_e2e_room_keys.py +++ b/tests/storage/test_e2e_room_keys.py @@ -31,7 +31,7 @@ room_key: RoomKey = { class E2eRoomKeysHandlerTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver("server", federation_http_client=None) + hs = self.setup_test_homeserver("server") self.store = hs.get_datastores().main return hs 
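The hop-by-hop filtering exercised by the new `tests/http/test_proxy.py` cases and the `discards_hop_by_hop_headers` test above reduces to a single rule: the header names listed in a response's `Connection` header are unioned with the static hop-by-hop set, and nothing in that union is copied to the local response. The following is a minimal, self-contained sketch of that rule; it compares names case-insensitively with `str.lower()` instead of Twisted's canonical-caps normalization, so the function name and dict-based interface are illustrative rather than Synapse's actual API:

from typing import Dict, Set

# Hop-by-hop headers per RFC 2616 section 13.5.1, lower-cased for comparison
# (the patch's HOP_BY_HOP_HEADERS set uses canonical capitalization instead).
HOP_BY_HOP: Set[str] = {
    "connection",
    "keep-alive",
    "proxy-authenticate",
    "proxy-authorization",
    "te",
    "trailers",
    "transfer-encoding",
    "upgrade",
}


def filter_proxied_headers(response_headers: Dict[str, str]) -> Dict[str, str]:
    """Return only the headers safe to copy from a proxied response."""
    # Any header named by the `Connection` header is also treated as hop-by-hop.
    extra = {
        name.strip().lower()
        for name in response_headers.get("Connection", "").split(",")
        if name.strip()
    }
    banned = HOP_BY_HOP | extra
    return {k: v for k, v in response_headers.items() if k.lower() not in banned}


# `X-Foo` is dropped because `Connection` names it; `Content-Type` survives.
assert filter_proxied_headers(
    {"Connection": "close, X-Foo", "X-Foo": "foo", "Content-Type": "application/json"}
) == {"Content-Type": "application/json"}
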
diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py index 857e2caf2e..0282673167 100644 --- a/tests/storage/test_purge.py +++ b/tests/storage/test_purge.py @@ -27,7 +27,7 @@ class PurgeTests(HomeserverTestCase): servlets = [room.register_servlets] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver("server", federation_http_client=None) + hs = self.setup_test_homeserver("server") return hs def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: diff --git a/tests/storage/test_rollback_worker.py b/tests/storage/test_rollback_worker.py index 6861d3a6c9..809c9f175d 100644 --- a/tests/storage/test_rollback_worker.py +++ b/tests/storage/test_rollback_worker.py @@ -45,9 +45,7 @@ def fake_listdir(filepath: str) -> List[str]: class WorkerSchemaTests(HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver( - federation_http_client=None, homeserver_to_use=GenericWorkerServer - ) + hs = self.setup_test_homeserver(homeserver_to_use=GenericWorkerServer) return hs def default_config(self) -> JsonDict: diff --git a/tests/test_server.py b/tests/test_server.py index dc491e06ed..36162cd1f5 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -38,7 +38,7 @@ from tests.http.server._base import test_disconnect from tests.server import ( FakeChannel, FakeSite, - ThreadedMemoryReactorClock, + get_clock, make_request, setup_test_homeserver, ) @@ -46,12 +46,11 @@ from tests.server import ( class JsonResourceTests(unittest.TestCase): def setUp(self) -> None: - self.reactor = ThreadedMemoryReactorClock() - self.hs_clock = Clock(self.reactor) + reactor, clock = get_clock() + self.reactor = reactor self.homeserver = setup_test_homeserver( self.addCleanup, - federation_http_client=None, - clock=self.hs_clock, + clock=clock, reactor=self.reactor, ) @@ -209,7 +208,13 @@ class JsonResourceTests(unittest.TestCase): class OptionsResourceTests(unittest.TestCase): def setUp(self) -> None: - self.reactor = ThreadedMemoryReactorClock() + reactor, clock = get_clock() + self.reactor = reactor + self.homeserver = setup_test_homeserver( + self.addCleanup, + clock=clock, + reactor=self.reactor, + ) class DummyResource(Resource): isLeaf = True @@ -242,6 +247,7 @@ class OptionsResourceTests(unittest.TestCase): "1.0", max_request_body_size=4096, reactor=self.reactor, + hs=self.homeserver, ) # render the request and return the channel @@ -344,7 +350,8 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): await self.callback(request) def setUp(self) -> None: - self.reactor = ThreadedMemoryReactorClock() + reactor, _ = get_clock() + self.reactor = reactor def test_good_response(self) -> None: async def callback(request: SynapseRequest) -> None: @@ -462,9 +469,9 @@ class DirectServeJsonResourceCancellationTests(unittest.TestCase): """Tests for `DirectServeJsonResource` cancellation.""" def setUp(self) -> None: - self.reactor = ThreadedMemoryReactorClock() - self.clock = Clock(self.reactor) - self.resource = CancellableDirectServeJsonResource(self.clock) + reactor, clock = get_clock() + self.reactor = reactor + self.resource = CancellableDirectServeJsonResource(clock) self.site = FakeSite(self.resource, self.reactor) def test_cancellable_disconnect(self) -> None: @@ -496,9 +503,9 @@ class DirectServeHtmlResourceCancellationTests(unittest.TestCase): """Tests for `DirectServeHtmlResource` cancellation.""" def setUp(self) -> None: - self.reactor = 
ThreadedMemoryReactorClock() - self.clock = Clock(self.reactor) - self.resource = CancellableDirectServeHtmlResource(self.clock) + reactor, clock = get_clock() + self.reactor = reactor + self.resource = CancellableDirectServeHtmlResource(clock) self.site = FakeSite(self.resource, self.reactor) def test_cancellable_disconnect(self) -> None: diff --git a/tests/unittest.py b/tests/unittest.py index c73195b32b..b0721e060c 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -358,6 +358,7 @@ class HomeserverTestCase(TestCase): server_version_string="1", max_request_body_size=4096, reactor=self.reactor, + hs=self.hs, ) from tests.rest.client.utils import RestHelper From 199c2709479a833e0dc01d19773031c3d5fa63fb Mon Sep 17 00:00:00 2001 From: Jason Little Date: Tue, 18 Jul 2023 04:36:40 -0500 Subject: [PATCH 234/562] Add a locality to a few presence metrics (#15952) --- changelog.d/15952.misc | 1 + synapse/handlers/presence.py | 37 +++++++++++++++++++++++------------- 2 files changed, 25 insertions(+), 13 deletions(-) create mode 100644 changelog.d/15952.misc diff --git a/changelog.d/15952.misc b/changelog.d/15952.misc new file mode 100644 index 0000000000..c4160977cb --- /dev/null +++ b/changelog.d/15952.misc @@ -0,0 +1 @@ +Update presence metrics to differentiate remote vs local users. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 0a219b7962..cd7df0525f 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -95,13 +95,12 @@ bump_active_time_counter = Counter("synapse_handler_presence_bump_active_time", get_updates_counter = Counter("synapse_handler_presence_get_updates", "", ["type"]) notify_reason_counter = Counter( - "synapse_handler_presence_notify_reason", "", ["reason"] + "synapse_handler_presence_notify_reason", "", ["locality", "reason"] ) state_transition_counter = Counter( - "synapse_handler_presence_state_transition", "", ["from", "to"] + "synapse_handler_presence_state_transition", "", ["locality", "from", "to"] ) - # If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them # "currently_active" LAST_ACTIVE_GRANULARITY = 60 * 1000 @@ -567,8 +566,8 @@ class WorkerPresenceHandler(BasePresenceHandler): for new_state in states: old_state = self.user_to_current_state.get(new_state.user_id) self.user_to_current_state[new_state.user_id] = new_state - - if not old_state or should_notify(old_state, new_state): + is_mine = self.is_mine_id(new_state.user_id) + if not old_state or should_notify(old_state, new_state, is_mine): state_to_notify.append(new_state) stream_id = token @@ -1499,23 +1498,31 @@ class PresenceHandler(BasePresenceHandler): ) -def should_notify(old_state: UserPresenceState, new_state: UserPresenceState) -> bool: +def should_notify( + old_state: UserPresenceState, new_state: UserPresenceState, is_mine: bool +) -> bool: """Decides if a presence state change should be sent to interested parties.""" + user_location = "remote" + if is_mine: + user_location = "local" + if old_state == new_state: return False if old_state.status_msg != new_state.status_msg: - notify_reason_counter.labels("status_msg_change").inc() + notify_reason_counter.labels(user_location, "status_msg_change").inc() return True if old_state.state != new_state.state: - notify_reason_counter.labels("state_change").inc() - state_transition_counter.labels(old_state.state, new_state.state).inc() + notify_reason_counter.labels(user_location, "state_change").inc() + state_transition_counter.labels( + user_location, old_state.state, 
new_state.state + ).inc() return True if old_state.state == PresenceState.ONLINE: if new_state.currently_active != old_state.currently_active: - notify_reason_counter.labels("current_active_change").inc() + notify_reason_counter.labels(user_location, "current_active_change").inc() return True if ( @@ -1524,12 +1531,16 @@ def should_notify(old_state: UserPresenceState, new_state: UserPresenceState) -> ): # Only notify about last active bumps if we're not currently active if not new_state.currently_active: - notify_reason_counter.labels("last_active_change_online").inc() + notify_reason_counter.labels( + user_location, "last_active_change_online" + ).inc() return True elif new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY: # Always notify for a transition where last active gets bumped. - notify_reason_counter.labels("last_active_change_not_online").inc() + notify_reason_counter.labels( + user_location, "last_active_change_not_online" + ).inc() return True return False @@ -1989,7 +2000,7 @@ def handle_update( ) # Check whether the change was something worth notifying about - if should_notify(prev_state, new_state): + if should_notify(prev_state, new_state, is_mine): new_state = new_state.copy_and_replace(last_federation_update_ts=now) persist_and_notify = True From e625c3dca0b8147a204fe812af36b784473a6b50 Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 18 Jul 2023 03:44:09 -0700 Subject: [PATCH 235/562] Revert "Stop writing to column `user_id` of tables `profiles` and `user_filters`. (#15953) * Revert "Stop writing to column `user_id` of tables `profiles` and `user_filters` (#15787)" This reverts commit f25b0f88081bb436bef914983cff7087b54eba5f. * newsfragement --- changelog.d/15953.misc | 1 + synapse/storage/database.py | 2 - synapse/storage/databases/main/__init__.py | 6 +- synapse/storage/databases/main/filtering.py | 5 +- synapse/storage/databases/main/profile.py | 12 ++- synapse/storage/schema/__init__.py | 9 +- .../79/01_drop_user_id_constraint_profiles.py | 50 ---------- ...02_drop_user_id_constraint_user_filters.py | 54 ----------- tests/storage/test_profile.py | 63 +++++++++++++ tests/storage/test_user_filters.py | 94 +++++++++++++++++++ 10 files changed, 174 insertions(+), 122 deletions(-) create mode 100644 changelog.d/15953.misc delete mode 100644 synapse/storage/schema/main/delta/79/01_drop_user_id_constraint_profiles.py delete mode 100644 synapse/storage/schema/main/delta/79/02_drop_user_id_constraint_user_filters.py create mode 100644 tests/storage/test_user_filters.py diff --git a/changelog.d/15953.misc b/changelog.d/15953.misc new file mode 100644 index 0000000000..a20e78178f --- /dev/null +++ b/changelog.d/15953.misc @@ -0,0 +1 @@ +Revert "Stop writing to column user_id of tables profiles and user_filters`. 
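The storage changes below restore the dual-write behaviour: the legacy `user_id` (localpart) column is written again alongside `full_user_id`, and the admin-API query goes back to rebuilding the full ID in SQL with `'@' || p.user_id || ':' || ?`. The invariant both forms rely on, and which the migration tests at the end of this commit assert, is that the two columns agree once the hostname is attached. A one-function restatement of that invariant follows; the helper name is illustrative, not Synapse's:

def full_user_id_from_localpart(localpart: str, server_name: str) -> str:
    # Mirrors the SQL expression '@' || user_id || ':' || server_name used by
    # the restored join and the populate_full_user_id_* background updates.
    return f"@{localpart}:{server_name}"


# Matches the expectation the tests below build for rows named hello00..hello69.
assert full_user_id_from_localpart("hello42", "test") == "@hello42:test"
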
diff --git a/synapse/storage/database.py b/synapse/storage/database.py index c9d687fb2f..a1c8fb0f46 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -98,8 +98,6 @@ UNIQUE_INDEX_BACKGROUND_UPDATES = { "event_push_summary": "event_push_summary_unique_index2", "receipts_linearized": "receipts_linearized_unique_index", "receipts_graph": "receipts_graph_unique_index", - "profiles": "profiles_full_user_id_key_idx", - "user_filters": "full_users_filters_unique_idx", } diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index b6028853c9..80c0304b19 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -15,7 +15,7 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, List, Optional, Tuple, Union, cast +from typing import TYPE_CHECKING, List, Optional, Tuple, cast from synapse.api.constants import Direction from synapse.config.homeserver import HomeServerConfig @@ -196,7 +196,7 @@ class DataStore( txn: LoggingTransaction, ) -> Tuple[List[JsonDict], int]: filters = [] - args: List[Union[str, int]] = [] + args = [self.hs.config.server.server_name] # Set ordering order_by_column = UserSortOrder(order_by).value @@ -263,7 +263,7 @@ class DataStore( sql_base = f""" FROM users as u - LEFT JOIN profiles AS p ON u.name = p.full_user_id + LEFT JOIN profiles AS p ON u.name = '@' || p.user_id || ':' || ? LEFT JOIN erased_users AS eu ON u.name = eu.user_id {where_clause} """ diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index 75f7fe8756..fff417f9e3 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -188,13 +188,14 @@ class FilteringWorkerStore(SQLBaseStore): filter_id = max_id + 1 sql = ( - "INSERT INTO user_filters (full_user_id, filter_id, filter_json)" - "VALUES(?, ?, ?)" + "INSERT INTO user_filters (full_user_id, user_id, filter_id, filter_json)" + "VALUES(?, ?, ?, ?)" ) txn.execute( sql, ( user_id.to_string(), + user_id.localpart, filter_id, bytearray(def_json), ), diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index 660a5507b7..3ba9cc8853 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -173,9 +173,10 @@ class ProfileWorkerStore(SQLBaseStore): ) async def create_profile(self, user_id: UserID) -> None: + user_localpart = user_id.localpart await self.db_pool.simple_insert( table="profiles", - values={"full_user_id": user_id.to_string()}, + values={"user_id": user_localpart, "full_user_id": user_id.to_string()}, desc="create_profile", ) @@ -190,11 +191,13 @@ class ProfileWorkerStore(SQLBaseStore): new_displayname: The new display name. If this is None, the user's display name is removed. """ + user_localpart = user_id.localpart await self.db_pool.simple_upsert( table="profiles", - keyvalues={"full_user_id": user_id.to_string()}, + keyvalues={"user_id": user_localpart}, values={ "displayname": new_displayname, + "full_user_id": user_id.to_string(), }, desc="set_profile_displayname", ) @@ -210,10 +213,11 @@ class ProfileWorkerStore(SQLBaseStore): new_avatar_url: The new avatar URL. If this is None, the user's avatar is removed. 
""" + user_localpart = user_id.localpart await self.db_pool.simple_upsert( table="profiles", - keyvalues={"full_user_id": user_id.to_string()}, - values={"avatar_url": new_avatar_url}, + keyvalues={"user_id": user_localpart}, + values={"avatar_url": new_avatar_url, "full_user_id": user_id.to_string()}, desc="set_profile_avatar_url", ) diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 6d14963c0a..fc190a8b13 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -SCHEMA_VERSION = 79 # remember to update the list below when updating +SCHEMA_VERSION = 78 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -106,9 +106,6 @@ Changes in SCHEMA_VERSION = 77 Changes in SCHEMA_VERSION = 78 - Validate check (full_user_id IS NOT NULL) on tables profiles and user_filters - -Changes in SCHEMA_VERSION = 79 - - We no longer write to column user_id of tables profiles and user_filters """ @@ -121,9 +118,7 @@ SCHEMA_COMPAT_VERSION = ( # # insertions to the column `full_user_id` of tables profiles and user_filters can no # longer be null - # - # we no longer write to column `full_user_id` of tables profiles and user_filters - 78 + 76 ) """Limit on how far the synapse codebase can be rolled back without breaking db compat diff --git a/synapse/storage/schema/main/delta/79/01_drop_user_id_constraint_profiles.py b/synapse/storage/schema/main/delta/79/01_drop_user_id_constraint_profiles.py deleted file mode 100644 index 3541266f7d..0000000000 --- a/synapse/storage/schema/main/delta/79/01_drop_user_id_constraint_profiles.py +++ /dev/null @@ -1,50 +0,0 @@ -from synapse.storage.database import LoggingTransaction -from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine - - -def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: - """ - Update to drop the NOT NULL constraint on column user_id so that we can cease to - write to it without inserts to other columns triggering the constraint - """ - - if isinstance(database_engine, PostgresEngine): - drop_sql = """ - ALTER TABLE profiles ALTER COLUMN user_id DROP NOT NULL - """ - cur.execute(drop_sql) - else: - # irritatingly in SQLite we need to rewrite the table to drop the constraint. 
- cur.execute("DROP TABLE IF EXISTS temp_profiles") - - create_sql = """ - CREATE TABLE temp_profiles ( - full_user_id text NOT NULL, - user_id text, - displayname text, - avatar_url text, - UNIQUE (full_user_id), - UNIQUE (user_id) - ) - """ - cur.execute(create_sql) - - copy_sql = """ - INSERT INTO temp_profiles ( - user_id, - displayname, - avatar_url, - full_user_id) - SELECT user_id, displayname, avatar_url, full_user_id FROM profiles - """ - cur.execute(copy_sql) - - drop_sql = """ - DROP TABLE profiles - """ - cur.execute(drop_sql) - - rename_sql = """ - ALTER TABLE temp_profiles RENAME to profiles - """ - cur.execute(rename_sql) diff --git a/synapse/storage/schema/main/delta/79/02_drop_user_id_constraint_user_filters.py b/synapse/storage/schema/main/delta/79/02_drop_user_id_constraint_user_filters.py deleted file mode 100644 index 8e7569c470..0000000000 --- a/synapse/storage/schema/main/delta/79/02_drop_user_id_constraint_user_filters.py +++ /dev/null @@ -1,54 +0,0 @@ -from synapse.storage.database import LoggingTransaction -from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine - - -def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: - """ - Update to drop the NOT NULL constraint on column user_id so that we can cease to - write to it without inserts to other columns triggering the constraint - """ - if isinstance(database_engine, PostgresEngine): - drop_sql = """ - ALTER TABLE user_filters ALTER COLUMN user_id DROP NOT NULL - """ - cur.execute(drop_sql) - - else: - # irritatingly in SQLite we need to rewrite the table to drop the constraint. - cur.execute("DROP TABLE IF EXISTS temp_user_filters") - - create_sql = """ - CREATE TABLE temp_user_filters ( - full_user_id text NOT NULL, - user_id text, - filter_id bigint NOT NULL, - filter_json bytea NOT NULL - ) - """ - cur.execute(create_sql) - - index_sql = """ - CREATE UNIQUE INDEX IF NOT EXISTS user_filters_full_user_id_unique ON - temp_user_filters (full_user_id, filter_id) - """ - cur.execute(index_sql) - - copy_sql = """ - INSERT INTO temp_user_filters ( - user_id, - filter_id, - filter_json, - full_user_id) - SELECT user_id, filter_id, filter_json, full_user_id FROM user_filters - """ - cur.execute(copy_sql) - - drop_sql = """ - DROP TABLE user_filters - """ - cur.execute(drop_sql) - - rename_sql = """ - ALTER TABLE temp_user_filters RENAME to user_filters - """ - cur.execute(rename_sql) diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index bbe8bd88bc..fe5bb77913 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -15,6 +15,8 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.server import HomeServer +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import PostgresEngine from synapse.types import UserID from synapse.util import Clock @@ -62,3 +64,64 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase): self.assertIsNone( self.get_success(self.store.get_profile_avatar_url(self.u_frank)) ) + + def test_profiles_bg_migration(self) -> None: + """ + Test background job that copies entries from column user_id to full_user_id, adding + the hostname in the process. 
+ """ + updater = self.hs.get_datastores().main.db_pool.updates + + # drop the constraint so we can insert nulls in full_user_id to populate the test + if isinstance(self.store.database_engine, PostgresEngine): + + def f(txn: LoggingTransaction) -> None: + txn.execute( + "ALTER TABLE profiles DROP CONSTRAINT full_user_id_not_null" + ) + + self.get_success(self.store.db_pool.runInteraction("", f)) + + for i in range(0, 70): + self.get_success( + self.store.db_pool.simple_insert( + "profiles", + {"user_id": f"hello{i:02}"}, + ) + ) + + # re-add the constraint so that when it's validated it actually exists + if isinstance(self.store.database_engine, PostgresEngine): + + def f(txn: LoggingTransaction) -> None: + txn.execute( + "ALTER TABLE profiles ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID" + ) + + self.get_success(self.store.db_pool.runInteraction("", f)) + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + values={ + "update_name": "populate_full_user_id_profiles", + "progress_json": "{}", + }, + ) + ) + + self.get_success( + updater.run_background_updates(False), + ) + + expected_values = [] + for i in range(0, 70): + expected_values.append((f"@hello{i:02}:{self.hs.hostname}",)) + + res = self.get_success( + self.store.db_pool.execute( + "", None, "SELECT full_user_id from profiles ORDER BY full_user_id" + ) + ) + self.assertEqual(len(res), len(expected_values)) + self.assertEqual(res, expected_values) diff --git a/tests/storage/test_user_filters.py b/tests/storage/test_user_filters.py new file mode 100644 index 0000000000..bab802f56e --- /dev/null +++ b/tests/storage/test_user_filters.py @@ -0,0 +1,94 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.server import HomeServer +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import PostgresEngine +from synapse.util import Clock + +from tests import unittest + + +class UserFiltersStoreTestCase(unittest.HomeserverTestCase): + """ + Test background migration that copies entries from column user_id to full_user_id, adding + the hostname in the process. 
+ """ + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + + def test_bg_migration(self) -> None: + updater = self.hs.get_datastores().main.db_pool.updates + + # drop the constraint so we can insert nulls in full_user_id to populate the test + if isinstance(self.store.database_engine, PostgresEngine): + + def f(txn: LoggingTransaction) -> None: + txn.execute( + "ALTER TABLE user_filters DROP CONSTRAINT full_user_id_not_null" + ) + + self.get_success(self.store.db_pool.runInteraction("", f)) + + for i in range(0, 70): + self.get_success( + self.store.db_pool.simple_insert( + "user_filters", + { + "user_id": f"hello{i:02}", + "filter_id": i, + "filter_json": bytearray(i), + }, + ) + ) + + # re-add the constraint so that when it's validated it actually exists + if isinstance(self.store.database_engine, PostgresEngine): + + def f(txn: LoggingTransaction) -> None: + txn.execute( + "ALTER TABLE user_filters ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID" + ) + + self.get_success(self.store.db_pool.runInteraction("", f)) + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + values={ + "update_name": "populate_full_user_id_user_filters", + "progress_json": "{}", + }, + ) + ) + + self.get_success( + updater.run_background_updates(False), + ) + + expected_values = [] + for i in range(0, 70): + expected_values.append((f"@hello{i:02}:{self.hs.hostname}",)) + + res = self.get_success( + self.store.db_pool.execute( + "", None, "SELECT full_user_id from user_filters ORDER BY full_user_id" + ) + ) + self.assertEqual(len(res), len(expected_values)) + self.assertEqual(res, expected_values) From 6d81aec09febe86532235141e84c4ea0b3f56049 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 18 Jul 2023 08:44:59 -0400 Subject: [PATCH 236/562] Support room version 11 (#15912) And fix a bug in the implementation of the updated redaction format (MSC2174) where the top-level redacts field was not properly added for backwards-compatibility. --- changelog.d/15912.feature | 1 + scripts-dev/complement.sh | 2 +- synapse/api/room_versions.py | 329 ++++++++--------------- synapse/event_auth.py | 28 +- synapse/events/__init__.py | 2 +- synapse/events/builder.py | 2 +- synapse/events/utils.py | 31 ++- synapse/federation/federation_base.py | 2 +- synapse/federation/federation_client.py | 6 +- synapse/federation/federation_server.py | 6 +- synapse/handlers/event_auth.py | 4 +- synapse/handlers/federation.py | 2 +- synapse/handlers/room.py | 2 +- synapse/handlers/room_summary.py | 4 +- synapse/push/bulk_push_rule_evaluator.py | 2 +- synapse/rest/client/room.py | 4 +- synapse/storage/databases/main/room.py | 2 +- tests/events/test_utils.py | 30 +-- tests/rest/client/test_redactions.py | 21 +- 19 files changed, 190 insertions(+), 290 deletions(-) create mode 100644 changelog.d/15912.feature diff --git a/changelog.d/15912.feature b/changelog.d/15912.feature new file mode 100644 index 0000000000..0faed11eda --- /dev/null +++ b/changelog.d/15912.feature @@ -0,0 +1 @@ +Support room version 11 from [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820). 
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index fea76cb5af..8416b55674 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -214,7 +214,7 @@ fi extra_test_args=() -test_tags="synapse_blacklist,msc3787,msc3874,msc3890,msc3391,msc3930,faster_joins" +test_tags="synapse_blacklist,msc3874,msc3890,msc3391,msc3930,faster_joins" # All environment variables starting with PASS_ will be shared. # (The prefix is stripped off before reaching the container.) diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 25c105a4c8..e7662d5b99 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -78,36 +78,29 @@ class RoomVersion: # MSC2209: Check 'notifications' key while verifying # m.room.power_levels auth rules. limit_notifications_power_levels: bool - # MSC2175: No longer include the creator in m.room.create events. - msc2175_implicit_room_creator: bool - # MSC2174/MSC2176: Apply updated redaction rules algorithm, move redacts to - # content property. - msc2176_redaction_rules: bool - # MSC3083: Support the 'restricted' join_rule. - msc3083_join_rules: bool - # MSC3375: Support for the proper redaction rules for MSC3083. This mustn't - # be enabled if MSC3083 is not. - msc3375_redaction_rules: bool - # MSC2403: Allows join_rules to be set to 'knock', changes auth rules to allow sending - # m.room.membership event with membership 'knock'. - msc2403_knocking: bool + # No longer include the creator in m.room.create events. + implicit_room_creator: bool + # Apply updated redaction rules algorithm from room version 11. + updated_redaction_rules: bool + # Support the 'restricted' join rule. + restricted_join_rule: bool + # Support for the proper redaction rules for the restricted join rule. This requires + # restricted_join_rule to be enabled. + restricted_join_rule_fix: bool + # Support the 'knock' join rule. + knock_join_rule: bool # MSC3389: Protect relation information from redaction. msc3389_relation_redactions: bool - # MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of - # knocks and restricted join rules into the same join condition. - msc3787_knock_restricted_join_rule: bool - # MSC3667: Enforce integer power levels - msc3667_int_only_power_levels: bool - # MSC3821: Do not redact the third_party_invite content field for membership events. - msc3821_redaction_rules: bool + # Support the 'knock_restricted' join rule. + knock_restricted_join_rule: bool + # Enforce integer power levels + enforce_int_power_levels: bool # MSC3931: Adds a push rule condition for "room version feature flags", making # some push rules room version dependent. Note that adding a flag to this list # is not enough to mark it "supported": the push rule evaluator also needs to # support the flag. Unknown flags are ignored by the evaluator, making conditions # fail if used. msc3931_push_features: Tuple[str, ...] # values from PushRuleRoomFlag - # MSC3989: Redact the origin field. 
- msc3989_redaction_rules: bool class RoomVersions: @@ -120,17 +113,15 @@ class RoomVersions: special_case_aliases_auth=True, strict_canonicaljson=False, limit_notifications_power_levels=False, - msc2175_implicit_room_creator=False, - msc2176_redaction_rules=False, - msc3083_join_rules=False, - msc3375_redaction_rules=False, - msc2403_knocking=False, + implicit_room_creator=False, + updated_redaction_rules=False, + restricted_join_rule=False, + restricted_join_rule_fix=False, + knock_join_rule=False, msc3389_relation_redactions=False, - msc3787_knock_restricted_join_rule=False, - msc3667_int_only_power_levels=False, - msc3821_redaction_rules=False, + knock_restricted_join_rule=False, + enforce_int_power_levels=False, msc3931_push_features=(), - msc3989_redaction_rules=False, ) V2 = RoomVersion( "2", @@ -141,17 +132,15 @@ class RoomVersions: special_case_aliases_auth=True, strict_canonicaljson=False, limit_notifications_power_levels=False, - msc2175_implicit_room_creator=False, - msc2176_redaction_rules=False, - msc3083_join_rules=False, - msc3375_redaction_rules=False, - msc2403_knocking=False, + implicit_room_creator=False, + updated_redaction_rules=False, + restricted_join_rule=False, + restricted_join_rule_fix=False, + knock_join_rule=False, msc3389_relation_redactions=False, - msc3787_knock_restricted_join_rule=False, - msc3667_int_only_power_levels=False, - msc3821_redaction_rules=False, + knock_restricted_join_rule=False, + enforce_int_power_levels=False, msc3931_push_features=(), - msc3989_redaction_rules=False, ) V3 = RoomVersion( "3", @@ -162,17 +151,15 @@ class RoomVersions: special_case_aliases_auth=True, strict_canonicaljson=False, limit_notifications_power_levels=False, - msc2175_implicit_room_creator=False, - msc2176_redaction_rules=False, - msc3083_join_rules=False, - msc3375_redaction_rules=False, - msc2403_knocking=False, + implicit_room_creator=False, + updated_redaction_rules=False, + restricted_join_rule=False, + restricted_join_rule_fix=False, + knock_join_rule=False, msc3389_relation_redactions=False, - msc3787_knock_restricted_join_rule=False, - msc3667_int_only_power_levels=False, - msc3821_redaction_rules=False, + knock_restricted_join_rule=False, + enforce_int_power_levels=False, msc3931_push_features=(), - msc3989_redaction_rules=False, ) V4 = RoomVersion( "4", @@ -183,17 +170,15 @@ class RoomVersions: special_case_aliases_auth=True, strict_canonicaljson=False, limit_notifications_power_levels=False, - msc2175_implicit_room_creator=False, - msc2176_redaction_rules=False, - msc3083_join_rules=False, - msc3375_redaction_rules=False, - msc2403_knocking=False, + implicit_room_creator=False, + updated_redaction_rules=False, + restricted_join_rule=False, + restricted_join_rule_fix=False, + knock_join_rule=False, msc3389_relation_redactions=False, - msc3787_knock_restricted_join_rule=False, - msc3667_int_only_power_levels=False, - msc3821_redaction_rules=False, + knock_restricted_join_rule=False, + enforce_int_power_levels=False, msc3931_push_features=(), - msc3989_redaction_rules=False, ) V5 = RoomVersion( "5", @@ -204,17 +189,15 @@ class RoomVersions: special_case_aliases_auth=True, strict_canonicaljson=False, limit_notifications_power_levels=False, - msc2175_implicit_room_creator=False, - msc2176_redaction_rules=False, - msc3083_join_rules=False, - msc3375_redaction_rules=False, - msc2403_knocking=False, + implicit_room_creator=False, + updated_redaction_rules=False, + restricted_join_rule=False, + restricted_join_rule_fix=False, + knock_join_rule=False, 
msc3389_relation_redactions=False, - msc3787_knock_restricted_join_rule=False, - msc3667_int_only_power_levels=False, - msc3821_redaction_rules=False, + knock_restricted_join_rule=False, + enforce_int_power_levels=False, msc3931_push_features=(), - msc3989_redaction_rules=False, ) V6 = RoomVersion( "6", @@ -225,38 +208,15 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, - msc2175_implicit_room_creator=False, - msc2176_redaction_rules=False, - msc3083_join_rules=False, - msc3375_redaction_rules=False, - msc2403_knocking=False, + implicit_room_creator=False, + updated_redaction_rules=False, + restricted_join_rule=False, + restricted_join_rule_fix=False, + knock_join_rule=False, msc3389_relation_redactions=False, - msc3787_knock_restricted_join_rule=False, - msc3667_int_only_power_levels=False, - msc3821_redaction_rules=False, + knock_restricted_join_rule=False, + enforce_int_power_levels=False, msc3931_push_features=(), - msc3989_redaction_rules=False, - ) - MSC2176 = RoomVersion( - "org.matrix.msc2176", - RoomDisposition.UNSTABLE, - EventFormatVersions.ROOM_V4_PLUS, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=False, - strict_canonicaljson=True, - limit_notifications_power_levels=True, - msc2175_implicit_room_creator=False, - msc2176_redaction_rules=True, - msc3083_join_rules=False, - msc3375_redaction_rules=False, - msc2403_knocking=False, - msc3389_relation_redactions=False, - msc3787_knock_restricted_join_rule=False, - msc3667_int_only_power_levels=False, - msc3821_redaction_rules=False, - msc3931_push_features=(), - msc3989_redaction_rules=False, ) V7 = RoomVersion( "7", @@ -267,17 +227,15 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, - msc2175_implicit_room_creator=False, - msc2176_redaction_rules=False, - msc3083_join_rules=False, - msc3375_redaction_rules=False, - msc2403_knocking=True, + implicit_room_creator=False, + updated_redaction_rules=False, + restricted_join_rule=False, + restricted_join_rule_fix=False, + knock_join_rule=True, msc3389_relation_redactions=False, - msc3787_knock_restricted_join_rule=False, - msc3667_int_only_power_levels=False, - msc3821_redaction_rules=False, + knock_restricted_join_rule=False, + enforce_int_power_levels=False, msc3931_push_features=(), - msc3989_redaction_rules=False, ) V8 = RoomVersion( "8", @@ -288,17 +246,15 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, - msc2175_implicit_room_creator=False, - msc2176_redaction_rules=False, - msc3083_join_rules=True, - msc3375_redaction_rules=False, - msc2403_knocking=True, + implicit_room_creator=False, + updated_redaction_rules=False, + restricted_join_rule=True, + restricted_join_rule_fix=False, + knock_join_rule=True, msc3389_relation_redactions=False, - msc3787_knock_restricted_join_rule=False, - msc3667_int_only_power_levels=False, - msc3821_redaction_rules=False, + knock_restricted_join_rule=False, + enforce_int_power_levels=False, msc3931_push_features=(), - msc3989_redaction_rules=False, ) V9 = RoomVersion( "9", @@ -309,59 +265,15 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, - msc2175_implicit_room_creator=False, - msc2176_redaction_rules=False, - msc3083_join_rules=True, - msc3375_redaction_rules=True, - msc2403_knocking=True, + implicit_room_creator=False, 
+ updated_redaction_rules=False, + restricted_join_rule=True, + restricted_join_rule_fix=True, + knock_join_rule=True, msc3389_relation_redactions=False, - msc3787_knock_restricted_join_rule=False, - msc3667_int_only_power_levels=False, - msc3821_redaction_rules=False, + knock_restricted_join_rule=False, + enforce_int_power_levels=False, msc3931_push_features=(), - msc3989_redaction_rules=False, - ) - MSC3787 = RoomVersion( - "org.matrix.msc3787", - RoomDisposition.UNSTABLE, - EventFormatVersions.ROOM_V4_PLUS, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=False, - strict_canonicaljson=True, - limit_notifications_power_levels=True, - msc2175_implicit_room_creator=False, - msc2176_redaction_rules=False, - msc3083_join_rules=True, - msc3375_redaction_rules=True, - msc2403_knocking=True, - msc3389_relation_redactions=False, - msc3787_knock_restricted_join_rule=True, - msc3667_int_only_power_levels=False, - msc3821_redaction_rules=False, - msc3931_push_features=(), - msc3989_redaction_rules=False, - ) - MSC3821 = RoomVersion( - "org.matrix.msc3821.opt1", - RoomDisposition.UNSTABLE, - EventFormatVersions.ROOM_V4_PLUS, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=False, - strict_canonicaljson=True, - limit_notifications_power_levels=True, - msc2175_implicit_room_creator=False, - msc2176_redaction_rules=False, - msc3083_join_rules=True, - msc3375_redaction_rules=True, - msc2403_knocking=True, - msc3389_relation_redactions=False, - msc3787_knock_restricted_join_rule=False, - msc3667_int_only_power_levels=False, - msc3821_redaction_rules=True, - msc3931_push_features=(), - msc3989_redaction_rules=False, ) V10 = RoomVersion( "10", @@ -372,17 +284,15 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, - msc2175_implicit_room_creator=False, - msc2176_redaction_rules=False, - msc3083_join_rules=True, - msc3375_redaction_rules=True, - msc2403_knocking=True, + implicit_room_creator=False, + updated_redaction_rules=False, + restricted_join_rule=True, + restricted_join_rule_fix=True, + knock_join_rule=True, msc3389_relation_redactions=False, - msc3787_knock_restricted_join_rule=True, - msc3667_int_only_power_levels=True, - msc3821_redaction_rules=False, + knock_restricted_join_rule=True, + enforce_int_power_levels=True, msc3931_push_features=(), - msc3989_redaction_rules=False, ) MSC1767v10 = RoomVersion( # MSC1767 (Extensible Events) based on room version "10" @@ -394,60 +304,34 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, - msc2175_implicit_room_creator=False, - msc2176_redaction_rules=False, - msc3083_join_rules=True, - msc3375_redaction_rules=True, - msc2403_knocking=True, + implicit_room_creator=False, + updated_redaction_rules=False, + restricted_join_rule=True, + restricted_join_rule_fix=True, + knock_join_rule=True, msc3389_relation_redactions=False, - msc3787_knock_restricted_join_rule=True, - msc3667_int_only_power_levels=True, - msc3821_redaction_rules=False, + knock_restricted_join_rule=True, + enforce_int_power_levels=True, msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,), - msc3989_redaction_rules=False, ) - MSC3989 = RoomVersion( - "org.matrix.msc3989", - RoomDisposition.UNSTABLE, + V11 = RoomVersion( + "11", + RoomDisposition.STABLE, EventFormatVersions.ROOM_V4_PLUS, StateResolutionVersions.V2, enforce_key_validity=True, special_case_aliases_auth=False, 
strict_canonicaljson=True, limit_notifications_power_levels=True, - msc2175_implicit_room_creator=False, - msc2176_redaction_rules=False, - msc3083_join_rules=True, - msc3375_redaction_rules=True, - msc2403_knocking=True, + implicit_room_creator=True, # Used by MSC3820 + updated_redaction_rules=True, # Used by MSC3820 + restricted_join_rule=True, + restricted_join_rule_fix=True, + knock_join_rule=True, msc3389_relation_redactions=False, - msc3787_knock_restricted_join_rule=True, - msc3667_int_only_power_levels=True, - msc3821_redaction_rules=False, + knock_restricted_join_rule=True, + enforce_int_power_levels=True, msc3931_push_features=(), - msc3989_redaction_rules=True, - ) - MSC3820opt2 = RoomVersion( - # Based upon v10 - "org.matrix.msc3820.opt2", - RoomDisposition.UNSTABLE, - EventFormatVersions.ROOM_V4_PLUS, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=False, - strict_canonicaljson=True, - limit_notifications_power_levels=True, - msc2175_implicit_room_creator=True, # Used by MSC3820 - msc2176_redaction_rules=True, # Used by MSC3820 - msc3083_join_rules=True, - msc3375_redaction_rules=True, - msc2403_knocking=True, - msc3389_relation_redactions=False, - msc3787_knock_restricted_join_rule=True, - msc3667_int_only_power_levels=True, - msc3821_redaction_rules=True, # Used by MSC3820 - msc3931_push_features=(), - msc3989_redaction_rules=True, # Used by MSC3820 ) @@ -460,14 +344,11 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = { RoomVersions.V4, RoomVersions.V5, RoomVersions.V6, - RoomVersions.MSC2176, RoomVersions.V7, RoomVersions.V8, RoomVersions.V9, - RoomVersions.MSC3787, RoomVersions.V10, - RoomVersions.MSC3989, - RoomVersions.MSC3820opt2, + RoomVersions.V11, ) } @@ -496,12 +377,12 @@ MSC3244_CAPABILITIES = { RoomVersionCapability( "knock", RoomVersions.V7, - lambda room_version: room_version.msc2403_knocking, + lambda room_version: room_version.knock_join_rule, ), RoomVersionCapability( "restricted", RoomVersions.V9, - lambda room_version: room_version.msc3083_join_rules, + lambda room_version: room_version.restricted_join_rule, ), ) } diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 3aaf53dfbd..3a260a492b 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -126,7 +126,7 @@ def validate_event_for_room_version(event: "EventBase") -> None: raise AuthError(403, "Event not signed by sending server") is_invite_via_allow_rule = ( - event.room_version.msc3083_join_rules + event.room_version.restricted_join_rule and event.type == EventTypes.Member and event.membership == Membership.JOIN and EventContentFields.AUTHORISING_USER in event.content @@ -352,11 +352,9 @@ LENIENT_EVENT_BYTE_LIMITS_ROOM_VERSIONS = { RoomVersions.V4, RoomVersions.V5, RoomVersions.V6, - RoomVersions.MSC2176, RoomVersions.V7, RoomVersions.V8, RoomVersions.V9, - RoomVersions.MSC3787, RoomVersions.V10, RoomVersions.MSC1767v10, } @@ -449,7 +447,7 @@ def _check_create(event: "EventBase") -> None: # 1.4 If content has no creator field, reject if the room version requires it. 
if ( - not event.room_version.msc2175_implicit_room_creator + not event.room_version.implicit_room_creator and EventContentFields.ROOM_CREATOR not in event.content ): raise AuthError(403, "Create event lacks a 'creator' property") @@ -486,7 +484,7 @@ def _is_membership_change_allowed( key = (EventTypes.Create, "") create = auth_events.get(key) if create and event.prev_event_ids()[0] == create.event_id: - if room_version.msc2175_implicit_room_creator: + if room_version.implicit_room_creator: creator = create.sender else: creator = create.content[EventContentFields.ROOM_CREATOR] @@ -509,7 +507,7 @@ def _is_membership_change_allowed( caller_invited = caller and caller.membership == Membership.INVITE caller_knocked = ( caller - and room_version.msc2403_knocking + and room_version.knock_join_rule and caller.membership == Membership.KNOCK ) @@ -609,9 +607,9 @@ def _is_membership_change_allowed( elif join_rule == JoinRules.PUBLIC: pass elif ( - room_version.msc3083_join_rules and join_rule == JoinRules.RESTRICTED + room_version.restricted_join_rule and join_rule == JoinRules.RESTRICTED ) or ( - room_version.msc3787_knock_restricted_join_rule + room_version.knock_restricted_join_rule and join_rule == JoinRules.KNOCK_RESTRICTED ): # This is the same as public, but the event must contain a reference @@ -641,9 +639,9 @@ def _is_membership_change_allowed( elif ( join_rule == JoinRules.INVITE - or (room_version.msc2403_knocking and join_rule == JoinRules.KNOCK) + or (room_version.knock_join_rule and join_rule == JoinRules.KNOCK) or ( - room_version.msc3787_knock_restricted_join_rule + room_version.knock_restricted_join_rule and join_rule == JoinRules.KNOCK_RESTRICTED ) ): @@ -677,9 +675,9 @@ def _is_membership_change_allowed( "You don't have permission to ban", errcode=Codes.INSUFFICIENT_POWER, ) - elif room_version.msc2403_knocking and Membership.KNOCK == membership: + elif room_version.knock_join_rule and Membership.KNOCK == membership: if join_rule != JoinRules.KNOCK and ( - not room_version.msc3787_knock_restricted_join_rule + not room_version.knock_restricted_join_rule or join_rule != JoinRules.KNOCK_RESTRICTED ): raise AuthError(403, "You don't have permission to knock") @@ -836,7 +834,7 @@ def _check_power_levels( # Reject events with stringy power levels if required by room version if ( event.type == EventTypes.PowerLevels - and room_version_obj.msc3667_int_only_power_levels + and room_version_obj.enforce_int_power_levels ): for k, v in event.content.items(): if k in { @@ -972,7 +970,7 @@ def get_user_power_level(user_id: str, auth_events: StateMap["EventBase"]) -> in key = (EventTypes.Create, "") create_event = auth_events.get(key) if create_event is not None: - if create_event.room_version.msc2175_implicit_room_creator: + if create_event.room_version.implicit_room_creator: creator = create_event.sender else: creator = create_event.content[EventContentFields.ROOM_CREATOR] @@ -1110,7 +1108,7 @@ def auth_types_for_event( ) auth_types.add(key) - if room_version.msc3083_join_rules and membership == Membership.JOIN: + if room_version.restricted_join_rule and membership == Membership.JOIN: if EventContentFields.AUTHORISING_USER in event.content: key = ( EventTypes.Member, diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 75b62adb33..35257a3b1b 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -346,7 +346,7 @@ class EventBase(metaclass=abc.ABCMeta): @property def redacts(self) -> Optional[str]: """MSC2176 moved the redacts field into the content.""" 
- if self.room_version.msc2176_redaction_rules: + if self.room_version.updated_redaction_rules: return self.content.get("redacts") return self.get("redacts") diff --git a/synapse/events/builder.py b/synapse/events/builder.py index a254548c6c..14ea0e6640 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -175,7 +175,7 @@ class EventBuilder: # MSC2174 moves the redacts property to the content, it is invalid to # provide it as a top-level property. - if self._redacts is not None and not self.room_version.msc2176_redaction_rules: + if self._redacts is not None and not self.room_version.updated_redaction_rules: event_dict["redacts"] = self._redacts if self._origin_server_ts is not None: diff --git a/synapse/events/utils.py b/synapse/events/utils.py index a55efcca56..ecfc5c0568 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -108,13 +108,9 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic "origin_server_ts", ] - # Room versions from before MSC2176 had additional allowed keys. - if not room_version.msc2176_redaction_rules: - allowed_keys.extend(["prev_state", "membership"]) - - # Room versions before MSC3989 kept the origin field. - if not room_version.msc3989_redaction_rules: - allowed_keys.append("origin") + # Earlier room versions from had additional allowed keys. + if not room_version.updated_redaction_rules: + allowed_keys.extend(["prev_state", "membership", "origin"]) event_type = event_dict["type"] @@ -127,9 +123,9 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic if event_type == EventTypes.Member: add_fields("membership") - if room_version.msc3375_redaction_rules: + if room_version.restricted_join_rule_fix: add_fields(EventContentFields.AUTHORISING_USER) - if room_version.msc3821_redaction_rules: + if room_version.updated_redaction_rules: # Preserve the signed field under third_party_invite. third_party_invite = event_dict["content"].get("third_party_invite") if isinstance(third_party_invite, collections.abc.Mapping): @@ -141,13 +137,13 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic elif event_type == EventTypes.Create: # MSC2176 rules state that create events cannot be redacted. - if room_version.msc2176_redaction_rules: + if room_version.updated_redaction_rules: return event_dict add_fields("creator") elif event_type == EventTypes.JoinRules: add_fields("join_rule") - if room_version.msc3083_join_rules: + if room_version.restricted_join_rule: add_fields("allow") elif event_type == EventTypes.PowerLevels: add_fields( @@ -161,14 +157,14 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic "redact", ) - if room_version.msc2176_redaction_rules: + if room_version.updated_redaction_rules: add_fields("invite") elif event_type == EventTypes.Aliases and room_version.special_case_aliases_auth: add_fields("aliases") elif event_type == EventTypes.RoomHistoryVisibility: add_fields("history_visibility") - elif event_type == EventTypes.Redaction and room_version.msc2176_redaction_rules: + elif event_type == EventTypes.Redaction and room_version.updated_redaction_rules: add_fields("redacts") # Protect the rel_type and event_id fields under the m.relates_to field. @@ -477,6 +473,15 @@ def serialize_event( if config.as_client_event: d = config.event_format(d) + # If the event is a redaction, copy the redacts field from the content to + # top-level for backwards compatibility. 
+ if ( + e.type == EventTypes.Redaction + and e.room_version.updated_redaction_rules + and e.redacts is not None + ): + d["redacts"] = e.redacts + only_event_fields = config.only_event_fields if only_event_fields: if not isinstance(only_event_fields, list) or not all( diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index b77022b406..31e0260b83 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -231,7 +231,7 @@ async def _check_sigs_on_pdu( # If this is a join event for a restricted room it may have been authorised # via a different server from the sending server. Check those signatures. if ( - room_version.msc3083_join_rules + room_version.restricted_join_rule and pdu.type == EventTypes.Member and pdu.membership == Membership.JOIN and EventContentFields.AUTHORISING_USER in pdu.content diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index e5359ca558..89bd597409 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -983,7 +983,7 @@ class FederationClient(FederationBase): if not room_version: raise UnsupportedRoomVersionError() - if not room_version.msc2403_knocking and membership == Membership.KNOCK: + if not room_version.knock_join_rule and membership == Membership.KNOCK: raise SynapseError( 400, "This room version does not support knocking", @@ -1069,7 +1069,7 @@ class FederationClient(FederationBase): # * Ensure the signatures are good. # # Otherwise, fallback to the provided event. - if room_version.msc3083_join_rules and response.event: + if room_version.restricted_join_rule and response.event: event = response.event valid_pdu = await self._check_sigs_and_hash_and_fetch_one( @@ -1195,7 +1195,7 @@ class FederationClient(FederationBase): # MSC3083 defines additional error codes for room joins. failover_errcodes = None - if room_version.msc3083_join_rules: + if room_version.restricted_join_rule: failover_errcodes = ( Codes.UNABLE_AUTHORISE_JOIN, Codes.UNABLE_TO_GRANT_JOIN, diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 61fa3b30af..fa61dd8c10 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -806,7 +806,7 @@ class FederationServer(FederationBase): raise IncompatibleRoomVersionError(room_version=room_version.identifier) # Check that this room supports knocking as defined by its room version - if not room_version.msc2403_knocking: + if not room_version.knock_join_rule: raise SynapseError( 403, "This room version does not support knocking", @@ -909,7 +909,7 @@ class FederationServer(FederationBase): errcode=Codes.NOT_FOUND, ) - if membership_type == Membership.KNOCK and not room_version.msc2403_knocking: + if membership_type == Membership.KNOCK and not room_version.knock_join_rule: raise SynapseError( 403, "This room version does not support knocking", @@ -933,7 +933,7 @@ class FederationServer(FederationBase): # the event is valid to be sent into the room. Currently this is only done # if the user is being joined via restricted join rules. 
if ( - room_version.msc3083_join_rules + room_version.restricted_join_rule and event.membership == Membership.JOIN and EventContentFields.AUTHORISING_USER in event.content ): diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index 3e37c0cbe2..82a7617a08 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -277,7 +277,7 @@ class EventAuthHandler: True if the proper room version and join rules are set for restricted access. """ # This only applies to room versions which support the new join rule. - if not room_version.msc3083_join_rules: + if not room_version.restricted_join_rule: return False # If there's no join rule, then it defaults to invite (so this doesn't apply). @@ -292,7 +292,7 @@ class EventAuthHandler: return True # also check for MSC3787 behaviour - if room_version.msc3787_knock_restricted_join_rule: + if room_version.knock_restricted_join_rule: return content_join_rule == JoinRules.KNOCK_RESTRICTED return False diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index cc5ed97730..15b9fbe44a 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -957,7 +957,7 @@ class FederationHandler: # Note that this requires the /send_join request to come back to the # same server. prev_event_ids = None - if room_version.msc3083_join_rules: + if room_version.restricted_join_rule: # Note that the room's state can change out from under us and render our # nice join rules-conformant event non-conformant by the time we build the # event. When this happens, our validation at the end fails and we respond diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index bf907b7881..0513e28aab 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1116,7 +1116,7 @@ class RoomCreationHandler: preset_config, config = self._room_preset_config(room_config) # MSC2175 removes the creator field from the create event. - if not room_version.msc2175_implicit_room_creator: + if not room_version.implicit_room_creator: creation_content["creator"] = creator_id creation_event, unpersisted_creation_context = await create_event( EventTypes.Create, creation_content, False diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 807245160d..dad3e23470 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -564,9 +564,9 @@ class RoomSummaryHandler: join_rule = join_rules_event.content.get("join_rule") if ( join_rule == JoinRules.PUBLIC - or (room_version.msc2403_knocking and join_rule == JoinRules.KNOCK) + or (room_version.knock_join_rule and join_rule == JoinRules.KNOCK) or ( - room_version.msc3787_knock_restricted_join_rule + room_version.knock_restricted_join_rule and join_rule == JoinRules.KNOCK_RESTRICTED ) ): diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 67377c647b..990c079c81 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -375,7 +375,7 @@ class BulkPushRuleEvaluator: # _get_power_levels_and_sender_level in its call to get_user_power_level # (even for room V10.) 
notification_levels = power_levels.get("notifications", {}) - if not event.room_version.msc3667_int_only_power_levels: + if not event.room_version.enforce_int_power_levels: keys = list(notification_levels.keys()) for key in keys: level = notification_levels.get(key, SENTINEL) diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 951bd033f5..dc498001e4 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -1117,7 +1117,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet): # Ensure the redacts property in the content matches the one provided in # the URL. room_version = await self._store.get_room_version(room_id) - if room_version.msc2176_redaction_rules: + if room_version.updated_redaction_rules: if "redacts" in content and content["redacts"] != event_id: raise SynapseError( 400, @@ -1151,7 +1151,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet): "sender": requester.user.to_string(), } # Earlier room versions had a top-level redacts property. - if not room_version.msc2176_redaction_rules: + if not room_version.updated_redaction_rules: event_dict["redacts"] = event_id ( diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index ca8be8c80d..830658f328 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -2136,7 +2136,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): raise StoreError(400, "No create event in state") # Before MSC2175, the room creator was a separate field. - if not room_version.msc2175_implicit_room_creator: + if not room_version.implicit_room_creator: room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR) if not isinstance(room_creator, str): diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index c9a610db9a..6a52af4d82 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -140,18 +140,16 @@ class PruneEventTestCase(stdlib_unittest.TestCase): }, ) - # As of MSC2176 we now redact the membership and prev_states keys. + # As of room versions we now redact the membership, prev_states, and origin keys. self.run_test( - {"type": "A", "prev_state": "prev_state", "membership": "join"}, + { + "type": "A", + "prev_state": "prev_state", + "membership": "join", + "origin": "example.com", + }, {"type": "A", "content": {}, "signatures": {}, "unsigned": {}}, - room_version=RoomVersions.MSC2176, - ) - - # As of MSC3989 we now redact the origin key. 
- self.run_test( - {"type": "A", "origin": "example.com"}, - {"type": "A", "content": {}, "signatures": {}, "unsigned": {}}, - room_version=RoomVersions.MSC3989, + room_version=RoomVersions.V11, ) def test_unsigned(self) -> None: @@ -236,7 +234,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase): "signatures": {}, "unsigned": {}, }, - room_version=RoomVersions.MSC2176, + room_version=RoomVersions.V11, ) def test_power_levels(self) -> None: @@ -286,7 +284,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase): "signatures": {}, "unsigned": {}, }, - room_version=RoomVersions.MSC2176, + room_version=RoomVersions.V11, ) def test_alias_event(self) -> None: @@ -349,7 +347,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase): "signatures": {}, "unsigned": {}, }, - room_version=RoomVersions.MSC2176, + room_version=RoomVersions.V11, ) def test_join_rules(self) -> None: @@ -472,7 +470,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase): "signatures": {}, "unsigned": {}, }, - room_version=RoomVersions.MSC3821, + room_version=RoomVersions.V11, ) # Ensure this doesn't break if an invalid field is sent. @@ -491,7 +489,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase): "signatures": {}, "unsigned": {}, }, - room_version=RoomVersions.MSC3821, + room_version=RoomVersions.V11, ) self.run_test( @@ -509,7 +507,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase): "signatures": {}, "unsigned": {}, }, - room_version=RoomVersions.MSC3821, + room_version=RoomVersions.V11, ) def test_relations(self) -> None: diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py index b43e95292c..6028886bd6 100644 --- a/tests/rest/client/test_redactions.py +++ b/tests/rest/client/test_redactions.py @@ -20,6 +20,8 @@ from synapse.api.room_versions import RoomVersions from synapse.rest import admin from synapse.rest.client import login, room, sync from synapse.server import HomeServer +from synapse.storage._base import db_to_json +from synapse.storage.database import LoggingTransaction from synapse.types import JsonDict from synapse.util import Clock @@ -573,7 +575,7 @@ class RedactionsTestCase(HomeserverTestCase): room_id = self.helper.create_room_as( self.mod_user_id, tok=self.mod_access_token, - room_version=RoomVersions.MSC2176.identifier, + room_version=RoomVersions.V11.identifier, ) # Create an event. @@ -597,5 +599,20 @@ class RedactionsTestCase(HomeserverTestCase): redact_event = timeline[-1] self.assertEqual(redact_event["type"], EventTypes.Redaction) # The redacts key should be in the content. - self.assertNotIn("redacts", redact_event) self.assertEquals(redact_event["content"]["redacts"], event_id) + + # It should also be copied as the top-level redacts field for backwards + # compatibility. + self.assertEquals(redact_event["redacts"], event_id) + + # But it isn't actually part of the event. 
+ def get_event(txn: LoggingTransaction) -> JsonDict: + return db_to_json( + main_datastore._fetch_event_rows(txn, [event_id])[event_id].json + ) + + main_datastore = self.hs.get_datastores().main + event_json = self.get_success( + main_datastore.db_pool.runInteraction("get_event", get_event) + ) + self.assertNotIn("redacts", event_json) From 69699a9bd11822635fcf2166f5dec2ce2365219c Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 18 Jul 2023 14:06:00 +0100 Subject: [PATCH 237/562] 1.88.0 --- CHANGES.md | 10 +++++++++- changelog.d/15953.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/15953.misc diff --git a/CHANGES.md b/CHANGES.md index 22d56a9a01..f379c994f0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,4 +1,4 @@ -# Synapse 1.88.0rc1 (2023-07-11) +# Synapse 1.88.0 (2023-07-18) This release - raises the minimum supported version of Python to 3.8, as Python 3.7 is now [end-of-life](https://devguide.python.org/versions/), and @@ -6,6 +6,14 @@ This release See [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.88/docs/upgrade.md#upgrading-to-v1880) for more information. + +### Bugfixes + +- Revert "Stop writing to column `user_id` of tables `profiles` and `user_filters`", which was introduced in Synapse 1.88.0rc1. ([\#15953](https://github.com/matrix-org/synapse/issues/15953)) + + +# Synapse 1.88.0rc1 (2023-07-11) + ### Features - Add `not_user_type` param to the [list accounts admin API](https://matrix-org.github.io/synapse/v1.88/admin_api/user_admin_api.html#list-accounts). ([\#15844](https://github.com/matrix-org/synapse/issues/15844)) diff --git a/changelog.d/15953.misc b/changelog.d/15953.misc deleted file mode 100644 index a20e78178f..0000000000 --- a/changelog.d/15953.misc +++ /dev/null @@ -1 +0,0 @@ -Revert "Stop writing to column user_id of tables profiles and user_filters`. diff --git a/debian/changelog b/debian/changelog index 763edb8ec2..a369e0e5c2 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.88.0) stable; urgency=medium + + * New Synapse release 1.88.0. + + -- Synapse Packaging team Tue, 18 Jul 2023 13:59:28 +0100 + matrix-synapse-py3 (1.88.0~rc1) stable; urgency=medium * New Synapse release 1.88.0rc1. diff --git a/pyproject.toml b/pyproject.toml index d56602b2df..4382ff38e5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.88.0rc1" +version = "1.88.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From cb6e2c6cc7f45b3d4f5516b49741d133e7b2b1c3 Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 18 Jul 2023 16:59:27 -0700 Subject: [PATCH 238/562] Fix background schema updates failing over a large upgrade gap (#15887) --- changelog.d/15887.misc | 1 + ...05_mitigate_stream_ordering_update_race.py | 70 +++++++++++++++++++ 2 files changed, 71 insertions(+) create mode 100644 changelog.d/15887.misc create mode 100644 synapse/storage/schema/main/delta/78/05_mitigate_stream_ordering_update_race.py diff --git a/changelog.d/15887.misc b/changelog.d/15887.misc new file mode 100644 index 0000000000..7c1005078e --- /dev/null +++ b/changelog.d/15887.misc @@ -0,0 +1 @@ +Fix background schema updates failing over a large upgrade gap. 
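The Postgres-only delta script that follows runs its mitigation only while the `replace_stream_ordering_column` background update is still pending: it drops the `event_stream_ordering_fkey` foreign keys added by an earlier delta and re-adds them against `stream_ordering2`, marked NOT VALID, so that they are preserved when that column is renamed to `stream_ordering` once the background update finishes. On Postgres, a NOT VALID constraint is enforced for new writes, but existing rows are not checked until the constraint is explicitly validated, which keeps the step cheap. As a hedged illustration, not part of the patch (Synapse's own machinery decides if and when this actually happens), one of those constraints could later be validated with something like:

    from synapse.storage.database import LoggingTransaction

    def validate_fkey(txn: LoggingTransaction) -> None:
        # VALIDATE CONSTRAINT scans the existing rows, but takes a much
        # weaker lock than adding a fully-validated constraint in one step.
        txn.execute(
            "ALTER TABLE current_state_events"
            " VALIDATE CONSTRAINT event_stream_ordering_fkey"
        )
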
diff --git a/synapse/storage/schema/main/delta/78/05_mitigate_stream_ordering_update_race.py b/synapse/storage/schema/main/delta/78/05_mitigate_stream_ordering_update_race.py new file mode 100644 index 0000000000..1a22f6a404 --- /dev/null +++ b/synapse/storage/schema/main/delta/78/05_mitigate_stream_ordering_update_race.py @@ -0,0 +1,70 @@ +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from synapse.storage.database import LoggingTransaction +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine + + +def run_create( + cur: LoggingTransaction, + database_engine: BaseDatabaseEngine, +) -> None: + """ + An attempt to mitigate a painful race between foreground and background updates + touching the `stream_ordering` column of the events table. More info can be found + at https://github.com/matrix-org/synapse/issues/15677. + """ + + # technically the bg update we're concerned with below should only have been added in + # postgres but it doesn't hurt to be extra careful + if isinstance(database_engine, PostgresEngine): + select_sql = """ + SELECT 1 FROM background_updates + WHERE update_name = 'replace_stream_ordering_column' + """ + cur.execute(select_sql) + res = cur.fetchone() + + # if the background update `replace_stream_ordering_column` is still pending, we need + # to drop the indexes added in 7403, and re-add them to the column `stream_ordering2` + # with the idea that they will be preserved when the column is renamed `stream_ordering` + # after the background update has finished + if res: + drop_cse_sql = """ + ALTER TABLE current_state_events DROP CONSTRAINT event_stream_ordering_fkey + """ + cur.execute(drop_cse_sql) + + drop_lcm_sql = """ + ALTER TABLE local_current_membership DROP CONSTRAINT event_stream_ordering_fkey + """ + cur.execute(drop_lcm_sql) + + drop_rm_sql = """ + ALTER TABLE room_memberships DROP CONSTRAINT event_stream_ordering_fkey + """ + cur.execute(drop_rm_sql) + + add_cse_sql = """ + ALTER TABLE current_state_events ADD CONSTRAINT event_stream_ordering_fkey + FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering2) NOT VALID; + """ + cur.execute(add_cse_sql) + + add_lcm_sql = """ + ALTER TABLE local_current_membership ADD CONSTRAINT event_stream_ordering_fkey + FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering2) NOT VALID; + """ + cur.execute(add_lcm_sql) + + add_rm_sql = """ + ALTER TABLE room_memberships ADD CONSTRAINT event_stream_ordering_fkey + FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering2) NOT VALID; + """ + cur.execute(add_rm_sql) From 40a3583ba14cc32f63154afc9e2c9b1058697f16 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Jul 2023 12:06:38 +0100 Subject: [PATCH 239/562] Fix race in triggers for read/write locks. 
(#15933) --- changelog.d/15933.misc | 1 + .../04_read_write_locks_triggers.sql.postgres | 51 -------------- .../04_read_write_locks_triggers.sql.sqlite | 47 ------------- .../03_read_write_locks_triggers.sql.postgres | 69 +++++++++++++++++++ .../03_read_write_locks_triggers.sql.sqlite | 65 +++++++++++++++++ 5 files changed, 135 insertions(+), 98 deletions(-) create mode 100644 changelog.d/15933.misc create mode 100644 synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.postgres create mode 100644 synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.sqlite diff --git a/changelog.d/15933.misc b/changelog.d/15933.misc new file mode 100644 index 0000000000..8457994c68 --- /dev/null +++ b/changelog.d/15933.misc @@ -0,0 +1 @@ +Fix bug with read/write lock implementation. This is currently unused so has no observable effects. diff --git a/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.postgres b/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.postgres index e1a41be9c9..e1cc3469a4 100644 --- a/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.postgres +++ b/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.postgres @@ -99,54 +99,3 @@ CREATE UNIQUE INDEX worker_read_write_locks_write ON worker_read_write_locks (lo -- constraints. ALTER TABLE worker_read_write_locks_mode ADD CONSTRAINT worker_read_write_locks_mode_foreign FOREIGN KEY (lock_name, lock_key, token) REFERENCES worker_read_write_locks(lock_name, lock_key, token) DEFERRABLE INITIALLY DEFERRED; - - --- Add a trigger to UPSERT into `worker_read_write_locks_mode` whenever we try --- and acquire a lock, i.e. insert into `worker_read_write_locks`, -CREATE OR REPLACE FUNCTION upsert_read_write_lock_parent() RETURNS trigger AS $$ -BEGIN - INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token) - VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token) - ON CONFLICT (lock_name, lock_key) - DO NOTHING; - RETURN NEW; -END -$$ -LANGUAGE plpgsql; - -CREATE TRIGGER upsert_read_write_lock_parent_trigger BEFORE INSERT ON worker_read_write_locks - FOR EACH ROW - EXECUTE PROCEDURE upsert_read_write_lock_parent(); - - --- Ensure that we keep `worker_read_write_locks_mode` up to date whenever a lock --- is released (i.e. a row deleted from `worker_read_write_locks`). Either we --- update the `worker_read_write_locks_mode.token` to match another instance --- that has currently acquired the lock, or we delete the row if nobody has --- currently acquired a lock. 
-CREATE OR REPLACE FUNCTION delete_read_write_lock_parent() RETURNS trigger AS $$ -DECLARE - new_token TEXT; -BEGIN - SELECT token INTO new_token FROM worker_read_write_locks - WHERE - lock_name = OLD.lock_name - AND lock_key = OLD.lock_key; - - IF NOT FOUND THEN - DELETE FROM worker_read_write_locks_mode - WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key; - ELSE - UPDATE worker_read_write_locks_mode - SET token = new_token - WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key; - END IF; - - RETURN NEW; -END -$$ -LANGUAGE plpgsql; - -CREATE TRIGGER delete_read_write_lock_parent_trigger AFTER DELETE ON worker_read_write_locks - FOR EACH ROW - EXECUTE PROCEDURE delete_read_write_lock_parent(); diff --git a/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.sqlite b/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.sqlite index be2dfbbb8a..b15432f576 100644 --- a/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.sqlite +++ b/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.sqlite @@ -70,50 +70,3 @@ CREATE TABLE worker_read_write_locks ( CREATE UNIQUE INDEX worker_read_write_locks_key ON worker_read_write_locks (lock_name, lock_key, token); -- Ensures that only one instance can acquire a lock in write mode at a time. CREATE UNIQUE INDEX worker_read_write_locks_write ON worker_read_write_locks (lock_name, lock_key) WHERE write_lock; - - --- Add a trigger to UPSERT into `worker_read_write_locks_mode` whenever we try --- and acquire a lock, i.e. insert into `worker_read_write_locks`, -CREATE TRIGGER IF NOT EXISTS upsert_read_write_lock_parent_trigger -BEFORE INSERT ON worker_read_write_locks -FOR EACH ROW -BEGIN - -- First ensure that `worker_read_write_locks_mode` doesn't have stale - -- entries in it, as on SQLite we don't have the foreign key constraint to - -- enforce this. - DELETE FROM worker_read_write_locks_mode - WHERE lock_name = NEW.lock_name AND lock_key = NEW.lock_key - AND NOT EXISTS ( - SELECT 1 FROM worker_read_write_locks - WHERE lock_name = NEW.lock_name AND lock_key = NEW.lock_key - ); - - INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token) - VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token) - ON CONFLICT (lock_name, lock_key) - DO NOTHING; -END; - --- Ensure that we keep `worker_read_write_locks_mode` up to date whenever a lock --- is released (i.e. a row deleted from `worker_read_write_locks`). Either we --- update the `worker_read_write_locks_mode.token` to match another instance --- that has currently acquired the lock, or we delete the row if nobody has --- currently acquired a lock. 
-CREATE TRIGGER IF NOT EXISTS delete_read_write_lock_parent_trigger -AFTER DELETE ON worker_read_write_locks -FOR EACH ROW -BEGIN - DELETE FROM worker_read_write_locks_mode - WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key - AND NOT EXISTS ( - SELECT 1 FROM worker_read_write_locks - WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key - ); - - UPDATE worker_read_write_locks_mode - SET token = ( - SELECT token FROM worker_read_write_locks - WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key - ) - WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key; -END; diff --git a/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.postgres b/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.postgres new file mode 100644 index 0000000000..ea3496ef2d --- /dev/null +++ b/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.postgres @@ -0,0 +1,69 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Fix up the triggers that were in `78/04_read_write_locks_triggers.sql` + +-- Add a trigger to UPSERT into `worker_read_write_locks_mode` whenever we try +-- and acquire a lock, i.e. insert into `worker_read_write_locks`, +CREATE OR REPLACE FUNCTION upsert_read_write_lock_parent() RETURNS trigger AS $$ +BEGIN + INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token) + VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token) + ON CONFLICT (lock_name, lock_key) + DO UPDATE SET write_lock = NEW.write_lock, token = NEW.token; + RETURN NEW; +END +$$ +LANGUAGE plpgsql; + +DROP TRIGGER IF EXISTS upsert_read_write_lock_parent_trigger ON worker_read_write_locks; +CREATE TRIGGER upsert_read_write_lock_parent_trigger BEFORE INSERT ON worker_read_write_locks + FOR EACH ROW + EXECUTE PROCEDURE upsert_read_write_lock_parent(); + + +-- Ensure that we keep `worker_read_write_locks_mode` up to date whenever a lock +-- is released (i.e. a row deleted from `worker_read_write_locks`). Either we +-- update the `worker_read_write_locks_mode.token` to match another instance +-- that has currently acquired the lock, or we delete the row if nobody has +-- currently acquired a lock. 
+CREATE OR REPLACE FUNCTION delete_read_write_lock_parent() RETURNS trigger AS $$ +DECLARE + new_token TEXT; +BEGIN + SELECT token INTO new_token FROM worker_read_write_locks + WHERE + lock_name = OLD.lock_name + AND lock_key = OLD.lock_key + LIMIT 1 FOR UPDATE; + + IF NOT FOUND THEN + DELETE FROM worker_read_write_locks_mode + WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key AND token = OLD.token; + ELSE + UPDATE worker_read_write_locks_mode + SET token = new_token + WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key; + END IF; + + RETURN NEW; +END +$$ +LANGUAGE plpgsql; + +DROP TRIGGER IF EXISTS delete_read_write_lock_parent_trigger ON worker_read_write_locks; +CREATE TRIGGER delete_read_write_lock_parent_trigger AFTER DELETE ON worker_read_write_locks + FOR EACH ROW + EXECUTE PROCEDURE delete_read_write_lock_parent(); diff --git a/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.sqlite b/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.sqlite new file mode 100644 index 0000000000..acb1a77c80 --- /dev/null +++ b/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.sqlite @@ -0,0 +1,65 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Fix up the triggers that were in `78/04_read_write_locks_triggers.sql` + +-- Add a trigger to UPSERT into `worker_read_write_locks_mode` whenever we try +-- and acquire a lock, i.e. insert into `worker_read_write_locks`, +DROP TRIGGER IF EXISTS upsert_read_write_lock_parent_trigger; +CREATE TRIGGER IF NOT EXISTS upsert_read_write_lock_parent_trigger +BEFORE INSERT ON worker_read_write_locks +FOR EACH ROW +BEGIN + -- First ensure that `worker_read_write_locks_mode` doesn't have stale + -- entries in it, as on SQLite we don't have the foreign key constraint to + -- enforce this. + DELETE FROM worker_read_write_locks_mode + WHERE lock_name = NEW.lock_name AND lock_key = NEW.lock_key + AND NOT EXISTS ( + SELECT 1 FROM worker_read_write_locks + WHERE lock_name = NEW.lock_name AND lock_key = NEW.lock_key + ); + + INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token) + VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token) + ON CONFLICT (lock_name, lock_key) + DO UPDATE SET write_lock = NEW.write_lock, token = NEW.token; +END; + +-- Ensure that we keep `worker_read_write_locks_mode` up to date whenever a lock +-- is released (i.e. a row deleted from `worker_read_write_locks`). Either we +-- update the `worker_read_write_locks_mode.token` to match another instance +-- that has currently acquired the lock, or we delete the row if nobody has +-- currently acquired a lock. 
+DROP TRIGGER IF EXISTS delete_read_write_lock_parent_trigger;
+CREATE TRIGGER IF NOT EXISTS delete_read_write_lock_parent_trigger
+AFTER DELETE ON worker_read_write_locks
+FOR EACH ROW
+BEGIN
+    DELETE FROM worker_read_write_locks_mode
+        WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
+        AND token = OLD.token
+        AND NOT EXISTS (
+            SELECT 1 FROM worker_read_write_locks
+            WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
+        );
+
+    UPDATE worker_read_write_locks_mode
+        SET token = (
+            SELECT token FROM worker_read_write_locks
+            WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
+        )
+        WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key;
+END;

From 19796e20aab31272176e24ec23be9a18cc6680a5 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 19 Jul 2023 13:17:08 +0100
Subject: [PATCH 240/562] Fix bad merge of #15933 (#15958)

This was because we reverted the bump of the schema version, so we were
not applying the new deltas.
---
 changelog.d/15958.misc                          | 1 +
 .../06_read_write_locks_triggers.sql.postgres}  | 0
 .../06_read_write_locks_triggers.sql.sqlite}    | 0
 3 files changed, 1 insertion(+)
 create mode 100644 changelog.d/15958.misc
 rename synapse/storage/schema/main/delta/{79/03_read_write_locks_triggers.sql.postgres => 78/06_read_write_locks_triggers.sql.postgres} (100%)
 rename synapse/storage/schema/main/delta/{79/03_read_write_locks_triggers.sql.sqlite => 78/06_read_write_locks_triggers.sql.sqlite} (100%)

diff --git a/changelog.d/15958.misc b/changelog.d/15958.misc
new file mode 100644
index 0000000000..8457994c68
--- /dev/null
+++ b/changelog.d/15958.misc
@@ -0,0 +1 @@
+Fix bug with read/write lock implementation. This is currently unused so has no observable effects.

diff --git a/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.postgres b/synapse/storage/schema/main/delta/78/06_read_write_locks_triggers.sql.postgres
similarity index 100%
rename from synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.postgres
rename to synapse/storage/schema/main/delta/78/06_read_write_locks_triggers.sql.postgres

diff --git a/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.sqlite b/synapse/storage/schema/main/delta/78/06_read_write_locks_triggers.sql.sqlite
similarity index 100%
rename from synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.sqlite
rename to synapse/storage/schema/main/delta/78/06_read_write_locks_triggers.sql.sqlite

From 67f9e5293ea6650b2ec284c0b7503f3f3eade94b Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 19 Jul 2023 18:00:33 +0100
Subject: [PATCH 241/562] Ensure a long state res does not starve CPU (#15960)

We do this by yielding to the reactor in hot loops.
---
 changelog.d/15960.misc | 1 +
 synapse/state/v2.py    | 9 ++++++++-
 2 files changed, 9 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/15960.misc

diff --git a/changelog.d/15960.misc b/changelog.d/15960.misc
new file mode 100644
index 0000000000..7cac24a3c5
--- /dev/null
+++ b/changelog.d/15960.misc
@@ -0,0 +1 @@
+Ensure a long state res does not starve CPU by occasionally yielding to the reactor.
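An editorial note on the pattern #15960 applies below: Twisted's reactor is single-threaded, so a long CPU-bound loop blocks every other request until it finishes, and awaiting a zero-second sleep parks the coroutine until the next reactor tick so queued work can run in between. A minimal, self-contained sketch of the same idea using plain Twisted rather than Synapse's `Clock` wrapper (the batch size and per-item work are invented for illustration):

```python
from twisted.internet import reactor, task

_YIELD_EVERY = 100  # assumed batch size; the real loop uses _AWAIT_AFTER_ITERATIONS


async def crunch(items: list) -> int:
    """CPU-heavy loop that periodically hands control back to the reactor."""
    total = 0
    for idx, item in enumerate(items, start=1):
        total += len(str(item))  # stand-in for the real per-item work
        if idx % _YIELD_EVERY == 0:
            # A zero-delay Deferred resolves on the next reactor tick, so
            # other queued calls get to run before this loop continues.
            await task.deferLater(reactor, 0, lambda: None)
    return total
```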
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 1b9d7d8457..44c49274a9 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -667,7 +667,7 @@ async def _mainline_sort(
     order_map = {}
     for idx, ev_id in enumerate(event_ids, start=1):
         depth = await _get_mainline_depth_for_event(
-            event_map[ev_id], mainline_map, event_map, state_res_store
+            clock, event_map[ev_id], mainline_map, event_map, state_res_store
         )
         order_map[ev_id] = (depth, event_map[ev_id].origin_server_ts, ev_id)
 
@@ -682,6 +682,7 @@ async def _mainline_sort(
 
 
 async def _get_mainline_depth_for_event(
+    clock: Clock,
     event: EventBase,
     mainline_map: Dict[str, int],
     event_map: Dict[str, EventBase],
@@ -704,6 +705,7 @@ async def _get_mainline_depth_for_event(
 
     # We do an iterative search, replacing `event` with the power level in its
    # auth events (if any)
+    idx = 0
     while tmp_event:
         depth = mainline_map.get(tmp_event.event_id)
         if depth is not None:
@@ -720,6 +722,11 @@ async def _get_mainline_depth_for_event(
                 tmp_event = aev
                 break
 
+        idx += 1
+
+        if idx % _AWAIT_AFTER_ITERATIONS == 0:
+            await clock.sleep(0)
+
     # Didn't find a power level auth event, so we just return 0
     return 0
 

From ad52db3b5cbf8b78b10a82ce45313c606b244fee Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 20 Jul 2023 10:46:37 +0100
Subject: [PATCH 242/562] Reduce the amount of state we pull out (#15968)

---
 changelog.d/15968.misc          |  1 +
 synapse/handlers/federation.py  |  6 ++----
 synapse/handlers/message.py     |  4 ++--
 synapse/handlers/room_member.py | 15 +++++++++------
 4 files changed, 14 insertions(+), 12 deletions(-)
 create mode 100644 changelog.d/15968.misc

diff --git a/changelog.d/15968.misc b/changelog.d/15968.misc
new file mode 100644
index 0000000000..af7132cc72
--- /dev/null
+++ b/changelog.d/15968.misc
@@ -0,0 +1 @@
+Reduce the amount of state we pull out.
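A note on the recurring change in #15968's hunks below: each call site only ever reads one `(event_type, state_key)` entry out of the returned map, but was requesting a `(type, None)` wildcard, which makes the state store load every event of that type in the room. Narrowing the `StateFilter` to the exact key gives the same answer with far less data pulled out. Schematically (the user ID is invented for illustration):

```python
from synapse.api.constants import EventTypes
from synapse.types.state import StateFilter

# Before: wildcard state key; loads every m.room.member event in the state,
# which can be tens of thousands of rows in a large room.
broad = StateFilter.from_types([(EventTypes.Member, None)])

# After: exactly the one key that is subsequently looked up with .get().
narrow = StateFilter.from_types([(EventTypes.Member, "@alice:example.org")])
```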
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 15b9fbe44a..2b93b8c621 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1581,9 +1581,7 @@ class FederationHandler: event.content["third_party_invite"]["signed"]["token"], ) original_invite = None - prev_state_ids = await context.get_prev_state_ids( - StateFilter.from_types([(EventTypes.ThirdPartyInvite, None)]) - ) + prev_state_ids = await context.get_prev_state_ids(StateFilter.from_types([key])) original_invite_id = prev_state_ids.get(key) if original_invite_id: original_invite = await self.store.get_event( @@ -1636,7 +1634,7 @@ class FederationHandler: token = signed["token"] prev_state_ids = await context.get_prev_state_ids( - StateFilter.from_types([(EventTypes.ThirdPartyInvite, None)]) + StateFilter.from_types([(EventTypes.ThirdPartyInvite, token)]) ) invite_event_id = prev_state_ids.get((EventTypes.ThirdPartyInvite, token)) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 4292b47037..9910716bc6 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -738,7 +738,7 @@ class EventCreationHandler: prev_event_id = state_map.get((EventTypes.Member, event.sender)) else: prev_state_ids = await unpersisted_context.get_prev_state_ids( - StateFilter.from_types([(EventTypes.Member, None)]) + StateFilter.from_types([(EventTypes.Member, event.sender)]) ) prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender)) prev_event = ( @@ -860,7 +860,7 @@ class EventCreationHandler: return None prev_state_ids = await context.get_prev_state_ids( - StateFilter.from_types([(event.type, None)]) + StateFilter.from_types([(event.type, event.state_key)]) ) prev_event_id = prev_state_ids.get((event.type, event.state_key)) if not prev_event_id: diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 82e4fa7363..496e701f13 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -473,7 +473,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): ) context = await unpersisted_context.persist(event) prev_state_ids = await context.get_prev_state_ids( - StateFilter.from_types([(EventTypes.Member, None)]) + StateFilter.from_types([(EventTypes.Member, user_id)]) ) prev_member_event_id = prev_state_ids.get( @@ -1340,7 +1340,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): requester = types.create_requester(target_user) prev_state_ids = await context.get_prev_state_ids( - StateFilter.from_types([(EventTypes.GuestAccess, None)]) + StateFilter.from_types([(EventTypes.GuestAccess, "")]) ) if event.membership == Membership.JOIN: if requester.is_guest: @@ -1362,11 +1362,14 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): ratelimit=ratelimit, ) - prev_member_event_id = prev_state_ids.get( - (EventTypes.Member, event.state_key), None - ) - if event.membership == Membership.LEAVE: + prev_state_ids = await context.get_prev_state_ids( + StateFilter.from_types([(EventTypes.Member, event.state_key)]) + ) + prev_member_event_id = prev_state_ids.get( + (EventTypes.Member, event.state_key), None + ) + if prev_member_event_id: prev_member_event = await self.store.get_event(prev_member_event_id) if prev_member_event.membership == Membership.JOIN: From fd44053b84e6519b7425f295e71e1084111bec46 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 Jul 2023 11:07:58 +0100 Subject: [PATCH 243/562] Don't log exceptions for every non-200 response (#15969) Introduced in #15913 --- changelog.d/15969.feature 
| 1 +
 synapse/http/server.py    | 4 ----
 2 files changed, 1 insertion(+), 4 deletions(-)
 create mode 100644 changelog.d/15969.feature

diff --git a/changelog.d/15969.feature b/changelog.d/15969.feature
new file mode 100644
index 0000000000..0d77fae2dc
--- /dev/null
+++ b/changelog.d/15969.feature
@@ -0,0 +1 @@
+Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`.

diff --git a/synapse/http/server.py b/synapse/http/server.py
index f592600880..5109cec983 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -327,10 +327,6 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
             # of our stack, and thus gives us a sensible stack
             # trace.
             f = failure.Failure()
-            logger.exception(
-                "Error handling request",
-                exc_info=(f.type, f.value, f.getTracebackObject()),
-            )
             self._send_error_response(f, request)
 
     async def _async_render(

From 835174180b498a2ae7f3d014b5b190cc24d12eb7 Mon Sep 17 00:00:00 2001
From: Will Lewis <1543626+wrjlewis@users.noreply.github.com>
Date: Thu, 20 Jul 2023 13:33:06 +0100
Subject: [PATCH 244/562] Fixed grafana deploy annotations in the dashboard
 config, so it shows for those not managing matrix.org (#15957)

Removed the 'matrix.org' hardcoded instance setting
Originally introduced in #15674

Co-authored-by: wrjlewis
---
 changelog.d/15957.bugfix     | 1 +
 contrib/grafana/synapse.json | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/15957.bugfix

diff --git a/changelog.d/15957.bugfix b/changelog.d/15957.bugfix
new file mode 100644
index 0000000000..edbe2a956a
--- /dev/null
+++ b/changelog.d/15957.bugfix
@@ -0,0 +1 @@
+Fixed deploy annotations in the provided Grafana dashboard config, so that it shows for any homeserver and not just matrix.org. Contributed by @wrjlewis.
\ No newline at end of file

diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json
index f3253b32b9..90f449aa76 100644
--- a/contrib/grafana/synapse.json
+++ b/contrib/grafana/synapse.json
@@ -63,7 +63,7 @@
           "uid": "${DS_PROMETHEUS}"
         },
         "enable": true,
-        "expr": "changes(process_start_time_seconds{instance=\"matrix.org\",job=~\"synapse\"}[$bucket_size]) * on (instance, job) group_left(version) synapse_build_info{instance=\"matrix.org\",job=\"synapse\"}",
+        "expr": "changes(process_start_time_seconds{instance=\"$instance\",job=~\"synapse\"}[$bucket_size]) * on (instance, job) group_left(version) synapse_build_info{instance=\"$instance\",job=\"synapse\"}",
         "iconColor": "purple",
         "name": "deploys",
         "titleFormat": "Deployed {{version}}"

From fc1e534e411174d730ca3c0c7e4d2ef7fd8be56b Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 20 Jul 2023 15:51:28 +0100
Subject: [PATCH 245/562] Speed up updating state in large rooms (#15971)

This should speed up updating state in rooms with lots of state.
---
 changelog.d/15971.misc                       |   1 +
 synapse/handlers/message.py                  |   9 +-
 synapse/state/__init__.py                    |   3 +-
 synapse/storage/controllers/state.py         | 137 ++++++++++++++++++-
 synapse/storage/databases/main/roommember.py | 122 -----------------
 5 files changed, 141 insertions(+), 131 deletions(-)
 create mode 100644 changelog.d/15971.misc

diff --git a/changelog.d/15971.misc b/changelog.d/15971.misc
new file mode 100644
index 0000000000..4afd8922fc
--- /dev/null
+++ b/changelog.d/15971.misc
@@ -0,0 +1 @@
+Speed up updating state in large rooms.
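Before the diffs: most of #15971 is a mechanical move of `_get_joined_hosts` from the room-member store into `StateStorageController`, but the hot path it preserves is worth spelling out. Each room caches the joined-hosts map computed for the *last* state group, and when the next request's state group names the cached one as its `prev_group`, only the membership deltas are applied instead of rebuilding the map. A simplified, self-contained sketch of that strategy (all names invented; the real code also handles locking and the full-rebuild path):

```python
from dataclasses import dataclass, field
from typing import Dict, Optional, Set, Tuple


@dataclass
class JoinedHostsCache:
    """Joined hosts as of `state_group` for one room."""

    state_group: Optional[int] = None
    hosts_to_joined_users: Dict[str, Set[str]] = field(default_factory=dict)


def advance_cache(
    cache: JoinedHostsCache,
    new_group: int,
    prev_group: Optional[int],
    membership_deltas: Dict[Tuple[str, str], bool],  # (host, user_id) -> joined?
) -> bool:
    """Patch the cache forward one state group; False means a full rebuild is needed."""
    if prev_group is None or prev_group != cache.state_group:
        return False  # cache too stale to patch incrementally
    for (host, user_id), joined in membership_deltas.items():
        users = cache.hosts_to_joined_users.setdefault(host, set())
        if joined:
            users.add(user_id)
        else:
            users.discard(user_id)
        if not users:
            # No users left on this server: it is no longer in the room.
            del cache.hosts_to_joined_users[host]
    cache.state_group = new_group
    return True
```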
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 9910716bc6..fff0b5fa12 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1565,12 +1565,11 @@ class EventCreationHandler: if state_entry.state_group in self._external_cache_joined_hosts_updates: return - state = await state_entry.get_state( - self._storage_controllers.state, StateFilter.all() - ) with opentracing.start_active_span("get_joined_hosts"): - joined_hosts = await self.store.get_joined_hosts( - event.room_id, state, state_entry + joined_hosts = ( + await self._storage_controllers.state.get_joined_hosts( + event.room_id, state_entry + ) ) # Note that the expiry times must be larger than the expiry time in diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 9bc0c3b7b9..1b91cf5eaa 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -268,8 +268,7 @@ class StateHandler: The hosts in the room at the given events """ entry = await self.resolve_state_groups_for_events(room_id, event_ids) - state = await entry.get_state(self._state_storage_controller, StateFilter.all()) - return await self.store.get_joined_hosts(room_id, state, entry) + return await self._state_storage_controller.get_joined_hosts(room_id, entry) @trace @tag_args diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 233df7cce2..278c7832ba 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from itertools import chain from typing import ( TYPE_CHECKING, AbstractSet, @@ -19,14 +20,16 @@ from typing import ( Callable, Collection, Dict, + FrozenSet, Iterable, List, Mapping, Optional, Tuple, + Union, ) -from synapse.api.constants import EventTypes +from synapse.api.constants import EventTypes, Membership from synapse.events import EventBase from synapse.logging.opentracing import tag_args, trace from synapse.storage.roommember import ProfileInfo @@ -34,14 +37,20 @@ from synapse.storage.util.partial_state_events_tracker import ( PartialCurrentStateTracker, PartialStateEventsTracker, ) -from synapse.types import MutableStateMap, StateMap +from synapse.types import MutableStateMap, StateMap, get_domain_from_id from synapse.types.state import StateFilter +from synapse.util.async_helpers import Linearizer +from synapse.util.caches import intern_string +from synapse.util.caches.descriptors import cached from synapse.util.cancellation import cancellable +from synapse.util.metrics import Measure if TYPE_CHECKING: from synapse.server import HomeServer + from synapse.state import _StateCacheEntry from synapse.storage.databases import Databases + logger = logging.getLogger(__name__) @@ -52,10 +61,15 @@ class StateStorageController: def __init__(self, hs: "HomeServer", stores: "Databases"): self._is_mine_id = hs.is_mine_id + self._clock = hs.get_clock() self.stores = stores self._partial_state_events_tracker = PartialStateEventsTracker(stores.main) self._partial_state_room_tracker = PartialCurrentStateTracker(stores.main) + # Used by `_get_joined_hosts` to ensure only one thing mutates the cache + # at a time. Keyed by room_id. 
+        self._joined_host_linearizer = Linearizer("_JoinedHostsCache")
+
     def notify_event_un_partial_stated(self, event_id: str) -> None:
         self._partial_state_events_tracker.notify_un_partial_stated(event_id)
 
@@ -627,3 +641,122 @@ class StateStorageController:
         await self._partial_state_room_tracker.await_full_state(room_id)
 
         return await self.stores.main.get_users_in_room_with_profiles(room_id)
+
+    async def get_joined_hosts(
+        self, room_id: str, state_entry: "_StateCacheEntry"
+    ) -> FrozenSet[str]:
+        state_group: Union[object, int] = state_entry.state_group
+        if not state_group:
+            # If state_group is None it means it has yet to be assigned a
+            # state group, i.e. we need to make sure that calls with a state_group
+            # of None don't hit previous cached calls with a None state_group.
+            # To do this we set the state_group to a new object as object() != object()
+            state_group = object()
+
+        assert state_group is not None
+        with Measure(self._clock, "get_joined_hosts"):
+            return await self._get_joined_hosts(
+                room_id, state_group, state_entry=state_entry
+            )
+
+    @cached(num_args=2, max_entries=10000, iterable=True)
+    async def _get_joined_hosts(
+        self,
+        room_id: str,
+        state_group: Union[object, int],
+        state_entry: "_StateCacheEntry",
+    ) -> FrozenSet[str]:
+        # We don't use `state_group`, it's there so that we can cache based on
+        # it. However, it's important that it's never None, since two
+        # current_state's with a state_group of None are likely to be different.
+        #
+        # The `state_group` must match the `state_entry.state_group` (if not None).
+        assert state_group is not None
+        assert state_entry.state_group is None or state_entry.state_group == state_group
+
+        # We use a secondary cache of previous work to allow us to build up the
+        # joined hosts for the given state group based on previous state groups.
+        #
+        # We cache one object per room containing the results of the last state
+        # group we got joined hosts for. The idea is that generally
+        # `get_joined_hosts` is called with the "current" state group for the
+        # room, and so consecutive calls will be for consecutive state groups
+        # which point to the previous state group.
+        cache = await self.stores.main._get_joined_hosts_cache(room_id)
+
+        # If the state group in the cache matches, we already have the data we need.
+        if state_entry.state_group == cache.state_group:
+            return frozenset(cache.hosts_to_joined_users)
+
+        # Since we'll mutate the cache we need to lock.
+        async with self._joined_host_linearizer.queue(room_id):
+            if state_entry.state_group == cache.state_group:
+                # Same state group, so nothing to do. We've already checked for
+                # this above, but the cache may have changed while waiting on
+                # the lock.
+                pass
+            elif state_entry.prev_group == cache.state_group:
+                # The cached work is for the previous state group, so we work out
+                # the delta.
+                assert state_entry.delta_ids is not None
+                for (typ, state_key), event_id in state_entry.delta_ids.items():
+                    if typ != EventTypes.Member:
+                        continue
+
+                    host = intern_string(get_domain_from_id(state_key))
+                    user_id = state_key
+                    known_joins = cache.hosts_to_joined_users.setdefault(host, set())
+
+                    event = await self.stores.main.get_event(event_id)
+                    if event.membership == Membership.JOIN:
+                        known_joins.add(user_id)
+                    else:
+                        known_joins.discard(user_id)
+
+                    if not known_joins:
+                        cache.hosts_to_joined_users.pop(host, None)
+            else:
+                # The cache doesn't match the state group or prev state group,
+                # so we calculate the result from first principles.
+ # + # We need to fetch all hosts joined to the room according to `state` by + # inspecting all join memberships in `state`. However, if the `state` is + # relatively recent then many of its events are likely to be held in + # the current state of the room, which is easily available and likely + # cached. + # + # We therefore compute the set of `state` events not in the + # current state and only fetch those. + current_memberships = ( + await self.stores.main._get_approximate_current_memberships_in_room( + room_id + ) + ) + unknown_state_events = {} + joined_users_in_current_state = [] + + state = await state_entry.get_state( + self, StateFilter.from_types([(EventTypes.Member, None)]) + ) + + for (type, state_key), event_id in state.items(): + if event_id not in current_memberships: + unknown_state_events[type, state_key] = event_id + elif current_memberships[event_id] == Membership.JOIN: + joined_users_in_current_state.append(state_key) + + joined_user_ids = await self.stores.main.get_joined_user_ids_from_state( + room_id, unknown_state_events + ) + + cache.hosts_to_joined_users = {} + for user_id in chain(joined_user_ids, joined_users_in_current_state): + host = intern_string(get_domain_from_id(user_id)) + cache.hosts_to_joined_users.setdefault(host, set()).add(user_id) + + if state_entry.state_group: + cache.state_group = state_entry.state_group + else: + cache.state_group = object() + + return frozenset(cache.hosts_to_joined_users) diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 582875c91a..fff259f74c 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from itertools import chain from typing import ( TYPE_CHECKING, AbstractSet, @@ -57,15 +56,12 @@ from synapse.types import ( StrCollection, get_domain_from_id, ) -from synapse.util.async_helpers import Linearizer -from synapse.util.caches import intern_string from synapse.util.caches.descriptors import _CacheContext, cached, cachedList from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure if TYPE_CHECKING: from synapse.server import HomeServer - from synapse.state import _StateCacheEntry logger = logging.getLogger(__name__) @@ -91,10 +87,6 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ): super().__init__(database, db_conn, hs) - # Used by `_get_joined_hosts` to ensure only one thing mutates the cache - # at a time. Keyed by room_id. - self._joined_host_linearizer = Linearizer("_JoinedHostsCache") - self._server_notices_mxid = hs.config.servernotices.server_notices_mxid if ( @@ -1057,120 +1049,6 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): "get_current_hosts_in_room_ordered", get_current_hosts_in_room_ordered_txn ) - async def get_joined_hosts( - self, room_id: str, state: StateMap[str], state_entry: "_StateCacheEntry" - ) -> FrozenSet[str]: - state_group: Union[object, int] = state_entry.state_group - if not state_group: - # If state_group is None it means it has yet to be assigned a - # state group, i.e. we need to make sure that calls with a state_group - # of None don't hit previous cached calls with a None state_group. 
- # To do this we set the state_group to a new object as object() != object() - state_group = object() - - assert state_group is not None - with Measure(self._clock, "get_joined_hosts"): - return await self._get_joined_hosts( - room_id, state_group, state, state_entry=state_entry - ) - - @cached(num_args=2, max_entries=10000, iterable=True) - async def _get_joined_hosts( - self, - room_id: str, - state_group: Union[object, int], - state: StateMap[str], - state_entry: "_StateCacheEntry", - ) -> FrozenSet[str]: - # We don't use `state_group`, it's there so that we can cache based on - # it. However, its important that its never None, since two - # current_state's with a state_group of None are likely to be different. - # - # The `state_group` must match the `state_entry.state_group` (if not None). - assert state_group is not None - assert state_entry.state_group is None or state_entry.state_group == state_group - - # We use a secondary cache of previous work to allow us to build up the - # joined hosts for the given state group based on previous state groups. - # - # We cache one object per room containing the results of the last state - # group we got joined hosts for. The idea is that generally - # `get_joined_hosts` is called with the "current" state group for the - # room, and so consecutive calls will be for consecutive state groups - # which point to the previous state group. - cache = await self._get_joined_hosts_cache(room_id) - - # If the state group in the cache matches, we already have the data we need. - if state_entry.state_group == cache.state_group: - return frozenset(cache.hosts_to_joined_users) - - # Since we'll mutate the cache we need to lock. - async with self._joined_host_linearizer.queue(room_id): - if state_entry.state_group == cache.state_group: - # Same state group, so nothing to do. We've already checked for - # this above, but the cache may have changed while waiting on - # the lock. - pass - elif state_entry.prev_group == cache.state_group: - # The cached work is for the previous state group, so we work out - # the delta. - assert state_entry.delta_ids is not None - for (typ, state_key), event_id in state_entry.delta_ids.items(): - if typ != EventTypes.Member: - continue - - host = intern_string(get_domain_from_id(state_key)) - user_id = state_key - known_joins = cache.hosts_to_joined_users.setdefault(host, set()) - - event = await self.get_event(event_id) - if event.membership == Membership.JOIN: - known_joins.add(user_id) - else: - known_joins.discard(user_id) - - if not known_joins: - cache.hosts_to_joined_users.pop(host, None) - else: - # The cache doesn't match the state group or prev state group, - # so we calculate the result from first principles. - # - # We need to fetch all hosts joined to the room according to `state` by - # inspecting all join memberships in `state`. However, if the `state` is - # relatively recent then many of its events are likely to be held in - # the current state of the room, which is easily available and likely - # cached. - # - # We therefore compute the set of `state` events not in the - # current state and only fetch those. 
-                current_memberships = (
-                    await self._get_approximate_current_memberships_in_room(room_id)
-                )
-                unknown_state_events = {}
-                joined_users_in_current_state = []
-
-                for (type, state_key), event_id in state.items():
-                    if event_id not in current_memberships:
-                        unknown_state_events[type, state_key] = event_id
-                    elif current_memberships[event_id] == Membership.JOIN:
-                        joined_users_in_current_state.append(state_key)
-
-                joined_user_ids = await self.get_joined_user_ids_from_state(
-                    room_id, unknown_state_events
-                )
-
-                cache.hosts_to_joined_users = {}
-                for user_id in chain(joined_user_ids, joined_users_in_current_state):
-                    host = intern_string(get_domain_from_id(user_id))
-                    cache.hosts_to_joined_users.setdefault(host, set()).add(user_id)
-
-            if state_entry.state_group:
-                cache.state_group = state_entry.state_group
-            else:
-                cache.state_group = object()
-
-        return frozenset(cache.hosts_to_joined_users)
-
     async def _get_approximate_current_memberships_in_room(
         self, room_id: str
     ) -> Mapping[str, Optional[str]]:

From e1fa42249c7c4d58745da52d2658038d06d5e5e3 Mon Sep 17 00:00:00 2001
From: Shay
Date: Sun, 23 Jul 2023 16:30:05 -0700
Subject: [PATCH 246/562] Build packages for Debian Trixie (#15961)

---
 changelog.d/15961.misc               | 1 +
 scripts-dev/build_debian_packages.py | 1 +
 2 files changed, 2 insertions(+)
 create mode 100644 changelog.d/15961.misc

diff --git a/changelog.d/15961.misc b/changelog.d/15961.misc
new file mode 100644
index 0000000000..035a330446
--- /dev/null
+++ b/changelog.d/15961.misc
@@ -0,0 +1 @@
+Build packages for Debian Trixie.

diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py
index 1954835474..bb89ba581c 100755
--- a/scripts-dev/build_debian_packages.py
+++ b/scripts-dev/build_debian_packages.py
@@ -34,6 +34,7 @@ DISTS = (
     "ubuntu:jammy",  # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
     "ubuntu:kinetic",  # 22.10 (EOL 2023-07-20) (our EOL forced by Python 3.10 is 2026-10-04)
     "ubuntu:lunar",  # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
+    "debian:trixie",  # (EOL not specified yet)
 )
 
 DESC = """\

From f08d05dd2ce8ab38240cfa691b07a27cff0356e9 Mon Sep 17 00:00:00 2001
From: Shay
Date: Sun, 23 Jul 2023 16:30:54 -0700
Subject: [PATCH 247/562] Actually stop reading from column `user_id` of table
 `profiles` (#15955)

---
 changelog.d/15955.misc                           |  1 +
 synapse/storage/databases/main/__init__.py       |  4 ++--
 synapse/storage/databases/main/stats.py          |  4 ++--
 synapse/storage/databases/main/user_directory.py | 13 ++++++-------
 tests/rest/admin/test_user.py                    |  2 +-
 5 files changed, 12 insertions(+), 12 deletions(-)
 create mode 100644 changelog.d/15955.misc

diff --git a/changelog.d/15955.misc b/changelog.d/15955.misc
new file mode 100644
index 0000000000..dc4f687e0a
--- /dev/null
+++ b/changelog.d/15955.misc
@@ -0,0 +1 @@
+Stop reading from column `user_id` of table `profiles`.

diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py
index 80c0304b19..be67d1ff22 100644
--- a/synapse/storage/databases/main/__init__.py
+++ b/synapse/storage/databases/main/__init__.py
@@ -196,7 +196,7 @@ class DataStore(
         txn: LoggingTransaction,
     ) -> Tuple[List[JsonDict], int]:
         filters = []
-        args = [self.hs.config.server.server_name]
+        args: list = []
 
         # Set ordering
         order_by_column = UserSortOrder(order_by).value
@@ -263,7 +263,7 @@ class DataStore(
 
         sql_base = f"""
                 FROM users as u
-                LEFT JOIN profiles AS p ON u.name = '@' || p.user_id || ':' || ?
+ LEFT JOIN profiles AS p ON u.name = p.full_user_id LEFT JOIN erased_users AS eu ON u.name = eu.user_id {where_clause} """ diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 97c4dc2603..f34b7ce8f4 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -697,7 +697,7 @@ class StatsStore(StateDeltasStore): txn: LoggingTransaction, ) -> Tuple[List[JsonDict], int]: filters = [] - args = [self.hs.config.server.server_name] + args: list = [] if search_term: filters.append("(lmr.user_id LIKE ? OR displayname LIKE ?)") @@ -733,7 +733,7 @@ class StatsStore(StateDeltasStore): sql_base = """ FROM local_media_repository as lmr - LEFT JOIN profiles AS p ON lmr.user_id = '@' || p.user_id || ':' || ? + LEFT JOIN profiles AS p ON lmr.user_id = p.full_user_id {} GROUP BY lmr.user_id, displayname """.format( diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 924022c95c..2a136f2ff6 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -409,23 +409,22 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): txn, users_to_work_on ) - # Next fetch their profiles. Note that the `user_id` here is the - # *localpart*, and that not all users have profiles. + # Next fetch their profiles. Note that not all users have profiles. profile_rows = self.db_pool.simple_select_many_txn( txn, table="profiles", - column="user_id", - iterable=[get_localpart_from_id(u) for u in users_to_insert], + column="full_user_id", + iterable=list(users_to_insert), retcols=( - "user_id", + "full_user_id", "displayname", "avatar_url", ), keyvalues={}, ) profiles = { - f"@{row['user_id']}:{self.server_name}": _UserDirProfile( - f"@{row['user_id']}:{self.server_name}", + row["full_user_id"]: _UserDirProfile( + row["full_user_id"], row["displayname"], row["avatar_url"], ) diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 6f7b4bf642..9af9db6e3e 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -1418,7 +1418,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): # To test deactivation for users without a profile, we delete the profile information for our user. self.get_success( self.store.db_pool.simple_delete_one( - table="profiles", keyvalues={"user_id": "user"} + table="profiles", keyvalues={"full_user_id": "@user:test"} ) ) From 5c7364fea57e24ae3ce2ac833a3521abd58312db Mon Sep 17 00:00:00 2001 From: Shay Date: Sun, 23 Jul 2023 16:32:01 -0700 Subject: [PATCH 248/562] Properly handle redactions of creation events (#15973) --- changelog.d/15973.bugfix | 1 + synapse/events/utils.py | 8 +++++--- tests/events/test_utils.py | 9 +++++++-- 3 files changed, 13 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15973.bugfix diff --git a/changelog.d/15973.bugfix b/changelog.d/15973.bugfix new file mode 100644 index 0000000000..c9280d0037 --- /dev/null +++ b/changelog.d/15973.bugfix @@ -0,0 +1 @@ +Properly handle redactions of creation events. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index ecfc5c0568..c890833b1d 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -136,11 +136,13 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic ] elif event_type == EventTypes.Create: - # MSC2176 rules state that create events cannot be redacted. 
if room_version.updated_redaction_rules: - return event_dict + # MSC2176 rules state that create events cannot have their `content` redacted. + new_content = event_dict["content"] + elif not room_version.implicit_room_creator: + # Some room versions give meaning to `creator` + add_fields("creator") - add_fields("creator") elif event_type == EventTypes.JoinRules: add_fields("join_rule") if room_version.restricted_join_rule: diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 6a52af4d82..978612e432 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -225,9 +225,14 @@ class PruneEventTestCase(stdlib_unittest.TestCase): }, ) - # After MSC2176, create events get nothing redacted. + # After MSC2176, create events should preserve field `content` self.run_test( - {"type": "m.room.create", "content": {"not_a_real_key": True}}, + { + "type": "m.room.create", + "content": {"not_a_real_key": True}, + "origin": "some_homeserver", + "nonsense_field": "some_random_garbage", + }, { "type": "m.room.create", "content": {"not_a_real_key": True}, From 3b8348b06e34f1652623ee74da4243069f6e9783 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Jul 2023 10:03:05 +0100 Subject: [PATCH 249/562] Bump types-requests from 2.31.0.1 to 2.31.0.2 (#15983) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 27c8b103e5..f6d1ff7418 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3059,13 +3059,13 @@ files = [ [[package]] name = "types-requests" -version = "2.31.0.1" +version = "2.31.0.2" description = "Typing stubs for requests" optional = false python-versions = "*" files = [ - {file = "types-requests-2.31.0.1.tar.gz", hash = "sha256:3de667cffa123ce698591de0ad7db034a5317457a596eb0b4944e5a9d9e8d1ac"}, - {file = "types_requests-2.31.0.1-py3-none-any.whl", hash = "sha256:afb06ef8f25ba83d59a1d424bd7a5a939082f94b94e90ab5e6116bd2559deaa3"}, + {file = "types-requests-2.31.0.2.tar.gz", hash = "sha256:6aa3f7faf0ea52d728bb18c0a0d1522d9bfd8c72d26ff6f61bfc3d06a411cf40"}, + {file = "types_requests-2.31.0.2-py3-none-any.whl", hash = "sha256:56d181c85b5925cbc59f4489a57e72a8b2166f18273fd8ba7b6fe0c0b986f12a"}, ] [package.dependencies] From 3b6208b835531404fcbabfd28356d2d31ceb3168 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Jul 2023 10:12:02 +0100 Subject: [PATCH 250/562] Bump pillow from 9.4.0 to 10.0.0 (#15986) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 139 ++++++++++++++++++++++------------------------------ 1 file changed, 59 insertions(+), 80 deletions(-) diff --git a/poetry.lock b/poetry.lock index f6d1ff7418..16babe926f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1621,92 +1621,71 @@ files = [ [[package]] name = "pillow" -version = "9.4.0" +version = "10.0.0" description = "Python Imaging Library (Fork)" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "Pillow-9.4.0-1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b4b4e9dda4f4e4c4e6896f93e84a8f0bcca3b059de9ddf67dac3c334b1195e1"}, - {file = "Pillow-9.4.0-1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fb5c1ad6bad98c57482236a21bf985ab0ef42bd51f7ad4e4538e89a997624e12"}, - {file = "Pillow-9.4.0-1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = 
"sha256:f0caf4a5dcf610d96c3bd32932bfac8aee61c96e60481c2a0ea58da435e25acd"}, - {file = "Pillow-9.4.0-1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:3f4cc516e0b264c8d4ccd6b6cbc69a07c6d582d8337df79be1e15a5056b258c9"}, - {file = "Pillow-9.4.0-1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b8c2f6eb0df979ee99433d8b3f6d193d9590f735cf12274c108bd954e30ca858"}, - {file = "Pillow-9.4.0-1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b70756ec9417c34e097f987b4d8c510975216ad26ba6e57ccb53bc758f490dab"}, - {file = "Pillow-9.4.0-1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:43521ce2c4b865d385e78579a082b6ad1166ebed2b1a2293c3be1d68dd7ca3b9"}, - {file = "Pillow-9.4.0-2-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:9d9a62576b68cd90f7075876f4e8444487db5eeea0e4df3ba298ee38a8d067b0"}, - {file = "Pillow-9.4.0-2-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:87708d78a14d56a990fbf4f9cb350b7d89ee8988705e58e39bdf4d82c149210f"}, - {file = "Pillow-9.4.0-2-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8a2b5874d17e72dfb80d917213abd55d7e1ed2479f38f001f264f7ce7bae757c"}, - {file = "Pillow-9.4.0-2-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:83125753a60cfc8c412de5896d10a0a405e0bd88d0470ad82e0869ddf0cb3848"}, - {file = "Pillow-9.4.0-2-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9e5f94742033898bfe84c93c831a6f552bb629448d4072dd312306bab3bd96f1"}, - {file = "Pillow-9.4.0-2-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:013016af6b3a12a2f40b704677f8b51f72cb007dac785a9933d5c86a72a7fe33"}, - {file = "Pillow-9.4.0-2-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:99d92d148dd03fd19d16175b6d355cc1b01faf80dae93c6c3eb4163709edc0a9"}, - {file = "Pillow-9.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:2968c58feca624bb6c8502f9564dd187d0e1389964898f5e9e1fbc8533169157"}, - {file = "Pillow-9.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c5c1362c14aee73f50143d74389b2c158707b4abce2cb055b7ad37ce60738d47"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd752c5ff1b4a870b7661234694f24b1d2b9076b8bf337321a814c612665f343"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a3049a10261d7f2b6514d35bbb7a4dfc3ece4c4de14ef5876c4b7a23a0e566d"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16a8df99701f9095bea8a6c4b3197da105df6f74e6176c5b410bc2df2fd29a57"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:94cdff45173b1919350601f82d61365e792895e3c3a3443cf99819e6fbf717a5"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:ed3e4b4e1e6de75fdc16d3259098de7c6571b1a6cc863b1a49e7d3d53e036070"}, - {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5b2f8a31bd43e0f18172d8ac82347c8f37ef3e0b414431157718aa234991b28"}, - {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:09b89ddc95c248ee788328528e6a2996e09eaccddeeb82a5356e92645733be35"}, - {file = "Pillow-9.4.0-cp310-cp310-win32.whl", hash = "sha256:f09598b416ba39a8f489c124447b007fe865f786a89dbfa48bb5cf395693132a"}, - {file = "Pillow-9.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:f6e78171be3fb7941f9910ea15b4b14ec27725865a73c15277bc39f5ca4f8391"}, - {file = "Pillow-9.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:3fa1284762aacca6dc97474ee9c16f83990b8eeb6697f2ba17140d54b453e133"}, - {file = "Pillow-9.4.0-cp311-cp311-macosx_11_0_arm64.whl", 
hash = "sha256:eaef5d2de3c7e9b21f1e762f289d17b726c2239a42b11e25446abf82b26ac132"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4dfdae195335abb4e89cc9762b2edc524f3c6e80d647a9a81bf81e17e3fb6f0"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6abfb51a82e919e3933eb137e17c4ae9c0475a25508ea88993bb59faf82f3b35"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:451f10ef963918e65b8869e17d67db5e2f4ab40e716ee6ce7129b0cde2876eab"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:6663977496d616b618b6cfa43ec86e479ee62b942e1da76a2c3daa1c75933ef4"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:60e7da3a3ad1812c128750fc1bc14a7ceeb8d29f77e0a2356a8fb2aa8925287d"}, - {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:19005a8e58b7c1796bc0167862b1f54a64d3b44ee5d48152b06bb861458bc0f8"}, - {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f715c32e774a60a337b2bb8ad9839b4abf75b267a0f18806f6f4f5f1688c4b5a"}, - {file = "Pillow-9.4.0-cp311-cp311-win32.whl", hash = "sha256:b222090c455d6d1a64e6b7bb5f4035c4dff479e22455c9eaa1bdd4c75b52c80c"}, - {file = "Pillow-9.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:ba6612b6548220ff5e9df85261bddc811a057b0b465a1226b39bfb8550616aee"}, - {file = "Pillow-9.4.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:5f532a2ad4d174eb73494e7397988e22bf427f91acc8e6ebf5bb10597b49c493"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dd5a9c3091a0f414a963d427f920368e2b6a4c2f7527fdd82cde8ef0bc7a327"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef21af928e807f10bf4141cad4746eee692a0dd3ff56cfb25fce076ec3cc8abe"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:847b114580c5cc9ebaf216dd8c8dbc6b00a3b7ab0131e173d7120e6deade1f57"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:653d7fb2df65efefbcbf81ef5fe5e5be931f1ee4332c2893ca638c9b11a409c4"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:46f39cab8bbf4a384ba7cb0bc8bae7b7062b6a11cfac1ca4bc144dea90d4a9f5"}, - {file = "Pillow-9.4.0-cp37-cp37m-win32.whl", hash = "sha256:7ac7594397698f77bce84382929747130765f66406dc2cd8b4ab4da68ade4c6e"}, - {file = "Pillow-9.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:46c259e87199041583658457372a183636ae8cd56dbf3f0755e0f376a7f9d0e6"}, - {file = "Pillow-9.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:0e51f608da093e5d9038c592b5b575cadc12fd748af1479b5e858045fff955a9"}, - {file = "Pillow-9.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:765cb54c0b8724a7c12c55146ae4647e0274a839fb6de7bcba841e04298e1011"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:519e14e2c49fcf7616d6d2cfc5c70adae95682ae20f0395e9280db85e8d6c4df"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d197df5489004db87d90b918033edbeee0bd6df3848a204bca3ff0a903bef837"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0845adc64fe9886db00f5ab68c4a8cd933ab749a87747555cec1c95acea64b0b"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = 
"sha256:e1339790c083c5a4de48f688b4841f18df839eb3c9584a770cbd818b33e26d5d"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:a96e6e23f2b79433390273eaf8cc94fec9c6370842e577ab10dabdcc7ea0a66b"}, - {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7cfc287da09f9d2a7ec146ee4d72d6ea1342e770d975e49a8621bf54eaa8f30f"}, - {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d7081c084ceb58278dd3cf81f836bc818978c0ccc770cbbb202125ddabec6628"}, - {file = "Pillow-9.4.0-cp38-cp38-win32.whl", hash = "sha256:df41112ccce5d47770a0c13651479fbcd8793f34232a2dd9faeccb75eb5d0d0d"}, - {file = "Pillow-9.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:7a21222644ab69ddd9967cfe6f2bb420b460dae4289c9d40ff9a4896e7c35c9a"}, - {file = "Pillow-9.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0f3269304c1a7ce82f1759c12ce731ef9b6e95b6df829dccd9fe42912cc48569"}, - {file = "Pillow-9.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cb362e3b0976dc994857391b776ddaa8c13c28a16f80ac6522c23d5257156bed"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2e0f87144fcbbe54297cae708c5e7f9da21a4646523456b00cc956bd4c65815"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28676836c7796805914b76b1837a40f76827ee0d5398f72f7dcc634bae7c6264"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0884ba7b515163a1a05440a138adeb722b8a6ae2c2b33aea93ea3118dd3a899e"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:53dcb50fbdc3fb2c55431a9b30caeb2f7027fcd2aeb501459464f0214200a503"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:e8c5cf126889a4de385c02a2c3d3aba4b00f70234bfddae82a5eaa3ee6d5e3e6"}, - {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6c6b1389ed66cdd174d040105123a5a1bc91d0aa7059c7261d20e583b6d8cbd2"}, - {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0dd4c681b82214b36273c18ca7ee87065a50e013112eea7d78c7a1b89a739153"}, - {file = "Pillow-9.4.0-cp39-cp39-win32.whl", hash = "sha256:6d9dfb9959a3b0039ee06c1a1a90dc23bac3b430842dcb97908ddde05870601c"}, - {file = "Pillow-9.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:54614444887e0d3043557d9dbc697dbb16cfb5a35d672b7a0fcc1ed0cf1c600b"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b9b752ab91e78234941e44abdecc07f1f0d8f51fb62941d32995b8161f68cfe5"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3b56206244dc8711f7e8b7d6cad4663917cd5b2d950799425076681e8766286"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aabdab8ec1e7ca7f1434d042bf8b1e92056245fb179790dc97ed040361f16bfd"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db74f5562c09953b2c5f8ec4b7dfd3f5421f31811e97d1dbc0a7c93d6e3a24df"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e9d7747847c53a16a729b6ee5e737cf170f7a16611c143d95aa60a109a59c336"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b52ff4f4e002f828ea6483faf4c4e8deea8d743cf801b74910243c58acc6eda3"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:575d8912dca808edd9acd6f7795199332696d3469665ef26163cd090fa1f8bfa"}, - {file = 
"Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c4ed2ff6760e98d262e0cc9c9a7f7b8a9f61aa4d47c58835cdaf7b0b8811bb"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e621b0246192d3b9cb1dc62c78cfa4c6f6d2ddc0ec207d43c0dedecb914f152a"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8f127e7b028900421cad64f51f75c051b628db17fb00e099eb148761eed598c9"}, - {file = "Pillow-9.4.0.tar.gz", hash = "sha256:a1c2d7780448eb93fbcc3789bf3916aa5720d942e37945f4056680317f1cd23e"}, + {file = "Pillow-10.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891"}, + {file = "Pillow-10.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d5db32e2a6ccbb3d34d87c87b432959e0db29755727afb37290e10f6e8e62614"}, + {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edf4392b77bdc81f36e92d3a07a5cd072f90253197f4a52a55a8cec48a12483b"}, + {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:520f2a520dc040512699f20fa1c363eed506e94248d71f85412b625026f6142c"}, + {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:8c11160913e3dd06c8ffdb5f233a4f254cb449f4dfc0f8f4549eda9e542c93d1"}, + {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a74ba0c356aaa3bb8e3eb79606a87669e7ec6444be352870623025d75a14a2bf"}, + {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5d0dae4cfd56969d23d94dc8e89fb6a217be461c69090768227beb8ed28c0a3"}, + {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22c10cc517668d44b211717fd9775799ccec4124b9a7f7b3635fc5386e584992"}, + {file = "Pillow-10.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:dffe31a7f47b603318c609f378ebcd57f1554a3a6a8effbc59c3c69f804296de"}, + {file = "Pillow-10.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:9fb218c8a12e51d7ead2a7c9e101a04982237d4855716af2e9499306728fb485"}, + {file = "Pillow-10.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d35e3c8d9b1268cbf5d3670285feb3528f6680420eafe35cccc686b73c1e330f"}, + {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ed64f9ca2f0a95411e88a4efbd7a29e5ce2cea36072c53dd9d26d9c76f753b3"}, + {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b6eb5502f45a60a3f411c63187db83a3d3107887ad0d036c13ce836f8a36f1d"}, + {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:c1fbe7621c167ecaa38ad29643d77a9ce7311583761abf7836e1510c580bf3dd"}, + {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cd25d2a9d2b36fcb318882481367956d2cf91329f6892fe5d385c346c0649629"}, + {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3b08d4cc24f471b2c8ca24ec060abf4bebc6b144cb89cba638c720546b1cf538"}, + {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737a602fbd82afd892ca746392401b634e278cb65d55c4b7a8f48e9ef8d008d"}, + {file = "Pillow-10.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:3a82c40d706d9aa9734289740ce26460a11aeec2d9c79b7af87bb35f0073c12f"}, + {file = "Pillow-10.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:bc2ec7c7b5d66b8ec9ce9f720dbb5fa4bace0f545acd34870eff4a369b44bf37"}, + {file = "Pillow-10.0.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = 
"sha256:d80cf684b541685fccdd84c485b31ce73fc5c9b5d7523bf1394ce134a60c6883"}, + {file = "Pillow-10.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76de421f9c326da8f43d690110f0e79fe3ad1e54be811545d7d91898b4c8493e"}, + {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81ff539a12457809666fef6624684c008e00ff6bf455b4b89fd00a140eecd640"}, + {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce543ed15570eedbb85df19b0a1a7314a9c8141a36ce089c0a894adbfccb4568"}, + {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:685ac03cc4ed5ebc15ad5c23bc555d68a87777586d970c2c3e216619a5476223"}, + {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d72e2ecc68a942e8cf9739619b7f408cc7b272b279b56b2c83c6123fcfa5cdff"}, + {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d50b6aec14bc737742ca96e85d6d0a5f9bfbded018264b3b70ff9d8c33485551"}, + {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:00e65f5e822decd501e374b0650146063fbb30a7264b4d2744bdd7b913e0cab5"}, + {file = "Pillow-10.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f31f9fdbfecb042d046f9d91270a0ba28368a723302786c0009ee9b9f1f60199"}, + {file = "Pillow-10.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:1ce91b6ec08d866b14413d3f0bbdea7e24dfdc8e59f562bb77bc3fe60b6144ca"}, + {file = "Pillow-10.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:349930d6e9c685c089284b013478d6f76e3a534e36ddfa912cde493f235372f3"}, + {file = "Pillow-10.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a684105f7c32488f7153905a4e3015a3b6c7182e106fe3c37fbb5ef3e6994c3"}, + {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4f69b3700201b80bb82c3a97d5e9254084f6dd5fb5b16fc1a7b974260f89f43"}, + {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f07ea8d2f827d7d2a49ecf1639ec02d75ffd1b88dcc5b3a61bbb37a8759ad8d"}, + {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:040586f7d37b34547153fa383f7f9aed68b738992380ac911447bb78f2abe530"}, + {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f88a0b92277de8e3ca715a0d79d68dc82807457dae3ab8699c758f07c20b3c51"}, + {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c7cf14a27b0d6adfaebb3ae4153f1e516df54e47e42dcc073d7b3d76111a8d86"}, + {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3400aae60685b06bb96f99a21e1ada7bc7a413d5f49bce739828ecd9391bb8f7"}, + {file = "Pillow-10.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:dbc02381779d412145331789b40cc7b11fdf449e5d94f6bc0b080db0a56ea3f0"}, + {file = "Pillow-10.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9211e7ad69d7c9401cfc0e23d49b69ca65ddd898976d660a2fa5904e3d7a9baa"}, + {file = "Pillow-10.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:faaf07ea35355b01a35cb442dd950d8f1bb5b040a7787791a535de13db15ed90"}, + {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f72a021fbb792ce98306ffb0c348b3c9cb967dce0f12a49aa4c3d3fdefa967"}, + {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f7c16705f44e0504a3a2a14197c1f0b32a95731d251777dcb060aa83022cb2d"}, + {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:76edb0a1fa2b4745fb0c99fb9fb98f8b180a1bbceb8be49b087e0b21867e77d3"}, + {file = 
"Pillow-10.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:368ab3dfb5f49e312231b6f27b8820c823652b7cd29cfbd34090565a015e99ba"}, + {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:608bfdee0d57cf297d32bcbb3c728dc1da0907519d1784962c5f0c68bb93e5a3"}, + {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5c6e3df6bdd396749bafd45314871b3d0af81ff935b2d188385e970052091017"}, + {file = "Pillow-10.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:7be600823e4c8631b74e4a0d38384c73f680e6105a7d3c6824fcf226c178c7e6"}, + {file = "Pillow-10.0.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:92be919bbc9f7d09f7ae343c38f5bb21c973d2576c1d45600fce4b74bafa7ac0"}, + {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8182b523b2289f7c415f589118228d30ac8c355baa2f3194ced084dac2dbba"}, + {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:38250a349b6b390ee6047a62c086d3817ac69022c127f8a5dc058c31ccef17f3"}, + {file = "Pillow-10.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:88af2003543cc40c80f6fca01411892ec52b11021b3dc22ec3bc9d5afd1c5334"}, + {file = "Pillow-10.0.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c189af0545965fa8d3b9613cfdb0cd37f9d71349e0f7750e1fd704648d475ed2"}, + {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7b031a6fc11365970e6a5686d7ba8c63e4c1cf1ea143811acbb524295eabed"}, + {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db24668940f82321e746773a4bc617bfac06ec831e5c88b643f91f122a785684"}, + {file = "Pillow-10.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:efe8c0681042536e0d06c11f48cebe759707c9e9abf880ee213541c5b46c5bf3"}, + {file = "Pillow-10.0.0.tar.gz", hash = "sha256:9c82b5b3e043c7af0d95792d0d20ccf68f61a1fec6b3530e718b688422727396"}, ] [package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] [[package]] From fc566cdf0a9edc2253ce343b5f27de6230eac4a0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Jul 2023 10:16:03 +0100 Subject: [PATCH 251/562] Bump sentry-sdk from 1.26.0 to 1.28.1 (#15985) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 16babe926f..112e8a062f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2385,13 +2385,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "1.26.0" +version = "1.28.1" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = "*" files = [ - {file = "sentry-sdk-1.26.0.tar.gz", hash = "sha256:760e4fb6d01c994110507133e08ecd4bdf4d75ee4be77f296a3579796cf73134"}, - {file = "sentry_sdk-1.26.0-py2.py3-none-any.whl", hash = "sha256:0c9f858337ec3781cf4851972ef42bba8c9828aea116b0dbed8f38c5f9a1896c"}, + {file = "sentry-sdk-1.28.1.tar.gz", hash = "sha256:dcd88c68aa64dae715311b5ede6502fd684f70d00a7cd4858118f0ba3153a3ae"}, + {file = 
"sentry_sdk-1.28.1-py2.py3-none-any.whl", hash = "sha256:6bdb25bd9092478d3a817cb0d01fa99e296aea34d404eac3ca0037faa5c2aa0a"}, ] [package.dependencies] From 4a711bf3790cb32edf8b36e59f0f60756f14ee58 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Jul 2023 10:17:02 +0100 Subject: [PATCH 252/562] Bump click from 8.1.3 to 8.1.6 (#15984) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 112e8a062f..e862a4e6c5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -397,13 +397,13 @@ files = [ [[package]] name = "click" -version = "8.1.3" +version = "8.1.6" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" files = [ - {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, - {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, + {file = "click-8.1.6-py3-none-any.whl", hash = "sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5"}, + {file = "click-8.1.6.tar.gz", hash = "sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd"}, ] [package.dependencies] From 654902a7583d20d7e0b57dc4634fbe573ff99993 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 24 Jul 2023 13:43:43 +0100 Subject: [PATCH 253/562] Resync stale devices in background (#15975) This is so we don't block responding to federation transaction while we try and fetch the device lists. --- changelog.d/15975.bugfix | 1 + synapse/handlers/device.py | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15975.bugfix diff --git a/changelog.d/15975.bugfix b/changelog.d/15975.bugfix new file mode 100644 index 0000000000..59738cca0a --- /dev/null +++ b/changelog.d/15975.bugfix @@ -0,0 +1 @@ +Fix bug where resyncing stale device lists could block responding to federation transactions, and thus delay receiving new data from the remote server. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 5d12a39e26..d73d9dca08 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -1124,7 +1124,14 @@ class DeviceListUpdater(DeviceListWorkerUpdater): ) if resync: - await self.multi_user_device_resync([user_id]) + # We mark as stale up front in case we get restarted. + await self.store.mark_remote_users_device_caches_as_stale([user_id]) + run_as_background_process( + "_maybe_retry_device_resync", + self.multi_user_device_resync, + [user_id], + False, + ) else: # Simply update the single device, since we know that is the only # change (because of the single prev_id matching the current cache) From 05f8dada8b768c3c5c4d6cdf8e7ec2513007d9be Mon Sep 17 00:00:00 2001 From: SnipeX_ Date: Mon, 24 Jul 2023 15:06:10 +0200 Subject: [PATCH 254/562] Fix broken Arch Linux package link (#15981) --- changelog.d/15981.doc | 1 + docs/setup/installation.md | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15981.doc diff --git a/changelog.d/15981.doc b/changelog.d/15981.doc new file mode 100644 index 0000000000..374a5bd12f --- /dev/null +++ b/changelog.d/15981.doc @@ -0,0 +1 @@ +Fix broken Arch Linux package link. Contributed by @SnipeXandrej. 
diff --git a/docs/setup/installation.md b/docs/setup/installation.md index 4ca8c6b697..479f7ea543 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -135,8 +135,8 @@ Unofficial package are built for SLES 15 in the openSUSE:Backports:SLE-15 reposi #### ArchLinux -The quickest way to get up and running with ArchLinux is probably with the community package -, which should pull in most of +The quickest way to get up and running with ArchLinux is probably with the package provided by ArchLinux +, which should pull in most of the necessary dependencies. pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1 ): From 641ff9ef7eaa7f1a632b983f4d36bb28dc23484d Mon Sep 17 00:00:00 2001 From: Shay Date: Mon, 24 Jul 2023 08:23:19 -0700 Subject: [PATCH 255/562] Support MSC3814: Dehydrated Devices (#15929) Signed-off-by: Nicolas Werner Co-authored-by: Nicolas Werner Co-authored-by: Nicolas Werner <89468146+nico-famedly@users.noreply.github.com> Co-authored-by: Hubert Chathi --- changelog.d/15929.feature | 1 + synapse/config/experimental.py | 21 +++ synapse/handlers/device.py | 4 +- synapse/handlers/devicemessage.py | 108 +++++++++++++- synapse/rest/client/devices.py | 232 +++++++++++++++++++++++++++++- tests/handlers/test_device.py | 99 ++++++++++++- tests/rest/client/test_devices.py | 150 ++++++++++++++++++- 7 files changed, 603 insertions(+), 12 deletions(-) create mode 100644 changelog.d/15929.feature diff --git a/changelog.d/15929.feature b/changelog.d/15929.feature new file mode 100644 index 0000000000..c3aaeae66e --- /dev/null +++ b/changelog.d/15929.feature @@ -0,0 +1 @@ +Implement [MSC3814](https://github.com/matrix-org/matrix-spec-proposals/pull/3814), dehydrated devices v2/shrivelled sessions and move [MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) behind a config flag. Contributed by Nico from Famedly and H-Shay. diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 0970f22a75..1695ed8ca3 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -247,6 +247,27 @@ class ExperimentalConfig(Config): # MSC3026 (busy presence state) self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False) + # MSC2697 (device dehydration) + # Enabled by default since this option was added after adding the feature. + # It is not recommended that MSC2697 and MSC3814 both be enabled at + # once. + self.msc2697_enabled: bool = experimental.get("msc2697_enabled", True) + + # MSC3814 (dehydrated devices with SSSS) + # This is an alternative method to achieve the same goals as MSC2697. + # It is not recommended that MSC2697 and MSC3814 both be enabled at + # once.
+ self.msc3814_enabled: bool = experimental.get("msc3814_enabled", False) + + if self.msc2697_enabled and self.msc3814_enabled: + raise ConfigError( + "MSC2697 and MSC3814 should not both be enabled.", + ( + "experimental_features", + "msc3814_enabled", + ), + ) + # MSC3244 (room version capabilities) self.msc3244_enabled: bool = experimental.get("msc3244_enabled", True) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index d73d9dca08..f3a713f5fa 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -653,6 +653,7 @@ class DeviceHandler(DeviceWorkerHandler): async def store_dehydrated_device( self, user_id: str, + device_id: Optional[str], device_data: JsonDict, initial_device_display_name: Optional[str] = None, ) -> str: @@ -661,6 +662,7 @@ class DeviceHandler(DeviceWorkerHandler): Args: user_id: the user that we are storing the device for + device_id: device id supplied by client device_data: the dehydrated device information initial_device_display_name: The display name to use for the device Returns: @@ -668,7 +670,7 @@ class DeviceHandler(DeviceWorkerHandler): """ device_id = await self.check_device_registered( user_id, - None, + device_id, initial_device_display_name, ) old_device_id = await self.store.store_dehydrated_device( diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index 3caf9b31cc..15e94a03cb 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -13,10 +13,11 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, Any, Dict +from http import HTTPStatus +from typing import TYPE_CHECKING, Any, Dict, Optional from synapse.api.constants import EduTypes, EventContentFields, ToDeviceEventTypes -from synapse.api.errors import SynapseError +from synapse.api.errors import Codes, SynapseError from synapse.api.ratelimiting import Ratelimiter from synapse.logging.context import run_in_background from synapse.logging.opentracing import ( @@ -48,6 +49,9 @@ class DeviceMessageHandler: self.store = hs.get_datastores().main self.notifier = hs.get_notifier() self.is_mine = hs.is_mine + if hs.config.experimental.msc3814_enabled: + self.event_sources = hs.get_event_sources() + self.device_handler = hs.get_device_handler() # We only need to poke the federation sender explicitly if its on the # same instance. Other federation sender instances will get notified by @@ -303,3 +307,103 @@ class DeviceMessageHandler: # Enqueue a new federation transaction to send the new # device messages to each remote destination. self.federation_sender.send_device_messages(destination) + + async def get_events_for_dehydrated_device( + self, + requester: Requester, + device_id: str, + since_token: Optional[str], + limit: int, + ) -> JsonDict: + """Fetches up to `limit` events sent to `device_id` starting from `since_token` + and returns the new since token. If there are no more messages, returns an empty + array. 
+ + Args: + requester: the user requesting the messages + device_id: ID of the dehydrated device + since_token: stream id to start from when fetching messages + limit: the number of messages to fetch + Returns: + A dict containing the to-device messages, as well as a token that the client + can provide in the next call to fetch the next batch of messages + """ + + user_id = requester.user.to_string() + + # only allow fetching messages for the dehydrated device id currently associated + # with the user + dehydrated_device = await self.device_handler.get_dehydrated_device(user_id) + if dehydrated_device is None: + raise SynapseError( + HTTPStatus.FORBIDDEN, + "No dehydrated device exists", + Codes.FORBIDDEN, + ) + + dehydrated_device_id, _ = dehydrated_device + if device_id != dehydrated_device_id: + raise SynapseError( + HTTPStatus.FORBIDDEN, + "You may only fetch messages for your dehydrated device", + Codes.FORBIDDEN, + ) + + since_stream_id = 0 + if since_token: + if not since_token.startswith("d"): + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "from parameter %r has an invalid format" % (since_token,), + errcode=Codes.INVALID_PARAM, + ) + + try: + since_stream_id = int(since_token[1:]) + except Exception: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "from parameter %r has an invalid format" % (since_token,), + errcode=Codes.INVALID_PARAM, + ) + + # if we have a since token, delete any to-device messages before that token + # (since we now know that the device has received them) + deleted = await self.store.delete_messages_for_device( + user_id, device_id, since_stream_id + ) + logger.debug( + "Deleted %d to-device messages up to %d for user_id %s device_id %s", + deleted, + since_stream_id, + user_id, + device_id, + ) + + to_token = self.event_sources.get_current_token().to_device_key + + messages, stream_id = await self.store.get_messages_for_device( + user_id, device_id, since_stream_id, to_token, limit + ) + + for message in messages: + # Remove the message id before sending to client + message_id = message.pop("message_id", None) + if message_id: + set_tag(SynapseTags.TO_DEVICE_EDU_ID, message_id) + + logger.debug( + "Returning %d to-device messages between %d and %d (current token: %d) for " + "dehydrated device %s, user_id %s", + len(messages), + since_stream_id, + stream_id, + to_token, + device_id, + user_id, + ) + + return { + "events": messages, + "next_batch": f"d{stream_id}", + } diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index 38dff9703f..690d2ec406 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -14,19 +14,22 @@ # limitations under the License. 
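The handler above pages through a dehydrated device's to-device messages with an opaque `d<stream_id>` token, and deletes everything up to whatever token the client echoes back — so resending the last `next_batch` both fetches the next page and acknowledges (purges) the previous one. A rough client-side sketch of draining the endpoint that the servlet below registers; the homeserver URL, access token and device ID are placeholders, and error handling is omitted:

```python
from typing import Any, Dict, List, Optional

import requests  # assumed available; any HTTP client would do

HOMESERVER = "https://example.org"    # placeholder
ACCESS_TOKEN = "<access_token>"       # placeholder
DEVICE_ID = "<dehydrated_device_id>"  # placeholder


def drain_dehydrated_device_events() -> List[Dict[str, Any]]:
    url = (
        f"{HOMESERVER}/_matrix/client/unstable/org.matrix.msc3814.v1"
        f"/dehydrated_device/{DEVICE_ID}/events"
    )
    events: List[Dict[str, Any]] = []
    next_batch: Optional[str] = None
    while True:
        body: Dict[str, Any] = {}
        if next_batch is not None:
            # Echoing the token back deletes the messages already fetched.
            body["next_batch"] = next_batch
        resp = requests.post(
            url,
            json=body,
            params={"limit": 100},
            headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
        )
        resp.raise_for_status()
        page = resp.json()
        if not page["events"]:
            # Empty page: nothing newer than next_batch; we are done.
            return events
        events.extend(page["events"])
        next_batch = page["next_batch"]  # e.g. "d1234"
```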
import logging +from http import HTTPStatus from typing import TYPE_CHECKING, List, Optional, Tuple from pydantic import Extra, StrictStr from synapse.api import errors -from synapse.api.errors import NotFoundError, UnrecognizedRequestError +from synapse.api.errors import NotFoundError, SynapseError, UnrecognizedRequestError from synapse.handlers.device import DeviceHandler from synapse.http.server import HttpServer from synapse.http.servlet import ( RestServlet, parse_and_validate_json_object_from_request, + parse_integer, ) from synapse.http.site import SynapseRequest +from synapse.replication.http.devices import ReplicationUploadKeysForUserRestServlet from synapse.rest.client._base import client_patterns, interactive_auth_handler from synapse.rest.client.models import AuthenticationData from synapse.rest.models import RequestBodyModel @@ -229,6 +232,8 @@ class DehydratedDeviceDataModel(RequestBodyModel): class DehydratedDeviceServlet(RestServlet): """Retrieve or store a dehydrated device. + Implements either MSC2697 or MSC3814. + GET /org.matrix.msc2697.v2/dehydrated_device HTTP/1.1 200 OK @@ -261,9 +266,7 @@ class DehydratedDeviceServlet(RestServlet): """ - PATTERNS = client_patterns("/org.matrix.msc2697.v2/dehydrated_device$", releases=()) - - def __init__(self, hs: "HomeServer"): + def __init__(self, hs: "HomeServer", msc2697: bool = True): super().__init__() self.hs = hs self.auth = hs.get_auth() @@ -271,6 +274,13 @@ class DehydratedDeviceServlet(RestServlet): assert isinstance(handler, DeviceHandler) self.device_handler = handler + self.PATTERNS = client_patterns( + "/org.matrix.msc2697.v2/dehydrated_device$" + if msc2697 + else "/org.matrix.msc3814.v1/dehydrated_device$", + releases=(), + ) + async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) dehydrated_device = await self.device_handler.get_dehydrated_device( @@ -293,6 +303,7 @@ class DehydratedDeviceServlet(RestServlet): device_id = await self.device_handler.store_dehydrated_device( requester.user.to_string(), + None, submission.device_data.dict(), submission.initial_device_display_name, ) @@ -347,6 +358,210 @@ class ClaimDehydratedDeviceServlet(RestServlet): return 200, result +class DehydratedDeviceEventsServlet(RestServlet): + PATTERNS = client_patterns( + "/org.matrix.msc3814.v1/dehydrated_device/(?P<device_id>[^/]*)/events$", + releases=(), + ) + + def __init__(self, hs: "HomeServer"): + super().__init__() + self.message_handler = hs.get_device_message_handler() + self.auth = hs.get_auth() + self.store = hs.get_datastores().main + + class PostBody(RequestBodyModel): + next_batch: Optional[StrictStr] + + async def on_POST( + self, request: SynapseRequest, device_id: str + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + + next_batch = parse_and_validate_json_object_from_request( + request, self.PostBody + ).next_batch + limit = parse_integer(request, "limit", 100) + + msgs = await self.message_handler.get_events_for_dehydrated_device( + requester=requester, + device_id=device_id, + since_token=next_batch, + limit=limit, + ) + + return 200, msgs + + +class DehydratedDeviceV2Servlet(RestServlet): + """Upload, retrieve, or delete a dehydrated device.
+ + GET /org.matrix.msc3814.v1/dehydrated_device + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "device_id": "dehydrated_device_id", + "device_data": { + "algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm", + "account": "dehydrated_device" + } + } + + PUT /org.matrix.msc3814.v1/dehydrated_device + Content-Type: application/json + + { + "device_id": "dehydrated_device_id", + "device_data": { + "algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm", + "account": "dehydrated_device" + }, + "device_keys": { + "user_id": "<user_id>", + "device_id": "<device_id>", + "valid_until_ts": <millisecond_timestamp>, + "algorithms": [ + "m.olm.curve25519-aes-sha2", + ], + "keys": { + "<algorithm>:<device_id>": "<key_base64>", + }, + "signatures": { + "<user_id>": { + "<algorithm>:<device_id>": "<signature_base64>" + } + } + }, + "fallback_keys": { + "<algorithm>:<device_id>": "<key_base64>", + "signed_<algorithm>:<device_id>": { + "fallback": true, + "key": "<key_base64>", + "signatures": { + "<user_id>": { + "<algorithm>:<device_id>": "<signature_base64>" + } + } + } + }, + "one_time_keys": { + "<algorithm>:<key_id>": "<key_base64>" + } + + } + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "device_id": "dehydrated_device_id" + } + + DELETE /org.matrix.msc3814.v1/dehydrated_device + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "device_id": "dehydrated_device_id" + } + """ + + PATTERNS = [ + *client_patterns("/org.matrix.msc3814.v1/dehydrated_device$", releases=()), + ] + + def __init__(self, hs: "HomeServer"): + super().__init__() + self.hs = hs + self.auth = hs.get_auth() + handler = hs.get_device_handler() + assert isinstance(handler, DeviceHandler) + self.e2e_keys_handler = hs.get_e2e_keys_handler() + self.device_handler = handler + + if hs.config.worker.worker_app is None: + # if main process + self.key_uploader = self.e2e_keys_handler.upload_keys_for_user + else: + # then a worker + self.key_uploader = ReplicationUploadKeysForUserRestServlet.make_client(hs) + + async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + + dehydrated_device = await self.device_handler.get_dehydrated_device( + requester.user.to_string() + ) + + if dehydrated_device is not None: + (device_id, device_data) = dehydrated_device + result = {"device_id": device_id, "device_data": device_data} + return 200, result + else: + raise errors.NotFoundError("No dehydrated device available") + + async def on_DELETE(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + + dehydrated_device = await self.device_handler.get_dehydrated_device( + requester.user.to_string() + ) + + if dehydrated_device is not None: + (device_id, device_data) = dehydrated_device + + result = await self.device_handler.rehydrate_device( + requester.user.to_string(), + self.auth.get_access_token_from_request(request), + device_id, + ) + + result = {"device_id": device_id} + + return 200, result + else: + raise errors.NotFoundError("No dehydrated device available") + + class PutBody(RequestBodyModel): + device_data: DehydratedDeviceDataModel + device_id: StrictStr + initial_device_display_name: Optional[StrictStr] + + class Config: + extra = Extra.allow + + async def on_PUT(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + submission = parse_and_validate_json_object_from_request(request, self.PutBody) + requester = await self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + device_info = submission.dict() + if "device_keys" not in device_info.keys(): + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Device key(s) not found, these must be provided.", + ) + + # TODO: Those two operations, creating a device and storing the + #
device's keys should be atomic. + device_id = await self.device_handler.store_dehydrated_device( + requester.user.to_string(), + submission.device_id, + submission.device_data.dict(), + submission.initial_device_display_name, + ) + + # TODO: Do we need to do something with the result here? + await self.key_uploader( + user_id=user_id, device_id=submission.device_id, keys=submission.dict() + ) + + return 200, {"device_id": device_id} + + def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: if ( hs.config.worker.worker_app is None @@ -354,7 +569,12 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ): DeleteDevicesRestServlet(hs).register(http_server) DevicesRestServlet(hs).register(http_server) + if hs.config.worker.worker_app is None: DeviceRestServlet(hs).register(http_server) - DehydratedDeviceServlet(hs).register(http_server) - ClaimDehydratedDeviceServlet(hs).register(http_server) + if hs.config.experimental.msc2697_enabled: + DehydratedDeviceServlet(hs, msc2697=True).register(http_server) + ClaimDehydratedDeviceServlet(hs).register(http_server) + if hs.config.experimental.msc3814_enabled: + DehydratedDeviceV2Servlet(hs).register(http_server) + DehydratedDeviceEventsServlet(hs).register(http_server) diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 66215af2b8..647ee09279 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -17,15 +17,18 @@ from typing import Optional from unittest import mock +from twisted.internet.defer import ensureDeferred from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import RoomEncryptionAlgorithms from synapse.api.errors import NotFoundError, SynapseError from synapse.appservice import ApplicationService from synapse.handlers.device import MAX_DEVICE_DISPLAY_NAME_LEN, DeviceHandler +from synapse.rest import admin +from synapse.rest.client import devices, login, register from synapse.server import HomeServer from synapse.storage.databases.main.appservice import _make_exclusive_regex -from synapse.types import JsonDict +from synapse.types import JsonDict, create_requester from synapse.util import Clock from tests import unittest @@ -399,11 +402,19 @@ class DeviceTestCase(unittest.HomeserverTestCase): class DehydrationTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets_for_client_rest_resource, + login.register_servlets, + register.register_servlets, + devices.register_servlets, + ] + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver("server") handler = hs.get_device_handler() assert isinstance(handler, DeviceHandler) self.handler = handler + self.message_handler = hs.get_device_message_handler() self.registration = hs.get_registration_handler() self.auth = hs.get_auth() self.store = hs.get_datastores().main @@ -418,6 +429,7 @@ class DehydrationTestCase(unittest.HomeserverTestCase): stored_dehydrated_device_id = self.get_success( self.handler.store_dehydrated_device( user_id=user_id, + device_id=None, device_data={"device_data": {"foo": "bar"}}, initial_device_display_name="dehydrated device", ) @@ -481,3 +493,88 @@ class DehydrationTestCase(unittest.HomeserverTestCase): ret = self.get_success(self.handler.get_dehydrated_device(user_id=user_id)) self.assertIsNone(ret) + + @unittest.override_config( + {"experimental_features": {"msc2697_enabled": False, "msc3814_enabled": True}} + ) + def test_dehydrate_v2_and_fetch_events(self) -> None: + 
user_id = "@boris:server" + + self.get_success(self.store.register_user(user_id, "foobar")) + + # First check if we can store and fetch a dehydrated device + stored_dehydrated_device_id = self.get_success( + self.handler.store_dehydrated_device( + user_id=user_id, + device_id=None, + device_data={"device_data": {"foo": "bar"}}, + initial_device_display_name="dehydrated device", + ) + ) + + device_info = self.get_success( + self.handler.get_dehydrated_device(user_id=user_id) + ) + assert device_info is not None + retrieved_device_id, device_data = device_info + self.assertEqual(retrieved_device_id, stored_dehydrated_device_id) + self.assertEqual(device_data, {"device_data": {"foo": "bar"}}) + + # Create a new login for the user + device_id, access_token, _expiration_time, _refresh_token = self.get_success( + self.registration.register_device( + user_id=user_id, + device_id=None, + initial_display_name="new device", + ) + ) + + requester = create_requester(user_id, device_id=device_id) + + # Fetching messages for a non-existing device should return an error + self.get_failure( + self.message_handler.get_events_for_dehydrated_device( + requester=requester, + device_id="not the right device ID", + since_token=None, + limit=10, + ), + SynapseError, + ) + + # Send a message to the dehydrated device + ensureDeferred( + self.message_handler.send_device_message( + requester=requester, + message_type="test.message", + messages={user_id: {stored_dehydrated_device_id: {"body": "foo"}}}, + ) + ) + self.pump() + + # Fetch the message of the dehydrated device + res = self.get_success( + self.message_handler.get_events_for_dehydrated_device( + requester=requester, + device_id=stored_dehydrated_device_id, + since_token=None, + limit=10, + ) + ) + + self.assertTrue(len(res["next_batch"]) > 1) + self.assertEqual(len(res["events"]), 1) + self.assertEqual(res["events"][0]["content"]["body"], "foo") + + # Fetch the message of the dehydrated device again, which should return nothing + # and delete the old messages + res = self.get_success( + self.message_handler.get_events_for_dehydrated_device( + requester=requester, + device_id=stored_dehydrated_device_id, + since_token=res["next_batch"], + limit=10, + ) + ) + self.assertTrue(len(res["next_batch"]) > 1) + self.assertEqual(len(res["events"]), 0) diff --git a/tests/rest/client/test_devices.py b/tests/rest/client/test_devices.py index d80eea17d3..b7d420cfec 100644 --- a/tests/rest/client/test_devices.py +++ b/tests/rest/client/test_devices.py @@ -13,12 +13,14 @@ # limitations under the License. from http import HTTPStatus +from twisted.internet.defer import ensureDeferred from twisted.test.proto_helpers import MemoryReactor from synapse.api.errors import NotFoundError from synapse.rest import admin, devices, room, sync -from synapse.rest.client import account, login, register +from synapse.rest.client import account, keys, login, register from synapse.server import HomeServer +from synapse.types import JsonDict, create_requester from synapse.util import Clock from tests import unittest @@ -208,8 +210,13 @@ class DehydratedDeviceTestCase(unittest.HomeserverTestCase): login.register_servlets, register.register_servlets, devices.register_servlets, + keys.register_servlets, ] + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.registration = hs.get_registration_handler() + self.message_handler = hs.get_device_message_handler() + def test_PUT(self) -> None: """Sanity-check that we can PUT a dehydrated device. 
@@ -226,7 +233,21 @@ class DehydratedDeviceTestCase(unittest.HomeserverTestCase): "device_data": { "algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm", "account": "dehydrated_device", - } + }, + "device_keys": { + "user_id": "@alice:test", + "device_id": "device1", + "valid_until_ts": "80", + "algorithms": [ + "m.olm.curve25519-aes-sha2", + ], + "keys": { + ":": "", + }, + "signatures": { + "": {":": ""} + }, + }, }, access_token=token, shorthand=False, @@ -234,3 +255,128 @@ class DehydratedDeviceTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) device_id = channel.json_body.get("device_id") self.assertIsInstance(device_id, str) + + @unittest.override_config( + {"experimental_features": {"msc2697_enabled": False, "msc3814_enabled": True}} + ) + def test_dehydrate_msc3814(self) -> None: + user = self.register_user("mikey", "pass") + token = self.login(user, "pass", device_id="device1") + content: JsonDict = { + "device_data": { + "algorithm": "m.dehydration.v1.olm", + }, + "device_id": "device1", + "initial_device_display_name": "foo bar", + "device_keys": { + "user_id": "@mikey:test", + "device_id": "device1", + "valid_until_ts": "80", + "algorithms": [ + "m.olm.curve25519-aes-sha2", + ], + "keys": { + ":": "", + }, + "signatures": { + "": {":": ""} + }, + }, + } + channel = self.make_request( + "PUT", + "_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device", + content=content, + access_token=token, + shorthand=False, + ) + self.assertEqual(channel.code, 200) + device_id = channel.json_body.get("device_id") + assert device_id is not None + self.assertIsInstance(device_id, str) + self.assertEqual("device1", device_id) + + # test that we can now GET the dehydrated device info + channel = self.make_request( + "GET", + "_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device", + access_token=token, + shorthand=False, + ) + self.assertEqual(channel.code, 200) + returned_device_id = channel.json_body.get("device_id") + self.assertEqual(returned_device_id, device_id) + device_data = channel.json_body.get("device_data") + expected_device_data = { + "algorithm": "m.dehydration.v1.olm", + } + self.assertEqual(device_data, expected_device_data) + + # create another device for the user + ( + new_device_id, + _, + _, + _, + ) = self.get_success( + self.registration.register_device( + user_id=user, + device_id=None, + initial_display_name="new device", + ) + ) + requester = create_requester(user, device_id=new_device_id) + + # Send a message to the dehydrated device + ensureDeferred( + self.message_handler.send_device_message( + requester=requester, + message_type="test.message", + messages={user: {device_id: {"body": "test_message"}}}, + ) + ) + self.pump() + + # make sure we can fetch the message with our dehydrated device id + channel = self.make_request( + "POST", + f"_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device/{device_id}/events", + content={}, + access_token=token, + shorthand=False, + ) + self.assertEqual(channel.code, 200) + expected_content = {"body": "test_message"} + self.assertEqual(channel.json_body["events"][0]["content"], expected_content) + next_batch_token = channel.json_body.get("next_batch") + + # fetch messages again and make sure that the message was deleted and we are returned an + # empty array + content = {"next_batch": next_batch_token} + channel = self.make_request( + "POST", + f"_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device/{device_id}/events", + content=content, 
+ access_token=token, + shorthand=False, + ) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["events"], []) + + # make sure we can delete the dehydrated device + channel = self.make_request( + "DELETE", + "_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device", + access_token=token, + shorthand=False, + ) + self.assertEqual(channel.code, 200) + + # ...and after deleting it is no longer available + channel = self.make_request( + "GET", + "_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device", + access_token=token, + shorthand=False, + ) + self.assertEqual(channel.code, 404) From 99b7b801c31b9428f9503ad6f83f11804fef048a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Jul 2023 14:19:46 +0200 Subject: [PATCH 256/562] Bump pygithub from 1.58.2 to 1.59.0 (#15834) Bumps [pygithub](https://github.com/pygithub/pygithub) from 1.58.2 to 1.59.0. - [Release notes](https://github.com/pygithub/pygithub/releases) - [Changelog](https://github.com/PyGithub/PyGithub/blob/main/doc/changes.rst) - [Commits](https://github.com/pygithub/pygithub/compare/v1.58.2...v1.59.0) --- updated-dependencies: - dependency-name: pygithub dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index e862a4e6c5..d5b30a11c4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1881,13 +1881,13 @@ email = ["email-validator (>=1.0.3)"] [[package]] name = "pygithub" -version = "1.58.2" +version = "1.59.0" description = "Use the full Github API v3" optional = false python-versions = ">=3.7" files = [ - {file = "PyGithub-1.58.2-py3-none-any.whl", hash = "sha256:f435884af617c6debaa76cbc355372d1027445a56fbc39972a3b9ed4968badc8"}, - {file = "PyGithub-1.58.2.tar.gz", hash = "sha256:1e6b1b7afe31f75151fb81f7ab6b984a7188a852bdb123dbb9ae90023c3ce60f"}, + {file = "PyGithub-1.59.0-py3-none-any.whl", hash = "sha256:126bdbae72087d8d038b113aab6b059b4553cb59348e3024bb1a1cae406ace9e"}, + {file = "PyGithub-1.59.0.tar.gz", hash = "sha256:6e05ff49bac3caa7d1d6177a10c6e55a3e20c85b92424cc198571fd0cf786690"}, ] [package.dependencies] From dbee081d149cf1b496d147e144279b621220e429 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Tue, 25 Jul 2023 14:32:47 +0200 Subject: [PATCH 257/562] 1.89.0rc1 --- CHANGES.md | 61 +++++++++++++++++++++++++++++++++++++++ changelog.d/15708.feature | 1 - changelog.d/15820.bugfix | 1 - changelog.d/15884.misc | 1 - changelog.d/15887.misc | 1 - changelog.d/15909.misc | 1 - changelog.d/15911.feature | 1 - changelog.d/15912.feature | 1 - changelog.d/15913.feature | 1 - changelog.d/15921.doc | 1 - changelog.d/15922.misc | 1 - changelog.d/15924.feature | 1 - changelog.d/15925.bugfix | 1 - changelog.d/15926.misc | 1 - changelog.d/15928.removal | 1 - changelog.d/15929.feature | 1 - changelog.d/15933.misc | 1 - changelog.d/15938.doc | 1 - changelog.d/15940.misc | 1 - changelog.d/15952.misc | 1 - changelog.d/15955.misc | 1 - changelog.d/15957.bugfix | 1 - changelog.d/15958.misc | 1 - changelog.d/15960.misc | 1 - changelog.d/15961.misc | 1 - changelog.d/15968.misc | 1 - changelog.d/15969.feature | 1 - changelog.d/15971.misc | 1 - changelog.d/15973.bugfix | 1 - changelog.d/15975.bugfix | 1 - changelog.d/15981.doc | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 33 files 
changed, 68 insertions(+), 31 deletions(-) delete mode 100644 changelog.d/15708.feature delete mode 100644 changelog.d/15820.bugfix delete mode 100644 changelog.d/15884.misc delete mode 100644 changelog.d/15887.misc delete mode 100644 changelog.d/15909.misc delete mode 100644 changelog.d/15911.feature delete mode 100644 changelog.d/15912.feature delete mode 100644 changelog.d/15913.feature delete mode 100644 changelog.d/15921.doc delete mode 100644 changelog.d/15922.misc delete mode 100644 changelog.d/15924.feature delete mode 100644 changelog.d/15925.bugfix delete mode 100644 changelog.d/15926.misc delete mode 100644 changelog.d/15928.removal delete mode 100644 changelog.d/15929.feature delete mode 100644 changelog.d/15933.misc delete mode 100644 changelog.d/15938.doc delete mode 100644 changelog.d/15940.misc delete mode 100644 changelog.d/15952.misc delete mode 100644 changelog.d/15955.misc delete mode 100644 changelog.d/15957.bugfix delete mode 100644 changelog.d/15958.misc delete mode 100644 changelog.d/15960.misc delete mode 100644 changelog.d/15961.misc delete mode 100644 changelog.d/15968.misc delete mode 100644 changelog.d/15969.feature delete mode 100644 changelog.d/15971.misc delete mode 100644 changelog.d/15973.bugfix delete mode 100644 changelog.d/15975.bugfix delete mode 100644 changelog.d/15981.doc diff --git a/CHANGES.md b/CHANGES.md index f379c994f0..f94bacc31b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,64 @@ +# Synapse 1.89.0rc1 (2023-07-25) + +### Features + +- Add Unix Socket support for HTTP Replication Listeners. Document and provide usage instructions for utilizing Unix sockets in Synapse. Contributed by Jason Little. ([\#15708](https://github.com/matrix-org/synapse/issues/15708), [\#15924](https://github.com/matrix-org/synapse/issues/15924)) +- Allow `+` in Matrix IDs, per [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009). ([\#15911](https://github.com/matrix-org/synapse/issues/15911)) +- Support room version 11 from [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820). ([\#15912](https://github.com/matrix-org/synapse/issues/15912)) +- Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`. ([\#15913](https://github.com/matrix-org/synapse/issues/15913), [\#15969](https://github.com/matrix-org/synapse/issues/15969)) +- Implement [MSC3814](https://github.com/matrix-org/matrix-spec-proposals/pull/3814), dehydrated devices v2/shrivelled sessions and move [MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) behind a config flag. Contributed by Nico from Famedly and H-Shay. ([\#15929](https://github.com/matrix-org/synapse/issues/15929)) + +### Bugfixes + +- Fix long-standing bug where remote invites weren't correctly pushed. ([\#15820](https://github.com/matrix-org/synapse/issues/15820)) +- Fix a bug introduced in 1.86.0 where Synapse would fail to start with an empty `experimental_features` configuration setting. ([\#15925](https://github.com/matrix-org/synapse/issues/15925)) +- Fixed deploy annotations in the provided Grafana dashboard config, so that it shows for any homeserver and not just matrix.org. Contributed by @wrjlewis. ([\#15957](https://github.com/matrix-org/synapse/issues/15957)) +- Properly handle redactions of creation events.
([\#15973](https://github.com/matrix-org/synapse/issues/15973)) +- Fix bug where resyncing stale device lists could block responding to federation transactions, and thus delay receiving new data from the remote server. ([\#15975](https://github.com/matrix-org/synapse/issues/15975)) + +### Improved Documentation + +- Better clarify how to run a worker instance (pass both configs). ([\#15921](https://github.com/matrix-org/synapse/issues/15921)) +- Improve the documentation for the login as a user admin API. ([\#15938](https://github.com/matrix-org/synapse/issues/15938)) +- Fix broken Arch Linux package link. Contributed by @SnipeXandrej. ([\#15981](https://github.com/matrix-org/synapse/issues/15981)) + +### Deprecations and Removals + +- Remove support for calling the `/register` endpoint with an unspecced `user` property for application services. ([\#15928](https://github.com/matrix-org/synapse/issues/15928)) + +### Internal Changes + +- Mark `get_user_in_directory` private since it is only used in tests. Also remove the cache from it. ([\#15884](https://github.com/matrix-org/synapse/issues/15884)) +- Fix background schema updates failing over a large upgrade gap. ([\#15887](https://github.com/matrix-org/synapse/issues/15887)) +- Document which Python version runs on a given Linux distribution so we can more easily clean up later. ([\#15909](https://github.com/matrix-org/synapse/issues/15909)) +- Add details to warning in log when we fail to fetch an alias. ([\#15922](https://github.com/matrix-org/synapse/issues/15922)) +- Remove unneeded `__init__`. ([\#15926](https://github.com/matrix-org/synapse/issues/15926)) +- Fix bug with read/write lock implementation. This is currently unused so has no observable effects. ([\#15933](https://github.com/matrix-org/synapse/issues/15933), [\#15958](https://github.com/matrix-org/synapse/issues/15958)) +- Unbreak the nix development environment by pinning the Rust version to 1.70.0. ([\#15940](https://github.com/matrix-org/synapse/issues/15940)) +- Update presence metrics to differentiate remote vs local users. ([\#15952](https://github.com/matrix-org/synapse/issues/15952)) +- Stop reading from column `user_id` of table `profiles`. ([\#15955](https://github.com/matrix-org/synapse/issues/15955)) +- Ensure a long state res does not starve CPU by occasionally yielding to the reactor. ([\#15960](https://github.com/matrix-org/synapse/issues/15960)) +- Build packages for Debian Trixie. ([\#15961](https://github.com/matrix-org/synapse/issues/15961)) +- Reduce the amount of state we pull out. ([\#15968](https://github.com/matrix-org/synapse/issues/15968)) +- Speed up updating state in large rooms. ([\#15971](https://github.com/matrix-org/synapse/issues/15971)) + +### Updates to locked dependencies + +* Bump anyhow from 1.0.71 to 1.0.72. ([\#15949](https://github.com/matrix-org/synapse/issues/15949)) +* Bump click from 8.1.3 to 8.1.6. ([\#15984](https://github.com/matrix-org/synapse/issues/15984)) +* Bump cryptography from 41.0.1 to 41.0.2. ([\#15943](https://github.com/matrix-org/synapse/issues/15943)) +* Bump jsonschema from 4.17.3 to 4.18.3. ([\#15948](https://github.com/matrix-org/synapse/issues/15948)) +* Bump pillow from 9.4.0 to 10.0.0. ([\#15986](https://github.com/matrix-org/synapse/issues/15986)) +* Bump prometheus-client from 0.17.0 to 0.17.1. ([\#15945](https://github.com/matrix-org/synapse/issues/15945)) +* Bump pydantic from 1.10.10 to 1.10.11. ([\#15946](https://github.com/matrix-org/synapse/issues/15946)) +* Bump pygithub from 1.58.2 to 1.59.0. 
([\#15834](https://github.com/matrix-org/synapse/issues/15834)) +* Bump pyo3-log from 0.8.2 to 0.8.3. ([\#15951](https://github.com/matrix-org/synapse/issues/15951)) +* Bump sentry-sdk from 1.26.0 to 1.28.1. ([\#15985](https://github.com/matrix-org/synapse/issues/15985)) +* Bump serde_json from 1.0.100 to 1.0.103. ([\#15950](https://github.com/matrix-org/synapse/issues/15950)) +* Bump types-pillow from 9.5.0.4 to 10.0.0.1. ([\#15932](https://github.com/matrix-org/synapse/issues/15932)) +* Bump types-requests from 2.31.0.1 to 2.31.0.2. ([\#15983](https://github.com/matrix-org/synapse/issues/15983)) +* Bump typing-extensions from 4.5.0 to 4.7.1. ([\#15947](https://github.com/matrix-org/synapse/issues/15947)) + # Synapse 1.88.0 (2023-07-18) This release diff --git a/changelog.d/15708.feature b/changelog.d/15708.feature deleted file mode 100644 index 06a6c959ab..0000000000 --- a/changelog.d/15708.feature +++ /dev/null @@ -1 +0,0 @@ -Add Unix Socket support for HTTP Replication Listeners. Document and provide usage instructions for utilizing Unix sockets in Synapse. Contributed by Jason Little. diff --git a/changelog.d/15820.bugfix b/changelog.d/15820.bugfix deleted file mode 100644 index d259d32061..0000000000 --- a/changelog.d/15820.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix long-standing bug where remote invites weren't correctly pushed. diff --git a/changelog.d/15884.misc b/changelog.d/15884.misc deleted file mode 100644 index 8e73a9a6cd..0000000000 --- a/changelog.d/15884.misc +++ /dev/null @@ -1 +0,0 @@ -Mark `get_user_in_directory` private since it is only used in tests. Also remove the cache from it. diff --git a/changelog.d/15887.misc b/changelog.d/15887.misc deleted file mode 100644 index 7c1005078e..0000000000 --- a/changelog.d/15887.misc +++ /dev/null @@ -1 +0,0 @@ -Fix background schema updates failing over a large upgrade gap. diff --git a/changelog.d/15909.misc b/changelog.d/15909.misc deleted file mode 100644 index ba36a97442..0000000000 --- a/changelog.d/15909.misc +++ /dev/null @@ -1 +0,0 @@ -Document which Python version runs on a given Linux distribution so we can more easily clean up later. diff --git a/changelog.d/15911.feature b/changelog.d/15911.feature deleted file mode 100644 index b24077c6c3..0000000000 --- a/changelog.d/15911.feature +++ /dev/null @@ -1 +0,0 @@ -Allow `+` in Matrix IDs, per [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009). diff --git a/changelog.d/15912.feature b/changelog.d/15912.feature deleted file mode 100644 index 0faed11eda..0000000000 --- a/changelog.d/15912.feature +++ /dev/null @@ -1 +0,0 @@ -Support room version 11 from [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820). diff --git a/changelog.d/15913.feature b/changelog.d/15913.feature deleted file mode 100644 index 0d77fae2dc..0000000000 --- a/changelog.d/15913.feature +++ /dev/null @@ -1 +0,0 @@ -Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`. diff --git a/changelog.d/15921.doc b/changelog.d/15921.doc deleted file mode 100644 index 02f34c73d5..0000000000 --- a/changelog.d/15921.doc +++ /dev/null @@ -1 +0,0 @@ -Better clarify how to run a worker instance (pass both configs). diff --git a/changelog.d/15922.misc b/changelog.d/15922.misc deleted file mode 100644 index 93fc644877..0000000000 --- a/changelog.d/15922.misc +++ /dev/null @@ -1 +0,0 @@ -Add details to warning in log when we fail to fetch an alias. 
diff --git a/changelog.d/15924.feature b/changelog.d/15924.feature deleted file mode 100644 index 06a6c959ab..0000000000 --- a/changelog.d/15924.feature +++ /dev/null @@ -1 +0,0 @@ -Add Unix Socket support for HTTP Replication Listeners. Document and provide usage instructions for utilizing Unix sockets in Synapse. Contributed by Jason Little. diff --git a/changelog.d/15925.bugfix b/changelog.d/15925.bugfix deleted file mode 100644 index e3ef783576..0000000000 --- a/changelog.d/15925.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.86.0 where Synapse starting with an empty `experimental_features` configuration setting. diff --git a/changelog.d/15926.misc b/changelog.d/15926.misc deleted file mode 100644 index bf4c0fa5d0..0000000000 --- a/changelog.d/15926.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unneeded `__init__`. diff --git a/changelog.d/15928.removal b/changelog.d/15928.removal deleted file mode 100644 index 5563213d31..0000000000 --- a/changelog.d/15928.removal +++ /dev/null @@ -1 +0,0 @@ -Remove support for calling the `/register` endpoint with an unspecced `user` property for application services. diff --git a/changelog.d/15929.feature b/changelog.d/15929.feature deleted file mode 100644 index c3aaeae66e..0000000000 --- a/changelog.d/15929.feature +++ /dev/null @@ -1 +0,0 @@ -Implement [MSC3814](https://github.com/matrix-org/matrix-spec-proposals/pull/3814), dehydrated devices v2/shrivelled sessions and move [MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) behind a config flag. Contributed by Nico from Famedly and H-Shay. diff --git a/changelog.d/15933.misc b/changelog.d/15933.misc deleted file mode 100644 index 8457994c68..0000000000 --- a/changelog.d/15933.misc +++ /dev/null @@ -1 +0,0 @@ -Fix bug with read/write lock implementation. This is currently unused so has no observable effects. diff --git a/changelog.d/15938.doc b/changelog.d/15938.doc deleted file mode 100644 index 8d99e5f4ea..0000000000 --- a/changelog.d/15938.doc +++ /dev/null @@ -1 +0,0 @@ -Improve the documentation for the login as a user admin API. diff --git a/changelog.d/15940.misc b/changelog.d/15940.misc deleted file mode 100644 index eac008eb3e..0000000000 --- a/changelog.d/15940.misc +++ /dev/null @@ -1 +0,0 @@ -Unbreak the nix development environment by pinning the Rust version to 1.70.0. \ No newline at end of file diff --git a/changelog.d/15952.misc b/changelog.d/15952.misc deleted file mode 100644 index c4160977cb..0000000000 --- a/changelog.d/15952.misc +++ /dev/null @@ -1 +0,0 @@ -Update presence metrics to differentiate remote vs local users. diff --git a/changelog.d/15955.misc b/changelog.d/15955.misc deleted file mode 100644 index dc4f687e0a..0000000000 --- a/changelog.d/15955.misc +++ /dev/null @@ -1 +0,0 @@ -Stop reading from column `user_id` of table `profiles`. diff --git a/changelog.d/15957.bugfix b/changelog.d/15957.bugfix deleted file mode 100644 index edbe2a956a..0000000000 --- a/changelog.d/15957.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixed deploy annotations in the provided Grafana dashboard config, so that it shows for any homeserver and not just matrix.org. Contributed by @wrjlewis. \ No newline at end of file diff --git a/changelog.d/15958.misc b/changelog.d/15958.misc deleted file mode 100644 index 8457994c68..0000000000 --- a/changelog.d/15958.misc +++ /dev/null @@ -1 +0,0 @@ -Fix bug with read/write lock implementation. This is currently unused so has no observable effects. 
diff --git a/changelog.d/15960.misc b/changelog.d/15960.misc deleted file mode 100644 index 7cac24a3c5..0000000000 --- a/changelog.d/15960.misc +++ /dev/null @@ -1 +0,0 @@ -Ensure a long state res does not starve CPU by occasionally yielding to the reactor. diff --git a/changelog.d/15961.misc b/changelog.d/15961.misc deleted file mode 100644 index 035a330446..0000000000 --- a/changelog.d/15961.misc +++ /dev/null @@ -1 +0,0 @@ -Build packages for Debian Trixie. diff --git a/changelog.d/15968.misc b/changelog.d/15968.misc deleted file mode 100644 index af7132cc72..0000000000 --- a/changelog.d/15968.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce the amount of state we pull out. diff --git a/changelog.d/15969.feature b/changelog.d/15969.feature deleted file mode 100644 index 0d77fae2dc..0000000000 --- a/changelog.d/15969.feature +++ /dev/null @@ -1 +0,0 @@ -Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`. diff --git a/changelog.d/15971.misc b/changelog.d/15971.misc deleted file mode 100644 index 4afd8922fc..0000000000 --- a/changelog.d/15971.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up updating state in large rooms. diff --git a/changelog.d/15973.bugfix b/changelog.d/15973.bugfix deleted file mode 100644 index c9280d0037..0000000000 --- a/changelog.d/15973.bugfix +++ /dev/null @@ -1 +0,0 @@ -Properly handle redactions of creation events. diff --git a/changelog.d/15975.bugfix b/changelog.d/15975.bugfix deleted file mode 100644 index 59738cca0a..0000000000 --- a/changelog.d/15975.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where resyncing stale device lists could block responding to federation transactions, and thus delay receiving new data from the remote server. diff --git a/changelog.d/15981.doc b/changelog.d/15981.doc deleted file mode 100644 index 374a5bd12f..0000000000 --- a/changelog.d/15981.doc +++ /dev/null @@ -1 +0,0 @@ -Fix broken Arch Linux package link. Contributed by @SnipeXandrej. diff --git a/debian/changelog b/debian/changelog index a369e0e5c2..384edbdab1 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.89.0~rc1) stable; urgency=medium + + * New Synapse release 1.89.0rc1. + + -- Synapse Packaging team Tue, 25 Jul 2023 14:31:07 +0200 + matrix-synapse-py3 (1.88.0) stable; urgency=medium * New Synapse release 1.88.0. 
diff --git a/pyproject.toml b/pyproject.toml index 4382ff38e5..89c5edb4db 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.88.0" +version = "1.89.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 8ebfd577e237eb7b364a692c88e14bc8820980d1 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Wed, 26 Jul 2023 14:51:44 +0200 Subject: [PATCH 258/562] Bump DB version to 79 since synapse v1.88 was already there (#15998) --- changelog.d/15998.bugfix | 1 + synapse/storage/schema/__init__.py | 6 +++++- .../03_read_write_locks_triggers.sql.postgres} | 13 +++++++------ .../03_read_write_locks_triggers.sql.sqlite} | 12 ++++++------ .../04_mitigate_stream_ordering_update_race.py} | 6 +++--- .../05_read_write_locks_triggers.sql.postgres} | 0 .../05_read_write_locks_triggers.sql.sqlite} | 0 7 files changed, 22 insertions(+), 16 deletions(-) create mode 100644 changelog.d/15998.bugfix rename synapse/storage/schema/main/delta/{78/04_read_write_locks_triggers.sql.postgres => 79/03_read_write_locks_triggers.sql.postgres} (86%) rename synapse/storage/schema/main/delta/{78/04_read_write_locks_triggers.sql.sqlite => 79/03_read_write_locks_triggers.sql.sqlite} (83%) rename synapse/storage/schema/main/delta/{78/05_mitigate_stream_ordering_update_race.py => 79/04_mitigate_stream_ordering_update_race.py} (92%) rename synapse/storage/schema/main/delta/{78/06_read_write_locks_triggers.sql.postgres => 79/05_read_write_locks_triggers.sql.postgres} (100%) rename synapse/storage/schema/main/delta/{78/06_read_write_locks_triggers.sql.sqlite => 79/05_read_write_locks_triggers.sql.sqlite} (100%) diff --git a/changelog.d/15998.bugfix b/changelog.d/15998.bugfix new file mode 100644 index 0000000000..b4ad8d776b --- /dev/null +++ b/changelog.d/15998.bugfix @@ -0,0 +1 @@ +Internal changelog to be removed. diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index fc190a8b13..d3ec648f6d 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -SCHEMA_VERSION = 78 # remember to update the list below when updating +SCHEMA_VERSION = 79 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -106,6 +106,10 @@ Changes in SCHEMA_VERSION = 77 Changes in SCHEMA_VERSION = 78 - Validate check (full_user_id IS NOT NULL) on tables profiles and user_filters + +Changes in SCHEMA_VERSION = 79 + - Add tables to handle in DB read-write locks. + - Add some mitigations for a painful race between foreground and background updates, cf #15677. 
""" diff --git a/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.postgres b/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.postgres similarity index 86% rename from synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.postgres rename to synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.postgres index e1cc3469a4..7df07ab0da 100644 --- a/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.postgres +++ b/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.postgres @@ -44,7 +44,7 @@ -- A table to track whether a lock is currently acquired, and if so whether its -- in read or write mode. -CREATE TABLE worker_read_write_locks_mode ( +CREATE TABLE IF NOT EXISTS worker_read_write_locks_mode ( lock_name TEXT NOT NULL, lock_key TEXT NOT NULL, -- Whether this lock is in read (false) or write (true) mode @@ -55,14 +55,14 @@ CREATE TABLE worker_read_write_locks_mode ( ); -- Ensure that we can only have one row per lock -CREATE UNIQUE INDEX worker_read_write_locks_mode_key ON worker_read_write_locks_mode (lock_name, lock_key); +CREATE UNIQUE INDEX IF NOT EXISTS worker_read_write_locks_mode_key ON worker_read_write_locks_mode (lock_name, lock_key); -- We need this (redundant) constraint so that we can have a foreign key -- constraint against this table. -CREATE UNIQUE INDEX worker_read_write_locks_mode_type ON worker_read_write_locks_mode (lock_name, lock_key, write_lock); +CREATE UNIQUE INDEX IF NOT EXISTS worker_read_write_locks_mode_type ON worker_read_write_locks_mode (lock_name, lock_key, write_lock); -- A table to track who has currently acquired a given lock. -CREATE TABLE worker_read_write_locks ( +CREATE TABLE IF NOT EXISTS worker_read_write_locks ( lock_name TEXT NOT NULL, lock_key TEXT NOT NULL, -- We write the instance name to ease manual debugging, we don't ever read @@ -84,9 +84,9 @@ CREATE TABLE worker_read_write_locks ( FOREIGN KEY (lock_name, lock_key, write_lock) REFERENCES worker_read_write_locks_mode (lock_name, lock_key, write_lock) ); -CREATE UNIQUE INDEX worker_read_write_locks_key ON worker_read_write_locks (lock_name, lock_key, token); +CREATE UNIQUE INDEX IF NOT EXISTS worker_read_write_locks_key ON worker_read_write_locks (lock_name, lock_key, token); -- Ensures that only one instance can acquire a lock in write mode at a time. -CREATE UNIQUE INDEX worker_read_write_locks_write ON worker_read_write_locks (lock_name, lock_key) WHERE write_lock; +CREATE UNIQUE INDEX IF NOT EXISTS worker_read_write_locks_write ON worker_read_write_locks (lock_name, lock_key) WHERE write_lock; -- Add a foreign key constraint to ensure that if a lock is in @@ -97,5 +97,6 @@ CREATE UNIQUE INDEX worker_read_write_locks_write ON worker_read_write_locks (lo -- We only add to PostgreSQL as SQLite does not support adding constraints -- after table creation, and so doesn't support "circular" foreign key -- constraints. 
+ALTER TABLE worker_read_write_locks_mode DROP CONSTRAINT IF EXISTS worker_read_write_locks_mode_foreign; ALTER TABLE worker_read_write_locks_mode ADD CONSTRAINT worker_read_write_locks_mode_foreign FOREIGN KEY (lock_name, lock_key, token) REFERENCES worker_read_write_locks(lock_name, lock_key, token) DEFERRABLE INITIALLY DEFERRED; diff --git a/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.sqlite b/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.sqlite similarity index 83% rename from synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.sqlite rename to synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.sqlite index b15432f576..95f9dbf120 100644 --- a/synapse/storage/schema/main/delta/78/04_read_write_locks_triggers.sql.sqlite +++ b/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.sqlite @@ -22,7 +22,7 @@ -- A table to track whether a lock is currently acquired, and if so whether its -- in read or write mode. -CREATE TABLE worker_read_write_locks_mode ( +CREATE TABLE IF NOT EXISTS worker_read_write_locks_mode ( lock_name TEXT NOT NULL, lock_key TEXT NOT NULL, -- Whether this lock is in read (false) or write (true) mode @@ -38,14 +38,14 @@ CREATE TABLE worker_read_write_locks_mode ( ); -- Ensure that we can only have one row per lock -CREATE UNIQUE INDEX worker_read_write_locks_mode_key ON worker_read_write_locks_mode (lock_name, lock_key); +CREATE UNIQUE INDEX IF NOT EXISTS worker_read_write_locks_mode_key ON worker_read_write_locks_mode (lock_name, lock_key); -- We need this (redundant) constraint so that we can have a foreign key -- constraint against this table. -CREATE UNIQUE INDEX worker_read_write_locks_mode_type ON worker_read_write_locks_mode (lock_name, lock_key, write_lock); +CREATE UNIQUE INDEX IF NOT EXISTS worker_read_write_locks_mode_type ON worker_read_write_locks_mode (lock_name, lock_key, write_lock); -- A table to track who has currently acquired a given lock. -CREATE TABLE worker_read_write_locks ( +CREATE TABLE IF NOT EXISTS worker_read_write_locks ( lock_name TEXT NOT NULL, lock_key TEXT NOT NULL, -- We write the instance name to ease manual debugging, we don't ever read @@ -67,6 +67,6 @@ CREATE TABLE worker_read_write_locks ( FOREIGN KEY (lock_name, lock_key, write_lock) REFERENCES worker_read_write_locks_mode (lock_name, lock_key, write_lock) ); -CREATE UNIQUE INDEX worker_read_write_locks_key ON worker_read_write_locks (lock_name, lock_key, token); +CREATE UNIQUE INDEX IF NOT EXISTS worker_read_write_locks_key ON worker_read_write_locks (lock_name, lock_key, token); -- Ensures that only one instance can acquire a lock in write mode at a time. 
-CREATE UNIQUE INDEX worker_read_write_locks_write ON worker_read_write_locks (lock_name, lock_key) WHERE write_lock; +CREATE UNIQUE INDEX IF NOT EXISTS worker_read_write_locks_write ON worker_read_write_locks (lock_name, lock_key) WHERE write_lock; diff --git a/synapse/storage/schema/main/delta/78/05_mitigate_stream_ordering_update_race.py b/synapse/storage/schema/main/delta/79/04_mitigate_stream_ordering_update_race.py similarity index 92% rename from synapse/storage/schema/main/delta/78/05_mitigate_stream_ordering_update_race.py rename to synapse/storage/schema/main/delta/79/04_mitigate_stream_ordering_update_race.py index 1a22f6a404..ae63585847 100644 --- a/synapse/storage/schema/main/delta/78/05_mitigate_stream_ordering_update_race.py +++ b/synapse/storage/schema/main/delta/79/04_mitigate_stream_ordering_update_race.py @@ -37,17 +37,17 @@ def run_create( # after the background update has finished if res: drop_cse_sql = """ - ALTER TABLE current_state_events DROP CONSTRAINT event_stream_ordering_fkey + ALTER TABLE current_state_events DROP CONSTRAINT IF EXISTS event_stream_ordering_fkey """ cur.execute(drop_cse_sql) drop_lcm_sql = """ - ALTER TABLE local_current_membership DROP CONSTRAINT event_stream_ordering_fkey + ALTER TABLE local_current_membership DROP CONSTRAINT IF EXISTS event_stream_ordering_fkey """ cur.execute(drop_lcm_sql) drop_rm_sql = """ - ALTER TABLE room_memberships DROP CONSTRAINT event_stream_ordering_fkey + ALTER TABLE room_memberships DROP CONSTRAINT IF EXISTS event_stream_ordering_fkey """ cur.execute(drop_rm_sql) diff --git a/synapse/storage/schema/main/delta/78/06_read_write_locks_triggers.sql.postgres b/synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.postgres similarity index 100% rename from synapse/storage/schema/main/delta/78/06_read_write_locks_triggers.sql.postgres rename to synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.postgres diff --git a/synapse/storage/schema/main/delta/78/06_read_write_locks_triggers.sql.sqlite b/synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.sqlite similarity index 100% rename from synapse/storage/schema/main/delta/78/06_read_write_locks_triggers.sql.sqlite rename to synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.sqlite From d4ea465496820457fd20a4138a1cc92cc2ad0de0 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Wed, 26 Jul 2023 14:54:08 +0200 Subject: [PATCH 259/562] Remove changelog file --- changelog.d/15998.bugfix | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/15998.bugfix diff --git a/changelog.d/15998.bugfix b/changelog.d/15998.bugfix deleted file mode 100644 index b4ad8d776b..0000000000 --- a/changelog.d/15998.bugfix +++ /dev/null @@ -1 +0,0 @@ -Internal changelog to be removed. From 76e392b0fa6ae5f906f806b86767a7e9b800c4c2 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Wed, 26 Jul 2023 16:13:39 +0200 Subject: [PATCH 260/562] Edit changelog --- CHANGES.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index f94bacc31b..c0570b1fd0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -2,24 +2,26 @@ ### Features -- Add Unix Socket support for HTTP Replication Listeners. Document and provide usage instructions for utilizing Unix sockets in Synapse. Contributed by Jason Little. ([\#15708](https://github.com/matrix-org/synapse/issues/15708), [\#15924](https://github.com/matrix-org/synapse/issues/15924)) +- Add Unix Socket support for HTTP Replication Listeners. 
[Document and provide usage instructions](https://matrix-org.github.io/synapse/v1.89/usage/configuration/config_documentation.html#listeners) for utilizing Unix sockets in Synapse. Contributed by Jason Little. ([\#15708](https://github.com/matrix-org/synapse/issues/15708), [\#15924](https://github.com/matrix-org/synapse/issues/15924))
 - Allow `+` in Matrix IDs, per [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009). ([\#15911](https://github.com/matrix-org/synapse/issues/15911))
 - Support room version 11 from [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820). ([\#15912](https://github.com/matrix-org/synapse/issues/15912))
 - Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`. ([\#15913](https://github.com/matrix-org/synapse/issues/15913), [\#15969](https://github.com/matrix-org/synapse/issues/15969))
-- Implement [MSC3814](https://github.com/matrix-org/matrix-spec-proposals/pull/3814), dehydrated devices v2/shrivelled sessions and move [MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) behind a config flag. Contributed by Nico from Famedly and H-Shay. ([\#15929](https://github.com/matrix-org/synapse/issues/15929))
+- Implement [MSC3814](https://github.com/matrix-org/matrix-spec-proposals/pull/3814), dehydrated devices v2/shrivelled sessions and move [MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) behind a config flag. Contributed by Nico from Famedly, H-Shay and poljar. ([\#15929](https://github.com/matrix-org/synapse/issues/15929))

 ### Bugfixes

-- Fix long-standing bug where remote invites weren't correctly pushed. ([\#15820](https://github.com/matrix-org/synapse/issues/15820))
+- Fix a long-standing bug where remote invites weren't correctly pushed. ([\#15820](https://github.com/matrix-org/synapse/issues/15820))
+- Fix background schema updates failing over a large upgrade gap. ([\#15887](https://github.com/matrix-org/synapse/issues/15887))
 - Fix a bug introduced in 1.86.0 where Synapse would fail to start with an empty `experimental_features` configuration setting. ([\#15925](https://github.com/matrix-org/synapse/issues/15925))
 - Fixed deploy annotations in the provided Grafana dashboard config, so that it shows for any homeserver and not just matrix.org. Contributed by @wrjlewis. ([\#15957](https://github.com/matrix-org/synapse/issues/15957))
+- Ensure a long state res does not starve CPU by occasionally yielding to the reactor. ([\#15960](https://github.com/matrix-org/synapse/issues/15960))
 - Properly handle redactions of creation events. ([\#15973](https://github.com/matrix-org/synapse/issues/15973))
-- Fix bug where resyncing stale device lists could block responding to federation transactions, and thus delay receiving new data from the remote server. ([\#15975](https://github.com/matrix-org/synapse/issues/15975))
+- Fix a bug where resyncing stale device lists could block responding to federation transactions, and thus delay receiving new data from the remote server. ([\#15975](https://github.com/matrix-org/synapse/issues/15975))

 ### Improved Documentation

 - Better clarify how to run a worker instance (pass both configs). ([\#15921](https://github.com/matrix-org/synapse/issues/15921))
-- Improve the documentation for the login as a user admin API.
([\#15938](https://github.com/matrix-org/synapse/issues/15938)) +- Improve [the documentation](https://matrix-org.github.io/synapse/v1.89/admin_api/user_admin_api.html#login-as-a-user) for the login as a user admin API. ([\#15938](https://github.com/matrix-org/synapse/issues/15938)) - Fix broken Arch Linux package link. Contributed by @SnipeXandrej. ([\#15981](https://github.com/matrix-org/synapse/issues/15981)) ### Deprecations and Removals @@ -29,7 +31,6 @@ ### Internal Changes - Mark `get_user_in_directory` private since it is only used in tests. Also remove the cache from it. ([\#15884](https://github.com/matrix-org/synapse/issues/15884)) -- Fix background schema updates failing over a large upgrade gap. ([\#15887](https://github.com/matrix-org/synapse/issues/15887)) - Document which Python version runs on a given Linux distribution so we can more easily clean up later. ([\#15909](https://github.com/matrix-org/synapse/issues/15909)) - Add details to warning in log when we fail to fetch an alias. ([\#15922](https://github.com/matrix-org/synapse/issues/15922)) - Remove unneeded `__init__`. ([\#15926](https://github.com/matrix-org/synapse/issues/15926)) @@ -37,7 +38,6 @@ - Unbreak the nix development environment by pinning the Rust version to 1.70.0. ([\#15940](https://github.com/matrix-org/synapse/issues/15940)) - Update presence metrics to differentiate remote vs local users. ([\#15952](https://github.com/matrix-org/synapse/issues/15952)) - Stop reading from column `user_id` of table `profiles`. ([\#15955](https://github.com/matrix-org/synapse/issues/15955)) -- Ensure a long state res does not starve CPU by occasionally yielding to the reactor. ([\#15960](https://github.com/matrix-org/synapse/issues/15960)) - Build packages for Debian Trixie. ([\#15961](https://github.com/matrix-org/synapse/issues/15961)) - Reduce the amount of state we pull out. ([\#15968](https://github.com/matrix-org/synapse/issues/15968)) - Speed up updating state in large rooms. ([\#15971](https://github.com/matrix-org/synapse/issues/15971)) From 8d2a5586f7f16122fb441247e15b081817145b26 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 26 Jul 2023 15:16:39 +0100 Subject: [PATCH 261/562] Bump serde from 1.0.171 to 1.0.175 (#15982) Bumps [serde](https://github.com/serde-rs/serde) from 1.0.171 to 1.0.175. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.171...v1.0.175) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2264e67245..b29a72a3b8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -332,18 +332,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.171" +version = "1.0.175" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9" +checksum = "5d25439cd7397d044e2748a6fe2432b5e85db703d6d097bd014b3c0ad1ebff0b" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.171" +version = "1.0.175" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" +checksum = "b23f7ade6f110613c0d63858ddb8b94c1041f550eab58a16b371bdf2c9c80ab4" dependencies = [ "proc-macro2", "quote", From 96529c42368a6153a92330c3f03be5b02ce4653c Mon Sep 17 00:00:00 2001 From: Mo Balaa Date: Wed, 26 Jul 2023 11:16:12 -0500 Subject: [PATCH 262/562] Add synapse version as Docker container label (#15972) Co-authored-by: Mo Balaa --- .github/workflows/docker.yml | 8 +++++++- changelog.d/15972.docker | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15972.docker diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 602f5e1759..cf98a6a86f 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -28,6 +28,10 @@ jobs: - name: Inspect builder run: docker buildx inspect + + - name: Extract version from pyproject.toml + run: | + echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV - name: Log in to DockerHub uses: docker/login-action@v2 @@ -61,7 +65,9 @@ jobs: uses: docker/build-push-action@v4 with: push: true - labels: "gitsha1=${{ github.sha }}" + labels: | + gitsha1=${{ github.sha }} + org.opencontainers.image.version=${{ env.SYNAPSE_VERSION }} tags: "${{ steps.set-tag.outputs.tags }}" file: "docker/Dockerfile" platforms: linux/amd64,linux/arm64 diff --git a/changelog.d/15972.docker b/changelog.d/15972.docker new file mode 100644 index 0000000000..7fd9707deb --- /dev/null +++ b/changelog.d/15972.docker @@ -0,0 +1 @@ +Add `org.opencontainers.image.version` labels to Docker containers [published by Matrix.org](https://hub.docker.com/r/matrixdotorg/synapse). Contributed by Mo Balaa. From 58f830511486271da72543dd20676b702bc52b2f Mon Sep 17 00:00:00 2001 From: Anshul Madnawat <100751856+anshulm333@users.noreply.github.com> Date: Thu, 27 Jul 2023 00:15:47 +0530 Subject: [PATCH 263/562] Inline SQL queries using boolean parameters (#15525) SQLite now supports TRUE and FALSE constants, simplify some queries by inlining those instead of passing them as arguments. 
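The rewrite pattern is easy to check in isolation. The following is a minimal,
self-contained sketch (standard library only; the `events` table and `outlier`
column are illustrative stand-ins, not Synapse's real schema) of what inlining
buys: the boolean constant moves out of the argument tuple and into the SQL
text, so only genuinely variable values remain bound to `?` placeholders.

    import sqlite3

    # TRUE/FALSE keywords need SQLite >= 3.23; the changelog below pins the
    # supported floor at 3.27.
    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE events (event_id TEXT, outlier BOOLEAN)")
    conn.execute("INSERT INTO events VALUES ('$a', TRUE), ('$b', FALSE)")

    # Before: the constant is threaded through the bound arguments and must
    # be kept aligned with the placeholders by hand.
    before = conn.execute(
        "SELECT event_id FROM events WHERE outlier = ?", (False,)
    ).fetchall()

    # After: the constant is part of the query itself.
    after = conn.execute(
        "SELECT event_id FROM events WHERE outlier = FALSE"
    ).fetchall()

    assert before == after == [("$b",)]

This also removes a small class of bookkeeping bugs visible in the hunks
below, where literals such as `False` had to occupy exactly the right slot in
an `args` list.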
--- changelog.d/15525.misc | 1 + synapse/storage/databases/main/event_federation.py | 3 +-- synapse/storage/databases/main/events.py | 12 ++++++------ synapse/storage/databases/main/purge_events.py | 9 ++++----- synapse/storage/databases/main/push_rule.py | 6 +++--- synapse/storage/databases/main/registration.py | 4 ++-- synapse/storage/databases/main/room.py | 10 +++++----- synapse/storage/databases/main/stream.py | 4 ++-- 8 files changed, 24 insertions(+), 25 deletions(-) create mode 100644 changelog.d/15525.misc diff --git a/changelog.d/15525.misc b/changelog.d/15525.misc new file mode 100644 index 0000000000..67ab0cf62f --- /dev/null +++ b/changelog.d/15525.misc @@ -0,0 +1 @@ +Update SQL queries to inline boolean parameters as supported in SQLite 3.27. diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index b2cda52ce5..534dc32413 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -843,7 +843,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas * because the schema change is in a background update, it's not * necessarily safe to assume that it will have been completed. */ - AND edge.is_state is ? /* False */ + AND edge.is_state is FALSE /** * We only want backwards extremities that are older than or at * the same position of the given `current_depth` (where older @@ -886,7 +886,6 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas sql, ( room_id, - False, current_depth, self._clock.time_msec(), BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS, diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 2b83a69426..bd3f14fb71 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1455,8 +1455,8 @@ class PersistEventsStore: }, ) - sql = "UPDATE events SET outlier = ? WHERE event_id = ?" - txn.execute(sql, (False, event.event_id)) + sql = "UPDATE events SET outlier = FALSE WHERE event_id = ?" + txn.execute(sql, (event.event_id,)) # Update the event_backward_extremities table now that this # event isn't an outlier any more. @@ -1549,13 +1549,13 @@ class PersistEventsStore: for event, _ in events_and_contexts if not event.internal_metadata.is_redacted() ] - sql = "UPDATE redactions SET have_censored = ? WHERE " + sql = "UPDATE redactions SET have_censored = FALSE WHERE " clause, args = make_in_list_sql_clause( self.database_engine, "redacts", unredacted_events, ) - txn.execute(sql + clause, [False] + args) + txn.execute(sql + clause, args) self.db_pool.simple_insert_many_txn( txn, @@ -2318,14 +2318,14 @@ class PersistEventsStore: " SELECT 1 FROM events" " LEFT JOIN event_edges edge" " ON edge.event_id = events.event_id" - " WHERE events.event_id = ? AND events.room_id = ? AND (events.outlier = ? OR edge.event_id IS NULL)" + " WHERE events.event_id = ? AND events.room_id = ? 
AND (events.outlier = FALSE OR edge.event_id IS NULL)" " )" ) txn.execute_batch( query, [ - (e_id, ev.room_id, e_id, ev.room_id, e_id, ev.room_id, False) + (e_id, ev.room_id, e_id, ev.room_id, e_id, ev.room_id) for ev in events for e_id in ev.prev_event_ids() if not ev.internal_metadata.is_outlier() diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index 9773c1fcd2..b52f48cf04 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -249,12 +249,11 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): # Mark all state and own events as outliers logger.info("[purge] marking remaining events as outliers") txn.execute( - "UPDATE events SET outlier = ?" + "UPDATE events SET outlier = TRUE" " WHERE event_id IN (" - " SELECT event_id FROM events_to_purge " - " WHERE NOT should_delete" - ")", - (True,), + " SELECT event_id FROM events_to_purge " + " WHERE NOT should_delete" + ")" ) # synapse tries to take out an exclusive lock on room_depth whenever it diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index e098ceea3c..c13c0bc7d7 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -560,19 +560,19 @@ class PushRuleStore(PushRulesWorkerStore): if isinstance(self.database_engine, PostgresEngine): sql = """ INSERT INTO push_rules_enable (id, user_name, rule_id, enabled) - VALUES (?, ?, ?, ?) + VALUES (?, ?, ?, 1) ON CONFLICT DO NOTHING """ elif isinstance(self.database_engine, Sqlite3Engine): sql = """ INSERT OR IGNORE INTO push_rules_enable (id, user_name, rule_id, enabled) - VALUES (?, ?, ?, ?) + VALUES (?, ?, ?, 1) """ else: raise RuntimeError("Unknown database engine") new_enable_id = self._push_rules_enable_id_gen.get_next() - txn.execute(sql, (new_enable_id, user_id, rule_id, 1)) + txn.execute(sql, (new_enable_id, user_id, rule_id)) async def delete_push_rule(self, user_id: str, rule_id: str) -> None: """ diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 676d03bb7e..c582cf0573 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -454,9 +454,9 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): ) -> List[Tuple[str, int]]: sql = ( "SELECT user_id, expiration_ts_ms FROM account_validity" - " WHERE email_sent = ? AND (expiration_ts_ms - ?) <= ?" + " WHERE email_sent = FALSE AND (expiration_ts_ms - ?) <= ?" ) - values = [False, now_ms, renew_at] + values = [now_ms, renew_at] txn.execute(sql, values) return cast(List[Tuple[str, int]], txn.fetchall()) diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 830658f328..719e11aea6 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -936,11 +936,11 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): JOIN event_json USING (room_id, event_id) WHERE room_id = ? %(where_clause)s - AND contains_url = ? AND outlier = ? + AND contains_url = TRUE AND outlier = FALSE ORDER BY stream_ordering DESC LIMIT ? 
""" - txn.execute(sql % {"where_clause": ""}, (room_id, True, False, 100)) + txn.execute(sql % {"where_clause": ""}, (room_id, 100)) local_media_mxcs = [] remote_media_mxcs = [] @@ -976,7 +976,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): txn.execute( sql % {"where_clause": "AND stream_ordering < ?"}, - (room_id, next_token, True, False, 100), + (room_id, next_token, 100), ) return local_media_mxcs, remote_media_mxcs @@ -1086,9 +1086,9 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): # set quarantine if quarantined_by is not None: - sql += "AND safe_from_quarantine = ?" + sql += "AND safe_from_quarantine = FALSE" txn.executemany( - sql, [(quarantined_by, media_id, False) for media_id in local_mxcs] + sql, [(quarantined_by, media_id) for media_id in local_mxcs] ) # remove from quarantine else: diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 92cbe262a6..5a3611c415 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -1401,7 +1401,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): `to_token`), or `limit` is zero. """ - args = [False, room_id] + args: List[Any] = [room_id] order, from_bound, to_bound = generate_pagination_bounds( direction, from_token, to_token @@ -1475,7 +1475,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): event.topological_ordering, event.stream_ordering FROM events AS event %(join_clause)s - WHERE event.outlier = ? AND event.room_id = ? AND %(bounds)s + WHERE event.outlier = FALSE AND event.room_id = ? AND %(bounds)s ORDER BY event.topological_ordering %(order)s, event.stream_ordering %(order)s LIMIT ? """ % { From f98f4f2e16a01928e0d442fef4669a1e3fca9b0f Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 26 Jul 2023 12:59:47 -0700 Subject: [PATCH 264/562] Remove support for legacy application service paths (#15964) --- changelog.d/15964.removal | 1 + synapse/appservice/api.py | 82 +++++------------------------------- tests/appservice/test_api.py | 53 ----------------------- 3 files changed, 12 insertions(+), 124 deletions(-) create mode 100644 changelog.d/15964.removal diff --git a/changelog.d/15964.removal b/changelog.d/15964.removal new file mode 100644 index 0000000000..7613afe505 --- /dev/null +++ b/changelog.d/15964.removal @@ -0,0 +1 @@ +Remove support for legacy application service paths. 
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 5fb3d5083d..359999f680 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -17,8 +17,6 @@ import urllib.parse from typing import ( TYPE_CHECKING, Any, - Awaitable, - Callable, Dict, Iterable, List, @@ -30,7 +28,7 @@ from typing import ( ) from prometheus_client import Counter -from typing_extensions import Concatenate, ParamSpec, TypeGuard +from typing_extensions import ParamSpec, TypeGuard from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind from synapse.api.errors import CodeMessageException, HttpResponseException @@ -80,9 +78,7 @@ sent_todevice_counter = Counter( HOUR_IN_MS = 60 * 60 * 1000 - APP_SERVICE_PREFIX = "/_matrix/app/v1" -APP_SERVICE_UNSTABLE_PREFIX = "/_matrix/app/unstable" P = ParamSpec("P") R = TypeVar("R") @@ -128,47 +124,6 @@ class ApplicationServiceApi(SimpleHttpClient): hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS ) - async def _send_with_fallbacks( - self, - service: "ApplicationService", - prefixes: List[str], - path: str, - func: Callable[Concatenate[str, P], Awaitable[R]], - *args: P.args, - **kwargs: P.kwargs, - ) -> R: - """ - Attempt to call an application service with multiple paths, falling back - until one succeeds. - - Args: - service: The appliacation service, this provides the base URL. - prefixes: A last of paths to try in order for the requests. - path: A suffix to append to each prefix. - func: The function to call, the first argument will be the full - endpoint to fetch. Other arguments are provided by args/kwargs. - - Returns: - The return value of func. - """ - for i, prefix in enumerate(prefixes, start=1): - uri = f"{service.url}{prefix}{path}" - try: - return await func(uri, *args, **kwargs) - except HttpResponseException as e: - # If an error is received that is due to an unrecognised path, - # fallback to next path (if one exists). Otherwise, consider it - # a legitimate error and raise. - if i < len(prefixes) and is_unknown_endpoint(e): - continue - raise - except Exception: - # Unexpected exceptions get sent to the caller. - raise - - # The function should always exit via the return or raise above this. - raise RuntimeError("Unexpected fallback behaviour. 
This should never be seen.") - async def query_user(self, service: "ApplicationService", user_id: str) -> bool: if service.url is None: return False @@ -177,11 +132,8 @@ class ApplicationServiceApi(SimpleHttpClient): assert service.hs_token is not None try: - response = await self._send_with_fallbacks( - service, - [APP_SERVICE_PREFIX, ""], - f"/users/{urllib.parse.quote(user_id)}", - self.get_json, + response = await self.get_json( + f"{service.url}{APP_SERVICE_PREFIX}/users/{urllib.parse.quote(user_id)}", {"access_token": service.hs_token}, headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) @@ -203,11 +155,8 @@ class ApplicationServiceApi(SimpleHttpClient): assert service.hs_token is not None try: - response = await self._send_with_fallbacks( - service, - [APP_SERVICE_PREFIX, ""], - f"/rooms/{urllib.parse.quote(alias)}", - self.get_json, + response = await self.get_json( + f"{service.url}{APP_SERVICE_PREFIX}/rooms/{urllib.parse.quote(alias)}", {"access_token": service.hs_token}, headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) @@ -245,11 +194,8 @@ class ApplicationServiceApi(SimpleHttpClient): **fields, b"access_token": service.hs_token, } - response = await self._send_with_fallbacks( - service, - [APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX], - f"/thirdparty/{kind}/{urllib.parse.quote(protocol)}", - self.get_json, + response = await self.get_json( + f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/{kind}/{urllib.parse.quote(protocol)}", args=args, headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) @@ -285,11 +231,8 @@ class ApplicationServiceApi(SimpleHttpClient): # This is required by the configuration. assert service.hs_token is not None try: - info = await self._send_with_fallbacks( - service, - [APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX], - f"/thirdparty/protocol/{urllib.parse.quote(protocol)}", - self.get_json, + info = await self.get_json( + f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/protocol/{urllib.parse.quote(protocol)}", {"access_token": service.hs_token}, headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) @@ -401,11 +344,8 @@ class ApplicationServiceApi(SimpleHttpClient): } try: - await self._send_with_fallbacks( - service, - [APP_SERVICE_PREFIX, ""], - f"/transactions/{urllib.parse.quote(str(txn_id))}", - self.put_json, + await self.put_json( + f"{service.url}{APP_SERVICE_PREFIX}/transactions/{urllib.parse.quote(str(txn_id))}", json_body=body, args={"access_token": service.hs_token}, headers={"Authorization": [f"Bearer {service.hs_token}"]}, diff --git a/tests/appservice/test_api.py b/tests/appservice/test_api.py index 15fce165b6..807dc2f21c 100644 --- a/tests/appservice/test_api.py +++ b/tests/appservice/test_api.py @@ -16,7 +16,6 @@ from unittest.mock import Mock from twisted.test.proto_helpers import MemoryReactor -from synapse.api.errors import HttpResponseException from synapse.appservice import ApplicationService from synapse.server import HomeServer from synapse.types import JsonDict @@ -107,58 +106,6 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase): self.assertEqual(self.request_url, URL_LOCATION) self.assertEqual(result, SUCCESS_RESULT_LOCATION) - def test_fallback(self) -> None: - """ - Tests that the fallback to legacy URLs works. 
- """ - SUCCESS_RESULT_USER = [ - { - "protocol": PROTOCOL, - "userid": "@a:user", - "fields": { - "more": "fields", - }, - } - ] - - URL_USER = f"{URL}/_matrix/app/v1/thirdparty/user/{PROTOCOL}" - FALLBACK_URL_USER = f"{URL}/_matrix/app/unstable/thirdparty/user/{PROTOCOL}" - - self.request_url = None - self.v1_seen = False - - async def get_json( - url: str, - args: Mapping[Any, Any], - headers: Mapping[Union[str, bytes], Sequence[Union[str, bytes]]], - ) -> List[JsonDict]: - # Ensure the access token is passed as both a header and query arg. - if not headers.get("Authorization") or not args.get(b"access_token"): - raise RuntimeError("Access token not provided") - - self.assertEqual(headers.get("Authorization"), [f"Bearer {TOKEN}"]) - self.assertEqual(args.get(b"access_token"), TOKEN) - self.request_url = url - if url == URL_USER: - self.v1_seen = True - raise HttpResponseException(404, "NOT_FOUND", b"NOT_FOUND") - elif url == FALLBACK_URL_USER: - return SUCCESS_RESULT_USER - else: - raise RuntimeError( - "URL provided was invalid. This should never be seen." - ) - - # We assign to a method, which mypy doesn't like. - self.api.get_json = Mock(side_effect=get_json) # type: ignore[assignment] - - result = self.get_success( - self.api.query_3pe(self.service, "user", PROTOCOL, {b"some": [b"field"]}) - ) - self.assertTrue(self.v1_seen) - self.assertEqual(self.request_url, FALLBACK_URL_USER) - self.assertEqual(result, SUCCESS_RESULT_USER) - def test_claim_keys(self) -> None: """ Tests that the /keys/claim response is properly parsed for missing From f9f3e89354915d6f3b002355c96ac74f37cc85b9 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 27 Jul 2023 13:47:48 +0100 Subject: [PATCH 265/562] Attempt to fix labelling in docker workflow (#16009) --- .github/workflows/docker.yml | 8 +++++++- changelog.d/16009.docker | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16009.docker diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index cf98a6a86f..8a69dc4986 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -28,8 +28,14 @@ jobs: - name: Inspect builder run: docker buildx inspect - + + - name: Checkout repository + uses: actions/checkout@v3 + - name: Extract version from pyproject.toml + # Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see + # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsshell + shell: bash run: | echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV diff --git a/changelog.d/16009.docker b/changelog.d/16009.docker new file mode 100644 index 0000000000..7fd9707deb --- /dev/null +++ b/changelog.d/16009.docker @@ -0,0 +1 @@ +Add `org.opencontainers.image.version` labels to Docker containers [published by Matrix.org](https://hub.docker.com/r/matrixdotorg/synapse). Contributed by Mo Balaa. From a461f1f8467b44d67080c2688dbaad74199fe573 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Thu, 27 Jul 2023 14:51:26 +0200 Subject: [PATCH 266/562] Update PyYAML to 6.0.1 (#16011) --- changelog.d/16011.misc | 1 + poetry.lock | 82 +++++++++++++++++++++--------------------- 2 files changed, 42 insertions(+), 41 deletions(-) create mode 100644 changelog.d/16011.misc diff --git a/changelog.d/16011.misc b/changelog.d/16011.misc new file mode 100644 index 0000000000..8a8d9822c6 --- /dev/null +++ b/changelog.d/16011.misc @@ -0,0 +1 @@ +Update PyYAML to 6.0.1. 
diff --git a/poetry.lock b/poetry.lock index d5b30a11c4..05139e60d5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2072,51 +2072,51 @@ files = [ [[package]] name = "pyyaml" -version = "6.0" +version = "6.0.1" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.6" files = [ - {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, - {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, - {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, - {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, - {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, - {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, - {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, - {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, - {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, - {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, - {file = 
"PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, - {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, - {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, - {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, - {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, - {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, - {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, - {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, - {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = 
"PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, ] [[package]] From a719b703d9bd0dade2565ddcad0e2f3a7a9d4c37 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Thu, 27 Jul 2023 15:45:05 +0200 Subject: [PATCH 267/562] Fix 404 on /profile when the display name is empty but not the avatar (#16012) --- changelog.d/16012.bugfix | 1 + synapse/handlers/profile.py | 2 +- tests/handlers/test_profile.py | 10 ++++++++++ 3 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16012.bugfix diff --git a/changelog.d/16012.bugfix b/changelog.d/16012.bugfix new file mode 100644 index 0000000000..44ca9377ff --- /dev/null +++ b/changelog.d/16012.bugfix @@ -0,0 +1 @@ +Fix 404 not found code returned on profile endpoint when the display name is empty but not the avatar URL. 
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index a7f8c5e636..c7fe101cd9 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -68,7 +68,7 @@ class ProfileHandler:

         if self.hs.is_mine(target_user):
             profileinfo = await self.store.get_profileinfo(target_user)
-            if profileinfo.display_name is None:
+            if profileinfo.display_name is None and profileinfo.avatar_url is None:
                 raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)

             return {
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index 196ceb0b82..ec2f5d30be 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -179,6 +179,16 @@ class ProfileTestCase(unittest.HomeserverTestCase):

         self.assertEqual("http://my.server/me.png", avatar_url)

+    def test_get_profile_empty_displayname(self) -> None:
+        self.get_success(self.store.set_profile_displayname(self.frank, None))
+        self.get_success(
+            self.store.set_profile_avatar_url(self.frank, "http://my.server/me.png")
+        )
+
+        profile = self.get_success(self.handler.get_profile(self.frank.to_string()))
+
+        self.assertEqual("http://my.server/me.png", profile["avatar_url"])
+
     def test_set_my_avatar(self) -> None:
         self.get_success(
             self.handler.set_avatar_url(

From 68b2611783ab00fdad567654a95492442722c106 Mon Sep 17 00:00:00 2001
From: Shay
Date: Thu, 27 Jul 2023 15:08:46 -0700
Subject: [PATCH 268/562] Clarify comment on key uploads over replication
 (#16016)

---
 changelog.d/16016.doc               | 2 ++
 synapse/replication/http/devices.py | 4 ++--
 2 files changed, 4 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/16016.doc

diff --git a/changelog.d/16016.doc b/changelog.d/16016.doc
new file mode 100644
index 0000000000..e677058c2d
--- /dev/null
+++ b/changelog.d/16016.doc
@@ -0,0 +1,2 @@
+Clarify comment on the keys/upload over replication endpoint.
+
diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py
index f874f072f9..73f3de3642 100644
--- a/synapse/replication/http/devices.py
+++ b/synapse/replication/http/devices.py
@@ -107,8 +107,7 @@ class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
     Calls to e2e_keys_handler.upload_keys_for_user(user_id, device_id, keys)
     on the main process to accomplish this.

-    Defined in https://spec.matrix.org/v1.4/client-server-api/#post_matrixclientv3keysupload
-    Request format (borrowed and expanded from KeyUploadServlet):
+    Request format for this endpoint (borrowed and expanded from KeyUploadServlet):

     POST /_synapse/replication/upload_keys_for_user

@@ -117,6 +116,7 @@ class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
         "device_id": "<device_id>",
         "keys": {
             ....this part can be found in KeyUploadServlet in rest/client/keys.py....
+            or as defined in https://spec.matrix.org/v1.4/client-server-api/#post_matrixclientv3keysupload
         }
     }

From ea4ece3fccae6c8b4e92de237a91af73ba2ac98a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 31 Jul 2023 10:21:34 +0200
Subject: [PATCH 269/562] Bump types-netaddr from 0.8.0.8 to 0.8.0.9 (#16035)

Bumps [types-netaddr](https://github.com/python/typeshed) from 0.8.0.8 to 0.8.0.9.
- [Commits](https://github.com/python/typeshed/commits)

---
updated-dependencies:
- dependency-name: types-netaddr
  dependency-type: direct:development
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 05139e60d5..ee7d4b67f6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2969,13 +2969,13 @@ files = [ [[package]] name = "types-netaddr" -version = "0.8.0.8" +version = "0.8.0.9" description = "Typing stubs for netaddr" optional = false python-versions = "*" files = [ - {file = "types-netaddr-0.8.0.8.tar.gz", hash = "sha256:db7e8cd16b1244e7c4541edd0df99d1039fc05fd5387c21840f0b958fc52aabc"}, - {file = "types_netaddr-0.8.0.8-py3-none-any.whl", hash = "sha256:6741b3824e2ec3f7a74842b394439b71107c7675f8ae42bb2b5e7a8ebfe8cf18"}, + {file = "types-netaddr-0.8.0.9.tar.gz", hash = "sha256:68900c267fd31627c1721c5c52b32a257657ac2777457dca49b6b096ba2faf74"}, + {file = "types_netaddr-0.8.0.9-py3-none-any.whl", hash = "sha256:63e871f064cd59473cec1177f372526f0fa3d565050247d5305bdc325be5c3f6"}, ] [[package]] From 76b221859903c6d864a347f74c7fffd517d1f606 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Jul 2023 10:21:48 +0200 Subject: [PATCH 270/562] Bump types-jsonschema from 4.17.0.8 to 4.17.0.10 (#16036) Bumps [types-jsonschema](https://github.com/python/typeshed) from 4.17.0.8 to 4.17.0.10. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-jsonschema dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index ee7d4b67f6..53725a2def 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2958,13 +2958,13 @@ files = [ [[package]] name = "types-jsonschema" -version = "4.17.0.8" +version = "4.17.0.10" description = "Typing stubs for jsonschema" optional = false python-versions = "*" files = [ - {file = "types-jsonschema-4.17.0.8.tar.gz", hash = "sha256:96a56990910f405e62de58862c0bbb3ac29ee6dba6d3d99aa0ba7f874cc547de"}, - {file = "types_jsonschema-4.17.0.8-py3-none-any.whl", hash = "sha256:f5958eb7b53217dfb5125f0412aeaef226a8a9013eac95816c95b5b523f6796b"}, + {file = "types-jsonschema-4.17.0.10.tar.gz", hash = "sha256:8e979db34d69bc9f9b3d6e8b89bdbc60b3a41cfce4e1fb87bf191d205c7f5098"}, + {file = "types_jsonschema-4.17.0.10-py3-none-any.whl", hash = "sha256:3aa2a89afbd9eaa6ce0c15618b36f02692a621433889ce73014656f7d8caf971"}, ] [[package]] From fee0195b277f29d3f64e62194f6a10ddcf73c159 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Jul 2023 10:23:00 +0200 Subject: [PATCH 271/562] Bump serde_json from 1.0.103 to 1.0.104 (#16032) Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.103 to 1.0.104. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.103...v1.0.104) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b29a72a3b8..caf693877b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -352,9 +352,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.103" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d03b412469450d4404fe8499a268edd7f8b79fecb074b0d812ad64ca21f4031b" +checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" dependencies = [ "itoa", "ryu", From 0c6142c4a1695dd11fe7430609227796927133b4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Jul 2023 10:47:25 +0100 Subject: [PATCH 272/562] Bump types-commonmark from 0.9.2.3 to 0.9.2.4 (#16037) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 53725a2def..e8ceb9f3a7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2947,13 +2947,13 @@ files = [ [[package]] name = "types-commonmark" -version = "0.9.2.3" +version = "0.9.2.4" description = "Typing stubs for commonmark" optional = false python-versions = "*" files = [ - {file = "types-commonmark-0.9.2.3.tar.gz", hash = "sha256:42769a2c194fd5b49fd9eedfd4a83cd1d2514c6d0a36f00f5c5ffe0b6a2d2fcf"}, - {file = "types_commonmark-0.9.2.3-py3-none-any.whl", hash = "sha256:b575156e1b8a292d43acb36f861110b85c4bc7aa53bbfb5ac64addec15d18cfa"}, + {file = "types-commonmark-0.9.2.4.tar.gz", hash = "sha256:2c6486f65735cf18215cca3e962b17787fa545be279306f79b801f64a5319959"}, + {file = "types_commonmark-0.9.2.4-py3-none-any.whl", hash = "sha256:d5090fa685c3e3c0ec3a5973ff842000baef6d86f762d52209b3c5e9fbd0b555"}, ] [[package]] From ae55cc1e6bc6527d0e359a823c474f5c9ed4382e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 31 Jul 2023 10:58:03 +0100 Subject: [PATCH 273/562] Add ability to wait for locks and add locks to purge history / room deletion (#15791) c.f. #13476 --- changelog.d/15791.bugfix | 1 + synapse/federation/federation_server.py | 17 +- synapse/handlers/message.py | 38 +- synapse/handlers/pagination.py | 23 +- synapse/handlers/room_member.py | 45 +-- synapse/handlers/worker_lock.py | 333 ++++++++++++++++++ synapse/notifier.py | 16 + synapse/replication/tcp/commands.py | 33 ++ synapse/replication/tcp/handler.py | 22 ++ .../rest/client/room_upgrade_rest_servlet.py | 11 +- synapse/server.py | 5 + synapse/storage/controllers/persist_events.py | 27 +- synapse/storage/databases/main/lock.py | 192 ++++++---- tests/handlers/test_worker_lock.py | 74 ++++ tests/rest/client/test_rooms.py | 4 +- tests/storage/databases/main/test_lock.py | 52 +++ 16 files changed, 784 insertions(+), 109 deletions(-) create mode 100644 changelog.d/15791.bugfix create mode 100644 synapse/handlers/worker_lock.py create mode 100644 tests/handlers/test_worker_lock.py diff --git a/changelog.d/15791.bugfix b/changelog.d/15791.bugfix new file mode 100644 index 0000000000..182634b62f --- /dev/null +++ b/changelog.d/15791.bugfix @@ -0,0 +1 @@ +Fix bug where purging history and paginating simultaneously could lead to database corruption when using workers. 
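The diffs below thread a read/write lock through the event-handling, message
send and purge paths. As a review aid, here is a self-contained toy sketch
(asyncio only, no Synapse imports) of the discipline being introduced: event
handling and pagination enter as readers, purge/deletion as the sole writer.
The real lock added by this patch is database-backed and shared across
workers, and also supports acquiring several locks at once; this in-process
toy shows only the reader/writer semantics.

    import asyncio
    from contextlib import asynccontextmanager
    from typing import AsyncIterator

    class ToyReadWriteLock:
        """In-process stand-in for the cross-worker read/write lock."""

        def __init__(self) -> None:
            self._cond = asyncio.Condition()
            self._readers = 0
            self._writer = False

        @asynccontextmanager
        async def acquire(self, write: bool) -> AsyncIterator[None]:
            async with self._cond:
                if write:
                    # A writer waits until nobody holds the lock at all...
                    await self._cond.wait_for(
                        lambda: self._readers == 0 and not self._writer
                    )
                    self._writer = True
                else:
                    # ...while readers only wait for an active writer and
                    # freely share the lock with each other.
                    await self._cond.wait_for(lambda: not self._writer)
                    self._readers += 1
            try:
                yield
            finally:
                async with self._cond:
                    if write:
                        self._writer = False
                    else:
                        self._readers -= 1
                    self._cond.notify_all()

    async def main() -> None:
        lock = ToyReadWriteLock()

        async def paginate(n: int) -> None:
            async with lock.acquire(write=False):
                print(f"pagination {n} holds a read lock")
                await asyncio.sleep(0.1)

        async def purge() -> None:
            async with lock.acquire(write=True):
                print("purge holds the write lock alone")
                await asyncio.sleep(0.1)

        # The two paginations may overlap each other, never the purge.
        await asyncio.gather(paginate(1), paginate(2), purge())

    asyncio.run(main())

Note the toy makes no fairness guarantee (a steady stream of readers could
starve the writer); it exists only to make the locking shape of the diffs
below easier to follow.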
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index fa61dd8c10..a90d99c4d6 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -63,6 +63,7 @@ from synapse.federation.federation_base import ( ) from synapse.federation.persistence import TransactionActions from synapse.federation.units import Edu, Transaction +from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import ( make_deferred_yieldable, @@ -137,6 +138,7 @@ class FederationServer(FederationBase): self._event_auth_handler = hs.get_event_auth_handler() self._room_member_handler = hs.get_room_member_handler() self._e2e_keys_handler = hs.get_e2e_keys_handler() + self._worker_lock_handler = hs.get_worker_locks_handler() self._state_storage_controller = hs.get_storage_controllers().state @@ -1236,9 +1238,18 @@ class FederationServer(FederationBase): logger.info("handling received PDU in room %s: %s", room_id, event) try: with nested_logging_context(event.event_id): - await self._federation_event_handler.on_receive_pdu( - origin, event - ) + # We're taking out a lock within a lock, which could + # lead to deadlocks if we're not careful. However, it is + # safe on this occasion as we only ever take a write + # lock when deleting a room, which we would never do + # while holding the `_INBOUND_EVENT_HANDLING_LOCK_NAME` + # lock. + async with self._worker_lock_handler.acquire_read_write_lock( + DELETE_ROOM_LOCK_NAME, room_id, write=False + ): + await self._federation_event_handler.on_receive_pdu( + origin, event + ) except FederationError as e: # XXX: Ideally we'd inform the remote we failed to process # the event, but we can't return an error in the transaction diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index fff0b5fa12..187dedae7d 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -53,6 +53,7 @@ from synapse.events.snapshot import EventContext, UnpersistedEventContextBase from synapse.events.utils import SerializeEventConfig, maybe_upsert_event_field from synapse.events.validator import EventValidator from synapse.handlers.directory import DirectoryHandler +from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME from synapse.logging import opentracing from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics.background_process_metrics import run_as_background_process @@ -485,6 +486,7 @@ class EventCreationHandler: self._events_shard_config = self.config.worker.events_shard_config self._instance_name = hs.get_instance_name() self._notifier = hs.get_notifier() + self._worker_lock_handler = hs.get_worker_locks_handler() self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state @@ -1010,6 +1012,37 @@ class EventCreationHandler: event.internal_metadata.stream_ordering, ) + async with self._worker_lock_handler.acquire_read_write_lock( + DELETE_ROOM_LOCK_NAME, room_id, write=False + ): + return await self._create_and_send_nonmember_event_locked( + requester=requester, + event_dict=event_dict, + allow_no_prev_events=allow_no_prev_events, + prev_event_ids=prev_event_ids, + state_event_ids=state_event_ids, + ratelimit=ratelimit, + txn_id=txn_id, + ignore_shadow_ban=ignore_shadow_ban, + outlier=outlier, + depth=depth, + ) + + async def _create_and_send_nonmember_event_locked( + self, + requester: Requester, + event_dict: dict, + allow_no_prev_events: 
bool = False,
+        prev_event_ids: Optional[List[str]] = None,
+        state_event_ids: Optional[List[str]] = None,
+        ratelimit: bool = True,
+        txn_id: Optional[str] = None,
+        ignore_shadow_ban: bool = False,
+        outlier: bool = False,
+        depth: Optional[int] = None,
+    ) -> Tuple[EventBase, int]:
+        room_id = event_dict["room_id"]
+
         # If we don't have any prev event IDs specified then we need to
         # check that the host is in the room (as otherwise populating the
         # prev events will fail), at which point we may as well check the
@@ -1923,7 +1956,10 @@ class EventCreationHandler:
             )
 
             for room_id in room_ids:
-                dummy_event_sent = await self._send_dummy_event_for_room(room_id)
+                async with self._worker_lock_handler.acquire_read_write_lock(
+                    DELETE_ROOM_LOCK_NAME, room_id, write=False
+                ):
+                    dummy_event_sent = await self._send_dummy_event_for_room(room_id)
 
             if not dummy_event_sent:
                 # Did not find a valid user in the room, so remove from future attempts
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 19b8728db9..da34658470 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -46,6 +46,11 @@ logger = logging.getLogger(__name__)
 
 BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD = 3
 
+PURGE_HISTORY_LOCK_NAME = "purge_history_lock"
+
+DELETE_ROOM_LOCK_NAME = "delete_room_lock"
+
+
 @attr.s(slots=True, auto_attribs=True)
 class PurgeStatus:
     """Object tracking the status of a purge request
@@ -142,6 +147,7 @@ class PaginationHandler:
         self._server_name = hs.hostname
         self._room_shutdown_handler = hs.get_room_shutdown_handler()
         self._relations_handler = hs.get_relations_handler()
+        self._worker_locks = hs.get_worker_locks_handler()
 
         self.pagination_lock = ReadWriteLock()
         # IDs of rooms in which there is currently an active purge *or delete* operation.
@@ -356,7 +362,9 @@ class PaginationHandler:
         """
         self._purges_in_progress_by_room.add(room_id)
         try:
-            async with self.pagination_lock.write(room_id):
+            async with self._worker_locks.acquire_read_write_lock(
+                PURGE_HISTORY_LOCK_NAME, room_id, write=True
+            ):
                 await self._storage_controllers.purge_events.purge_history(
                     room_id, token, delete_local_events
                 )
@@ -412,7 +420,10 @@ class PaginationHandler:
             room_id: room to be purged
             force: set true to skip checking for joined users.
""" - async with self.pagination_lock.write(room_id): + async with self._worker_locks.acquire_multi_read_write_lock( + [(PURGE_HISTORY_LOCK_NAME, room_id), (DELETE_ROOM_LOCK_NAME, room_id)], + write=True, + ): # first check that we have no users in this room if not force: joined = await self.store.is_host_joined(room_id, self._server_name) @@ -471,7 +482,9 @@ class PaginationHandler: room_token = from_token.room_key - async with self.pagination_lock.read(room_id): + async with self._worker_locks.acquire_read_write_lock( + PURGE_HISTORY_LOCK_NAME, room_id, write=False + ): (membership, member_event_id) = (None, None) if not use_admin_priviledge: ( @@ -747,7 +760,9 @@ class PaginationHandler: self._purges_in_progress_by_room.add(room_id) try: - async with self.pagination_lock.write(room_id): + async with self._worker_locks.acquire_read_write_lock( + PURGE_HISTORY_LOCK_NAME, room_id, write=True + ): self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN self._delete_by_id[ delete_id diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 496e701f13..6cca2ec344 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -39,6 +39,7 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler +from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME from synapse.logging import opentracing from synapse.metrics import event_processing_positions from synapse.metrics.background_process_metrics import run_as_background_process @@ -94,6 +95,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): self.event_creation_handler = hs.get_event_creation_handler() self.account_data_handler = hs.get_account_data_handler() self.event_auth_handler = hs.get_event_auth_handler() + self._worker_lock_handler = hs.get_worker_locks_handler() self.member_linearizer: Linearizer = Linearizer(name="member") self.member_as_limiter = Linearizer(max_count=10, name="member_as_limiter") @@ -638,26 +640,29 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # by application services), and then by room ID. 
async with self.member_as_limiter.queue(as_id): async with self.member_linearizer.queue(key): - with opentracing.start_active_span("update_membership_locked"): - result = await self.update_membership_locked( - requester, - target, - room_id, - action, - txn_id=txn_id, - remote_room_hosts=remote_room_hosts, - third_party_signed=third_party_signed, - ratelimit=ratelimit, - content=content, - new_room=new_room, - require_consent=require_consent, - outlier=outlier, - allow_no_prev_events=allow_no_prev_events, - prev_event_ids=prev_event_ids, - state_event_ids=state_event_ids, - depth=depth, - origin_server_ts=origin_server_ts, - ) + async with self._worker_lock_handler.acquire_read_write_lock( + DELETE_ROOM_LOCK_NAME, room_id, write=False + ): + with opentracing.start_active_span("update_membership_locked"): + result = await self.update_membership_locked( + requester, + target, + room_id, + action, + txn_id=txn_id, + remote_room_hosts=remote_room_hosts, + third_party_signed=third_party_signed, + ratelimit=ratelimit, + content=content, + new_room=new_room, + require_consent=require_consent, + outlier=outlier, + allow_no_prev_events=allow_no_prev_events, + prev_event_ids=prev_event_ids, + state_event_ids=state_event_ids, + depth=depth, + origin_server_ts=origin_server_ts, + ) return result diff --git a/synapse/handlers/worker_lock.py b/synapse/handlers/worker_lock.py new file mode 100644 index 0000000000..72df773a86 --- /dev/null +++ b/synapse/handlers/worker_lock.py @@ -0,0 +1,333 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +from types import TracebackType +from typing import ( + TYPE_CHECKING, + AsyncContextManager, + Collection, + Dict, + Optional, + Tuple, + Type, + Union, +) +from weakref import WeakSet + +import attr + +from twisted.internet import defer +from twisted.internet.interfaces import IReactorTime + +from synapse.logging.context import PreserveLoggingContext +from synapse.logging.opentracing import start_active_span +from synapse.metrics.background_process_metrics import wrap_as_background_process +from synapse.storage.databases.main.lock import Lock, LockStore +from synapse.util.async_helpers import timeout_deferred + +if TYPE_CHECKING: + from synapse.logging.opentracing import opentracing + from synapse.server import HomeServer + + +DELETE_ROOM_LOCK_NAME = "delete_room_lock" + + +class WorkerLocksHandler: + """A class for waiting on taking out locks, rather than using the storage + functions directly (which don't support awaiting). + """ + + def __init__(self, hs: "HomeServer") -> None: + self._reactor = hs.get_reactor() + self._store = hs.get_datastores().main + self._clock = hs.get_clock() + self._notifier = hs.get_notifier() + self._instance_name = hs.get_instance_name() + + # Map from lock name/key to set of `WaitingLock` that are active for + # that lock. 
+ self._locks: Dict[ + Tuple[str, str], WeakSet[Union[WaitingLock, WaitingMultiLock]] + ] = {} + + self._clock.looping_call(self._cleanup_locks, 30_000) + + self._notifier.add_lock_released_callback(self._on_lock_released) + + def acquire_lock(self, lock_name: str, lock_key: str) -> "WaitingLock": + """Acquire a standard lock, returns a context manager that will block + until the lock is acquired. + + Note: Care must be taken to avoid deadlocks. In particular, this + function does *not* timeout. + + Usage: + async with handler.acquire_lock(name, key): + # Do work while holding the lock... + """ + + lock = WaitingLock( + reactor=self._reactor, + store=self._store, + handler=self, + lock_name=lock_name, + lock_key=lock_key, + write=None, + ) + + self._locks.setdefault((lock_name, lock_key), WeakSet()).add(lock) + + return lock + + def acquire_read_write_lock( + self, + lock_name: str, + lock_key: str, + *, + write: bool, + ) -> "WaitingLock": + """Acquire a read/write lock, returns a context manager that will block + until the lock is acquired. + + Note: Care must be taken to avoid deadlocks. In particular, this + function does *not* timeout. + + Usage: + async with handler.acquire_read_write_lock(name, key, write=True): + # Do work while holding the lock... + """ + + lock = WaitingLock( + reactor=self._reactor, + store=self._store, + handler=self, + lock_name=lock_name, + lock_key=lock_key, + write=write, + ) + + self._locks.setdefault((lock_name, lock_key), WeakSet()).add(lock) + + return lock + + def acquire_multi_read_write_lock( + self, + lock_names: Collection[Tuple[str, str]], + *, + write: bool, + ) -> "WaitingMultiLock": + """Acquires multi read/write locks at once, returns a context manager + that will block until all the locks are acquired. + + This will try and acquire all locks at once, and will never hold on to a + subset of the locks. (This avoids accidentally creating deadlocks). + + Note: Care must be taken to avoid deadlocks. In particular, this + function does *not* timeout. + """ + + lock = WaitingMultiLock( + lock_names=lock_names, + write=write, + reactor=self._reactor, + store=self._store, + handler=self, + ) + + for lock_name, lock_key in lock_names: + self._locks.setdefault((lock_name, lock_key), WeakSet()).add(lock) + + return lock + + def notify_lock_released(self, lock_name: str, lock_key: str) -> None: + """Notify that a lock has been released. + + Pokes both the notifier and replication. + """ + + self._notifier.notify_lock_released(self._instance_name, lock_name, lock_key) + + def _on_lock_released( + self, instance_name: str, lock_name: str, lock_key: str + ) -> None: + """Called when a lock has been released. + + Wakes up any locks that might be waiting on this. 
+        """
+        locks = self._locks.get((lock_name, lock_key))
+        if not locks:
+            return
+
+        def _wake_deferred(deferred: defer.Deferred) -> None:
+            if not deferred.called:
+                deferred.callback(None)
+
+        for lock in locks:
+            self._clock.call_later(0, _wake_deferred, lock.deferred)
+
+    @wrap_as_background_process("_cleanup_locks")
+    async def _cleanup_locks(self) -> None:
+        """Periodically cleans out stale entries in the locks map"""
+        self._locks = {key: value for key, value in self._locks.items() if value}
+
+
+@attr.s(auto_attribs=True, eq=False)
+class WaitingLock:
+    reactor: IReactorTime
+    store: LockStore
+    handler: WorkerLocksHandler
+    lock_name: str
+    lock_key: str
+    write: Optional[bool]
+    deferred: "defer.Deferred[None]" = attr.Factory(defer.Deferred)
+    _inner_lock: Optional[Lock] = None
+    _retry_interval: float = 0.1
+    _lock_span: "opentracing.Scope" = attr.Factory(
+        lambda: start_active_span("WaitingLock.lock")
+    )
+
+    async def __aenter__(self) -> None:
+        self._lock_span.__enter__()
+
+        with start_active_span("WaitingLock.waiting_for_lock"):
+            while self._inner_lock is None:
+                self.deferred = defer.Deferred()
+
+                if self.write is not None:
+                    lock = await self.store.try_acquire_read_write_lock(
+                        self.lock_name, self.lock_key, write=self.write
+                    )
+                else:
+                    lock = await self.store.try_acquire_lock(
+                        self.lock_name, self.lock_key
+                    )
+
+                if lock:
+                    self._inner_lock = lock
+                    break
+
+                try:
+                    # Wait until we get notified that the lock might have been
+                    # released (by the deferred being resolved). We also
+                    # periodically wake up in case the lock was released but we
+                    # weren't notified.
+                    with PreserveLoggingContext():
+                        await timeout_deferred(
+                            deferred=self.deferred,
+                            timeout=self._get_next_retry_interval(),
+                            reactor=self.reactor,
+                        )
+                except Exception:
+                    pass
+
+        return await self._inner_lock.__aenter__()
+
+    async def __aexit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc: Optional[BaseException],
+        tb: Optional[TracebackType],
+    ) -> Optional[bool]:
+        assert self._inner_lock
+
+        self.handler.notify_lock_released(self.lock_name, self.lock_key)
+
+        try:
+            r = await self._inner_lock.__aexit__(exc_type, exc, tb)
+        finally:
+            self._lock_span.__exit__(exc_type, exc, tb)
+
+        return r
+
+    def _get_next_retry_interval(self) -> float:
+        next = self._retry_interval
+        self._retry_interval = max(5, next * 2)
+        return next * random.uniform(0.9, 1.1)
+
+
+@attr.s(auto_attribs=True, eq=False)
+class WaitingMultiLock:
+    lock_names: Collection[Tuple[str, str]]
+
+    write: bool
+
+    reactor: IReactorTime
+    store: LockStore
+    handler: WorkerLocksHandler
+
+    deferred: "defer.Deferred[None]" = attr.Factory(defer.Deferred)
+
+    _inner_lock_cm: Optional[AsyncContextManager] = None
+    _retry_interval: float = 0.1
+    _lock_span: "opentracing.Scope" = attr.Factory(
+        lambda: start_active_span("WaitingLock.lock")
+    )
+
+    async def __aenter__(self) -> None:
+        self._lock_span.__enter__()
+
+        with start_active_span("WaitingLock.waiting_for_lock"):
+            while self._inner_lock_cm is None:
+                self.deferred = defer.Deferred()
+
+                lock_cm = await self.store.try_acquire_multi_read_write_lock(
+                    self.lock_names, write=self.write
+                )
+
+                if lock_cm:
+                    self._inner_lock_cm = lock_cm
+                    break
+
+                try:
+                    # Wait until we get notified that the lock might have been
+                    # released (by the deferred being resolved). We also
+                    # periodically wake up in case the lock was released but we
+                    # weren't notified.
+                    with PreserveLoggingContext():
+                        await timeout_deferred(
+                            deferred=self.deferred,
+                            timeout=self._get_next_retry_interval(),
+                            reactor=self.reactor,
+                        )
+                except Exception:
+                    pass
+
+        assert self._inner_lock_cm
+        await self._inner_lock_cm.__aenter__()
+        return
+
+    async def __aexit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc: Optional[BaseException],
+        tb: Optional[TracebackType],
+    ) -> Optional[bool]:
+        assert self._inner_lock_cm
+
+        for lock_name, lock_key in self.lock_names:
+            self.handler.notify_lock_released(lock_name, lock_key)
+
+        try:
+            r = await self._inner_lock_cm.__aexit__(exc_type, exc, tb)
+        finally:
+            self._lock_span.__exit__(exc_type, exc, tb)
+
+        return r
+
+    def _get_next_retry_interval(self) -> float:
+        next = self._retry_interval
+        self._retry_interval = max(5, next * 2)
+        return next * random.uniform(0.9, 1.1)
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 897272ad5b..68115bca70 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -234,6 +234,9 @@ class Notifier:
 
         self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules
 
+        # List of callbacks to be notified when a lock is released
+        self._lock_released_callback: List[Callable[[str, str, str], None]] = []
+
         self.clock = hs.get_clock()
         self.appservice_handler = hs.get_application_service_handler()
         self._pusher_pool = hs.get_pusherpool()
@@ -785,6 +788,19 @@ class Notifier:
             # that any in flight requests can be immediately retried.
             self._federation_client.wake_destination(server)
 
+    def add_lock_released_callback(
+        self, callback: Callable[[str, str, str], None]
+    ) -> None:
+        """Add a function to be called whenever we are notified about a released lock."""
+        self._lock_released_callback.append(callback)
+
+    def notify_lock_released(
+        self, instance_name: str, lock_name: str, lock_key: str
+    ) -> None:
+        """Notify the callbacks that a lock has been released."""
+        for cb in self._lock_released_callback:
+            cb(instance_name, lock_name, lock_key)
+
 
 @attr.s(auto_attribs=True)
 class ReplicationNotifier:
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index 32f52e54d8..10f5c98ff8 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -422,6 +422,36 @@ class RemoteServerUpCommand(_SimpleCommand):
     NAME = "REMOTE_SERVER_UP"
 
 
+class LockReleasedCommand(Command):
+    """Sent to inform other instances that a given lock has been dropped.
+
+    Format::
+
+        LOCK_RELEASED ["<instance_name>", "<lock_name>", "<lock_key>"]
+    """
+
+    NAME = "LOCK_RELEASED"
+
+    def __init__(
+        self,
+        instance_name: str,
+        lock_name: str,
+        lock_key: str,
+    ):
+        self.instance_name = instance_name
+        self.lock_name = lock_name
+        self.lock_key = lock_key
+
+    @classmethod
+    def from_line(cls: Type["LockReleasedCommand"], line: str) -> "LockReleasedCommand":
+        instance_name, lock_name, lock_key = json_decoder.decode(line)
+
+        return cls(instance_name, lock_name, lock_key)
+
+    def to_line(self) -> str:
+        return json_encoder.encode([self.instance_name, self.lock_name, self.lock_key])
+
+
 _COMMANDS: Tuple[Type[Command], ...] = (
     ServerCommand,
     RdataCommand,
@@ -435,6 +465,7 @@ _COMMANDS: Tuple[Type[Command], ...] = (
     UserIpCommand,
     RemoteServerUpCommand,
     ClearUserSyncsCommand,
+    LockReleasedCommand,
 )
 
 # Map of command name to command type.
@@ -448,6 +479,7 @@ VALID_SERVER_COMMANDS = ( ErrorCommand.NAME, PingCommand.NAME, RemoteServerUpCommand.NAME, + LockReleasedCommand.NAME, ) # The commands the client is allowed to send @@ -461,6 +493,7 @@ VALID_CLIENT_COMMANDS = ( UserIpCommand.NAME, ErrorCommand.NAME, RemoteServerUpCommand.NAME, + LockReleasedCommand.NAME, ) diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 5d108fe11b..a2cabba7b1 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -39,6 +39,7 @@ from synapse.replication.tcp.commands import ( ClearUserSyncsCommand, Command, FederationAckCommand, + LockReleasedCommand, PositionCommand, RdataCommand, RemoteServerUpCommand, @@ -248,6 +249,9 @@ class ReplicationCommandHandler: if self._is_master or self._should_insert_client_ips: self.subscribe_to_channel("USER_IP") + if hs.config.redis.redis_enabled: + self._notifier.add_lock_released_callback(self.on_lock_released) + def subscribe_to_channel(self, channel_name: str) -> None: """ Indicates that we wish to subscribe to a Redis channel by name. @@ -648,6 +652,17 @@ class ReplicationCommandHandler: self._notifier.notify_remote_server_up(cmd.data) + def on_LOCK_RELEASED( + self, conn: IReplicationConnection, cmd: LockReleasedCommand + ) -> None: + """Called when we get a new LOCK_RELEASED command.""" + if cmd.instance_name == self._instance_name: + return + + self._notifier.notify_lock_released( + cmd.instance_name, cmd.lock_name, cmd.lock_key + ) + def new_connection(self, connection: IReplicationConnection) -> None: """Called when we have a new connection.""" self._connections.append(connection) @@ -754,6 +769,13 @@ class ReplicationCommandHandler: """ self.send_command(RdataCommand(stream_name, self._instance_name, token, data)) + def on_lock_released( + self, instance_name: str, lock_name: str, lock_key: str + ) -> None: + """Called when we released a lock and should notify other instances.""" + if instance_name == self._instance_name: + self.send_command(LockReleasedCommand(instance_name, lock_name, lock_key)) + UpdateToken = TypeVar("UpdateToken") UpdateRow = TypeVar("UpdateRow") diff --git a/synapse/rest/client/room_upgrade_rest_servlet.py b/synapse/rest/client/room_upgrade_rest_servlet.py index 6a7792e18b..4a5d9e13e7 100644 --- a/synapse/rest/client/room_upgrade_rest_servlet.py +++ b/synapse/rest/client/room_upgrade_rest_servlet.py @@ -17,6 +17,7 @@ from typing import TYPE_CHECKING, Tuple from synapse.api.errors import Codes, ShadowBanError, SynapseError from synapse.api.room_versions import KNOWN_ROOM_VERSIONS +from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME from synapse.http.server import HttpServer from synapse.http.servlet import ( RestServlet, @@ -60,6 +61,7 @@ class RoomUpgradeRestServlet(RestServlet): self._hs = hs self._room_creation_handler = hs.get_room_creation_handler() self._auth = hs.get_auth() + self._worker_lock_handler = hs.get_worker_locks_handler() async def on_POST( self, request: SynapseRequest, room_id: str @@ -78,9 +80,12 @@ class RoomUpgradeRestServlet(RestServlet): ) try: - new_room_id = await self._room_creation_handler.upgrade_room( - requester, room_id, new_version - ) + async with self._worker_lock_handler.acquire_read_write_lock( + DELETE_ROOM_LOCK_NAME, room_id, write=False + ): + new_room_id = await self._room_creation_handler.upgrade_room( + requester, room_id, new_version + ) except ShadowBanError: # Generate a random room ID. 
new_room_id = stringutils.random_string(18) diff --git a/synapse/server.py b/synapse/server.py index b72b76a38b..8430f99ef2 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -107,6 +107,7 @@ from synapse.handlers.stats import StatsHandler from synapse.handlers.sync import SyncHandler from synapse.handlers.typing import FollowerTypingHandler, TypingWriterHandler from synapse.handlers.user_directory import UserDirectoryHandler +from synapse.handlers.worker_lock import WorkerLocksHandler from synapse.http.client import ( InsecureInterceptableContextFactory, ReplicationClient, @@ -912,3 +913,7 @@ class HomeServer(metaclass=abc.ABCMeta): def get_common_usage_metrics_manager(self) -> CommonUsageMetricsManager: """Usage metrics shared between phone home stats and the prometheus exporter.""" return CommonUsageMetricsManager(self) + + @cache_in_self + def get_worker_locks_handler(self) -> WorkerLocksHandler: + return WorkerLocksHandler(self) diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index 35c0680365..35cd1089d6 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -45,6 +45,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.events import EventBase from synapse.events.snapshot import EventContext +from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable from synapse.logging.opentracing import ( SynapseTags, @@ -338,6 +339,7 @@ class EventsPersistenceStorageController: ) self._state_resolution_handler = hs.get_state_resolution_handler() self._state_controller = state_controller + self.hs = hs async def _process_event_persist_queue_task( self, @@ -350,15 +352,22 @@ class EventsPersistenceStorageController: A dictionary of event ID to event ID we didn't persist as we already had another event persisted with the same TXN ID. """ - if isinstance(task, _PersistEventsTask): - return await self._persist_event_batch(room_id, task) - elif isinstance(task, _UpdateCurrentStateTask): - await self._update_current_state(room_id, task) - return {} - else: - raise AssertionError( - f"Found an unexpected task type in event persistence queue: {task}" - ) + + # Ensure that the room can't be deleted while we're persisting events to + # it. We might already have taken out the lock, but since this is just a + # "read" lock its inherently reentrant. + async with self.hs.get_worker_locks_handler().acquire_read_write_lock( + DELETE_ROOM_LOCK_NAME, room_id, write=False + ): + if isinstance(task, _PersistEventsTask): + return await self._persist_event_batch(room_id, task) + elif isinstance(task, _UpdateCurrentStateTask): + await self._update_current_state(room_id, task) + return {} + else: + raise AssertionError( + f"Found an unexpected task type in event persistence queue: {task}" + ) @trace async def persist_events( diff --git a/synapse/storage/databases/main/lock.py b/synapse/storage/databases/main/lock.py index c89b4f7919..1680bf6168 100644 --- a/synapse/storage/databases/main/lock.py +++ b/synapse/storage/databases/main/lock.py @@ -12,8 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging +from contextlib import AsyncExitStack from types import TracebackType -from typing import TYPE_CHECKING, Optional, Set, Tuple, Type +from typing import TYPE_CHECKING, Collection, Optional, Set, Tuple, Type from weakref import WeakValueDictionary from twisted.internet.interfaces import IReactorCore @@ -208,77 +209,86 @@ class LockStore(SQLBaseStore): used (otherwise the lock will leak). """ - now = self._clock.time_msec() - token = random_string(6) - - def _try_acquire_read_write_lock_txn(txn: LoggingTransaction) -> None: - # We attempt to acquire the lock by inserting into - # `worker_read_write_locks` and seeing if that fails any - # constraints. If it doesn't then we have acquired the lock, - # otherwise we haven't. - # - # Before that though we clear the table of any stale locks. - - delete_sql = """ - DELETE FROM worker_read_write_locks - WHERE last_renewed_ts < ? AND lock_name = ? AND lock_key = ?; - """ - - insert_sql = """ - INSERT INTO worker_read_write_locks (lock_name, lock_key, write_lock, instance_name, token, last_renewed_ts) - VALUES (?, ?, ?, ?, ?, ?) - """ - - if isinstance(self.database_engine, PostgresEngine): - # For Postgres we can send these queries at the same time. - txn.execute( - delete_sql + ";" + insert_sql, - ( - # DELETE args - now - _LOCK_TIMEOUT_MS, - lock_name, - lock_key, - # UPSERT args - lock_name, - lock_key, - write, - self._instance_name, - token, - now, - ), - ) - else: - # For SQLite these need to be two queries. - txn.execute( - delete_sql, - ( - now - _LOCK_TIMEOUT_MS, - lock_name, - lock_key, - ), - ) - txn.execute( - insert_sql, - ( - lock_name, - lock_key, - write, - self._instance_name, - token, - now, - ), - ) - - return - try: - await self.db_pool.runInteraction( + lock = await self.db_pool.runInteraction( "try_acquire_read_write_lock", - _try_acquire_read_write_lock_txn, + self._try_acquire_read_write_lock_txn, + lock_name, + lock_key, + write, ) except self.database_engine.module.IntegrityError: return None + return lock + + def _try_acquire_read_write_lock_txn( + self, + txn: LoggingTransaction, + lock_name: str, + lock_key: str, + write: bool, + ) -> "Lock": + # We attempt to acquire the lock by inserting into + # `worker_read_write_locks` and seeing if that fails any + # constraints. If it doesn't then we have acquired the lock, + # otherwise we haven't. + # + # Before that though we clear the table of any stale locks. + + now = self._clock.time_msec() + token = random_string(6) + + delete_sql = """ + DELETE FROM worker_read_write_locks + WHERE last_renewed_ts < ? AND lock_name = ? AND lock_key = ?; + """ + + insert_sql = """ + INSERT INTO worker_read_write_locks (lock_name, lock_key, write_lock, instance_name, token, last_renewed_ts) + VALUES (?, ?, ?, ?, ?, ?) + """ + + if isinstance(self.database_engine, PostgresEngine): + # For Postgres we can send these queries at the same time. + txn.execute( + delete_sql + ";" + insert_sql, + ( + # DELETE args + now - _LOCK_TIMEOUT_MS, + lock_name, + lock_key, + # UPSERT args + lock_name, + lock_key, + write, + self._instance_name, + token, + now, + ), + ) + else: + # For SQLite these need to be two queries. 
+ txn.execute( + delete_sql, + ( + now - _LOCK_TIMEOUT_MS, + lock_name, + lock_key, + ), + ) + txn.execute( + insert_sql, + ( + lock_name, + lock_key, + write, + self._instance_name, + token, + now, + ), + ) + lock = Lock( self._reactor, self._clock, @@ -289,10 +299,58 @@ class LockStore(SQLBaseStore): token=token, ) - self._live_read_write_lock_tokens[(lock_name, lock_key, token)] = lock + def set_lock() -> None: + self._live_read_write_lock_tokens[(lock_name, lock_key, token)] = lock + + txn.call_after(set_lock) return lock + async def try_acquire_multi_read_write_lock( + self, + lock_names: Collection[Tuple[str, str]], + write: bool, + ) -> Optional[AsyncExitStack]: + """Try to acquire multiple locks for the given names/keys. Will return + an async context manager if the locks are successfully acquired, which + *must* be used (otherwise the lock will leak). + + If only a subset of the locks can be acquired then it will immediately + drop them and return `None`. + """ + try: + locks = await self.db_pool.runInteraction( + "try_acquire_multi_read_write_lock", + self._try_acquire_multi_read_write_lock_txn, + lock_names, + write, + ) + except self.database_engine.module.IntegrityError: + return None + + stack = AsyncExitStack() + + for lock in locks: + await stack.enter_async_context(lock) + + return stack + + def _try_acquire_multi_read_write_lock_txn( + self, + txn: LoggingTransaction, + lock_names: Collection[Tuple[str, str]], + write: bool, + ) -> Collection["Lock"]: + locks = [] + + for lock_name, lock_key in lock_names: + lock = self._try_acquire_read_write_lock_txn( + txn, lock_name, lock_key, write + ) + locks.append(lock) + + return locks + class Lock: """An async context manager that manages an acquired lock, ensuring it is diff --git a/tests/handlers/test_worker_lock.py b/tests/handlers/test_worker_lock.py new file mode 100644 index 0000000000..73e548726c --- /dev/null +++ b/tests/handlers/test_worker_lock.py @@ -0,0 +1,74 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor + +from synapse.server import HomeServer +from synapse.util import Clock + +from tests import unittest +from tests.replication._base import BaseMultiWorkerStreamTestCase + + +class WorkerLockTestCase(unittest.HomeserverTestCase): + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: + self.worker_lock_handler = self.hs.get_worker_locks_handler() + + def test_wait_for_lock_locally(self) -> None: + """Test waiting for a lock on a single worker""" + + lock1 = self.worker_lock_handler.acquire_lock("name", "key") + self.get_success(lock1.__aenter__()) + + lock2 = self.worker_lock_handler.acquire_lock("name", "key") + d2 = defer.ensureDeferred(lock2.__aenter__()) + self.assertNoResult(d2) + + self.get_success(lock1.__aexit__(None, None, None)) + + self.get_success(d2) + self.get_success(lock2.__aexit__(None, None, None)) + + +class WorkerLockWorkersTestCase(BaseMultiWorkerStreamTestCase): + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: + self.main_worker_lock_handler = self.hs.get_worker_locks_handler() + + def test_wait_for_lock_worker(self) -> None: + """Test waiting for a lock on another worker""" + + worker = self.make_worker_hs( + "synapse.app.generic_worker", + extra_config={ + "redis": {"enabled": True}, + }, + ) + worker_lock_handler = worker.get_worker_locks_handler() + + lock1 = self.main_worker_lock_handler.acquire_lock("name", "key") + self.get_success(lock1.__aenter__()) + + lock2 = worker_lock_handler.acquire_lock("name", "key") + d2 = defer.ensureDeferred(lock2.__aenter__()) + self.assertNoResult(d2) + + self.get_success(lock1.__aexit__(None, None, None)) + + self.get_success(d2) + self.get_success(lock2.__aexit__(None, None, None)) diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index d013e75d55..4f6347be15 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -711,7 +711,7 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(30, channel.resource_usage.db_txn_count) + self.assertEqual(32, channel.resource_usage.db_txn_count) def test_post_room_initial_state(self) -> None: # POST with initial_state config key, expect new room id @@ -724,7 +724,7 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(32, channel.resource_usage.db_txn_count) + self.assertEqual(34, channel.resource_usage.db_txn_count) def test_post_room_visibility_key(self) -> None: # POST with visibility config key, expect new room id diff --git a/tests/storage/databases/main/test_lock.py b/tests/storage/databases/main/test_lock.py index ad454f6dd8..383da83dfb 100644 --- a/tests/storage/databases/main/test_lock.py +++ b/tests/storage/databases/main/test_lock.py @@ -448,3 +448,55 @@ class ReadWriteLockTestCase(unittest.HomeserverTestCase): self.get_success(self.store._on_shutdown()) self.assertEqual(self.store._live_read_write_lock_tokens, {}) + + def test_acquire_multiple_locks(self) -> None: + """Tests that acquiring multiple locks at once works.""" + + # Take out multiple locks and ensure that we can't get those locks out + # again. 
+        lock = self.get_success(
+            self.store.try_acquire_multi_read_write_lock(
+                [("name1", "key1"), ("name2", "key2")], write=True
+            )
+        )
+        self.assertIsNotNone(lock)
+
+        assert lock is not None
+        self.get_success(lock.__aenter__())
+
+        lock2 = self.get_success(
+            self.store.try_acquire_read_write_lock("name1", "key1", write=True)
+        )
+        self.assertIsNone(lock2)
+
+        lock3 = self.get_success(
+            self.store.try_acquire_read_write_lock("name2", "key2", write=False)
+        )
+        self.assertIsNone(lock3)
+
+        # Overlapping lock attempts will fail, and won't acquire any locks.
+        lock4 = self.get_success(
+            self.store.try_acquire_multi_read_write_lock(
+                [("name1", "key1"), ("name3", "key3")], write=True
+            )
+        )
+        self.assertIsNone(lock4)
+
+        lock5 = self.get_success(
+            self.store.try_acquire_read_write_lock("name3", "key3", write=True)
+        )
+        self.assertIsNotNone(lock5)
+        assert lock5 is not None
+        self.get_success(lock5.__aenter__())
+        self.get_success(lock5.__aexit__(None, None, None))
+
+        # Once we release the lock we can take out the locks again.
+        self.get_success(lock.__aexit__(None, None, None))
+
+        lock6 = self.get_success(
+            self.store.try_acquire_read_write_lock("name1", "key1", write=True)
+        )
+        self.assertIsNotNone(lock6)
+        assert lock6 is not None
+        self.get_success(lock6.__aenter__())
+        self.get_success(lock6.__aexit__(None, None, None))

From 21407c67097d725d71c56e1e0f8fbe5fce272da5 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 31 Jul 2023 13:24:32 +0200
Subject: [PATCH 274/562] Bump service-identity from 21.1.0 to 23.1.0 (#16038)

Bumps [service-identity](https://github.com/pyca/service-identity) from 21.1.0 to 23.1.0.
- [Release notes](https://github.com/pyca/service-identity/releases)
- [Changelog](https://github.com/pyca/service-identity/blob/main/CHANGELOG.md)
- [Commits](https://github.com/pyca/service-identity/compare/21.1.0...23.1.0)

---
updated-dependencies:
- dependency-name: service-identity
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 poetry.lock | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index e8ceb9f3a7..ec1c70c821 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2427,13 +2427,13 @@ tornado = ["tornado (>=5)"]
 
 [[package]]
 name = "service-identity"
-version = "21.1.0"
+version = "23.1.0"
 description = "Service identity verification for pyOpenSSL & cryptography."
optional = false -python-versions = "*" +python-versions = ">=3.8" files = [ - {file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"}, - {file = "service_identity-21.1.0-py2.py3-none-any.whl", hash = "sha256:f0b0caac3d40627c3c04d7a51b6e06721857a0e10a8775f2d1d7e72901b3a7db"}, + {file = "service_identity-23.1.0-py3-none-any.whl", hash = "sha256:87415a691d52fcad954a500cb81f424d0273f8e7e3ee7d766128f4575080f383"}, + {file = "service_identity-23.1.0.tar.gz", hash = "sha256:ecb33cd96307755041e978ab14f8b14e13b40f1fbd525a4dc78f46d2b986431d"}, ] [package.dependencies] @@ -2441,12 +2441,12 @@ attrs = ">=19.1.0" cryptography = "*" pyasn1 = "*" pyasn1-modules = "*" -six = "*" [package.extras] -dev = ["coverage[toml] (>=5.0.2)", "furo", "idna", "pyOpenSSL", "pytest", "sphinx"] -docs = ["furo", "sphinx"] +dev = ["pyopenssl", "service-identity[docs,idna,mypy,tests]"] +docs = ["furo", "myst-parser", "pyopenssl", "sphinx", "sphinx-notfound-page"] idna = ["idna"] +mypy = ["idna", "mypy", "types-pyopenssl"] tests = ["coverage[toml] (>=5.0.2)", "pytest"] [[package]] From e02f4b7de287f49476ee5b60e3e439eb8bb11047 Mon Sep 17 00:00:00 2001 From: Nils Date: Mon, 31 Jul 2023 13:25:06 +0200 Subject: [PATCH 275/562] Do not expose Admin API in caddy reverse proxy example (#16027) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Nils ANDRÉ-CHANG --- changelog.d/16027.doc | 1 + docs/reverse_proxy.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16027.doc diff --git a/changelog.d/16027.doc b/changelog.d/16027.doc new file mode 100644 index 0000000000..201e88d6b6 --- /dev/null +++ b/changelog.d/16027.doc @@ -0,0 +1 @@ +Do not expose Admin API in caddy reverse proxy example. Contributed by @NilsIrl. diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md index 06337e7c00..fe9519b4b6 100644 --- a/docs/reverse_proxy.md +++ b/docs/reverse_proxy.md @@ -95,7 +95,7 @@ matrix.example.com { } example.com:8448 { - reverse_proxy localhost:8008 + reverse_proxy /_matrix/* localhost:8008 } ``` From fa2c116befee281d1fc35046e22373a6747a2751 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Jul 2023 13:27:17 +0200 Subject: [PATCH 276/562] Bump immutabledict from 2.2.4 to 3.0.0 (#16034) Bumps [immutabledict](https://github.com/corenting/immutabledict) from 2.2.4 to 3.0.0. - [Release notes](https://github.com/corenting/immutabledict/releases) - [Changelog](https://github.com/corenting/immutabledict/blob/master/CHANGELOG.md) - [Commits](https://github.com/corenting/immutabledict/compare/v2.2.4...v3.0.0) --- updated-dependencies: - dependency-name: immutabledict dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index ec1c70c821..94baffe496 100644 --- a/poetry.lock +++ b/poetry.lock @@ -824,13 +824,13 @@ files = [ [[package]] name = "immutabledict" -version = "2.2.4" +version = "3.0.0" description = "Immutable wrapper around dictionaries (a fork of frozendict)" optional = false -python-versions = ">=3.7,<4.0" +python-versions = ">=3.8,<4.0" files = [ - {file = "immutabledict-2.2.4-py3-none-any.whl", hash = "sha256:c827715c147d2364522f9a7709cc424c7001015274a3c705250e673605bde64b"}, - {file = "immutabledict-2.2.4.tar.gz", hash = "sha256:3bedc0741faaa2846f6edf5c29183f993da3abaff6a5961bb70a5659bb9e68ab"}, + {file = "immutabledict-3.0.0-py3-none-any.whl", hash = "sha256:034bacc6c6872707c4ec0ea9515de6bbe0dcf0fcabd97ae19fd4e4c338f05798"}, + {file = "immutabledict-3.0.0.tar.gz", hash = "sha256:5a23cd369a6187f76a8c29d7d687980b092538eb9800e58964603f1b973c56fe"}, ] [[package]] From 1fb5a7ad5dde4e7e120b05d24d47c5b569acc1a8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Jul 2023 14:08:35 +0200 Subject: [PATCH 277/562] Bump serde from 1.0.175 to 1.0.179 (#16033) Bumps [serde](https://github.com/serde-rs/serde) from 1.0.175 to 1.0.179. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.175...v1.0.179) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index caf693877b..d28fe3c228 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -332,18 +332,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.175" +version = "1.0.179" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d25439cd7397d044e2748a6fe2432b5e85db703d6d097bd014b3c0ad1ebff0b" +checksum = "0a5bf42b8d227d4abf38a1ddb08602e229108a517cd4e5bb28f9c7eaafdce5c0" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.175" +version = "1.0.179" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b23f7ade6f110613c0d63858ddb8b94c1041f550eab58a16b371bdf2c9c80ab4" +checksum = "741e124f5485c7e60c03b043f79f320bff3527f4bbf12cf3831750dc46a0ec2c" dependencies = [ "proc-macro2", "quote", From b7695ac38843d679b7121495729e0d433c37688e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 31 Jul 2023 08:44:45 -0400 Subject: [PATCH 278/562] Combine duplicated code for calculating an event ID from a txn ID (#16023) Refactoring related to stabilization of MSC3970, refactor to combine code which has the same logic. --- changelog.d/16023.misc | 1 + synapse/handlers/message.py | 81 +++++++++++++++++++++------------ synapse/handlers/room_member.py | 28 ++---------- 3 files changed, 57 insertions(+), 53 deletions(-) create mode 100644 changelog.d/16023.misc diff --git a/changelog.d/16023.misc b/changelog.d/16023.misc new file mode 100644 index 0000000000..ee732318e4 --- /dev/null +++ b/changelog.d/16023.misc @@ -0,0 +1 @@ +Combine duplicated code. 
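For orientation before the diff below: the new `get_event_id_from_transaction` helper centralises the transaction-ID deduplication that previously lived in two places. The lookup is scoped to the requester's device ID when MSC3970 is enabled, and falls back to the older access-token-ID scoping otherwise. A minimal sketch of that shape, with a hypothetical `TxnStore` protocol standing in for the real datastore methods (not the Synapse implementation itself):

    from typing import Optional, Protocol


    class TxnStore(Protocol):
        """Hypothetical storage interface, for illustration only."""

        async def event_id_by_device(
            self, room_id: str, user_id: str, device_id: str, txn_id: str
        ) -> Optional[str]:
            ...

        async def event_id_by_token(
            self, room_id: str, user_id: str, token_id: int, txn_id: str
        ) -> Optional[str]:
            ...


    async def dedupe_event_id(
        store: TxnStore,
        room_id: str,
        user_id: str,
        txn_id: str,
        device_id: Optional[str],
        access_token_id: Optional[int],
        msc3970_enabled: bool,
    ) -> Optional[str]:
        # Prefer the device-scoped mapping (MSC3970), then fall back to the
        # access-token-scoped mapping used by pre-MSC3970 sessions.
        if msc3970_enabled and device_id:
            event_id = await store.event_id_by_device(room_id, user_id, device_id, txn_id)
            if event_id:
                return event_id
        if access_token_id is not None:
            return await store.event_id_by_token(room_id, user_id, access_token_id, txn_id)
        return None
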
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 187dedae7d..c656e07d37 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -878,6 +878,53 @@ class EventCreationHandler:
                 return prev_event
         return None
 
+    async def get_event_id_from_transaction(
+        self,
+        requester: Requester,
+        txn_id: str,
+        room_id: str,
+    ) -> Optional[str]:
+        """For the given transaction ID and room ID, check if there is a matching event ID.
+
+        Args:
+            requester: The requester making the request in the context of which we want
+                to fetch the event.
+            txn_id: The transaction ID.
+            room_id: The room ID.
+
+        Returns:
+            An event ID if one could be found, None otherwise.
+        """
+        existing_event_id = None
+
+        if self._msc3970_enabled and requester.device_id:
+            # When MSC3970 is enabled, we look up events sent by the same device first,
+            # and fall back to the old behaviour if none were found.
+            existing_event_id = (
+                await self.store.get_event_id_from_transaction_id_and_device_id(
+                    room_id,
+                    requester.user.to_string(),
+                    requester.device_id,
+                    txn_id,
+                )
+            )
+            if existing_event_id:
+                return existing_event_id
+
+        # Pre-MSC3970, we looked up events that were sent by the same session by
+        # using the access token ID.
+        if requester.access_token_id:
+            existing_event_id = (
+                await self.store.get_event_id_from_transaction_id_and_token_id(
+                    room_id,
+                    requester.user.to_string(),
+                    requester.access_token_id,
+                    txn_id,
+                )
+            )
+
+        return existing_event_id
+
     async def get_event_from_transaction(
         self,
         requester: Requester,
@@ -896,35 +943,11 @@ class EventCreationHandler:
         Returns:
             An event if one could be found, None otherwise.
         """
-
-        if self._msc3970_enabled and requester.device_id:
-            # When MSC3970 is enabled, we lookup for events sent by the same device first,
-            # and fallback to the old behaviour if none were found.
-            existing_event_id = (
-                await self.store.get_event_id_from_transaction_id_and_device_id(
-                    room_id,
-                    requester.user.to_string(),
-                    requester.device_id,
-                    txn_id,
-                )
-            )
-            if existing_event_id:
-                return await self.store.get_event(existing_event_id)
-
-        # Pre-MSC3970, we looked up for events that were sent by the same session by
-        # using the access token ID.
-        if requester.access_token_id:
-            existing_event_id = (
-                await self.store.get_event_id_from_transaction_id_and_token_id(
-                    room_id,
-                    requester.user.to_string(),
-                    requester.access_token_id,
-                    txn_id,
-                )
-            )
-            if existing_event_id:
-                return await self.store.get_event(existing_event_id)
-
+        existing_event_id = await self.get_event_id_from_transaction(
+            requester, txn_id, room_id
+        )
+        if existing_event_id:
+            return await self.store.get_event(existing_event_id)
         return None
 
     async def create_and_send_nonmember_event(
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 6cca2ec344..e3cdf2bc61 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -176,8 +176,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         self.request_ratelimiter = hs.get_request_ratelimiter()
         hs.get_notifier().add_new_join_in_room_callback(self._on_user_joined_room)
 
-        self._msc3970_enabled = hs.config.experimental.msc3970_enabled
-
     def _on_user_joined_room(self, event_id: str, room_id: str) -> None:
         """Notify the rate limiter that a room join has occurred.
 
@@ -418,29 +416,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         # do this check just before we persist an event as well, but may as well
         # do it up front for efficiency.)
if txn_id: - existing_event_id = None - if self._msc3970_enabled and requester.device_id: - # When MSC3970 is enabled, we lookup for events sent by the same device - # first, and fallback to the old behaviour if none were found. - existing_event_id = ( - await self.store.get_event_id_from_transaction_id_and_device_id( - room_id, - requester.user.to_string(), - requester.device_id, - txn_id, - ) + existing_event_id = ( + await self.event_creation_handler.get_event_id_from_transaction( + requester, txn_id, room_id ) - - if requester.access_token_id and not existing_event_id: - existing_event_id = ( - await self.store.get_event_id_from_transaction_id_and_token_id( - room_id, - requester.user.to_string(), - requester.access_token_id, - txn_id, - ) - ) - + ) if existing_event_id: event_pos = await self.store.get_position_for_event(existing_event_id) return existing_event_id, event_pos.stream From 190c990a76ac0faaaec31340a721cee4d172016a Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 1 Aug 2023 11:09:30 +0100 Subject: [PATCH 279/562] 1.89.0 --- CHANGES.md | 5 +++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index c0570b1fd0..74125613f2 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,8 @@ +# Synapse 1.89.0 (2023-08-01) + +No significant changes since 1.89.0rc1. + + # Synapse 1.89.0rc1 (2023-07-25) ### Features diff --git a/debian/changelog b/debian/changelog index 384edbdab1..90240b8082 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.89.0) stable; urgency=medium + + * New Synapse release 1.89.0. + + -- Synapse Packaging team Tue, 01 Aug 2023 11:07:15 +0100 + matrix-synapse-py3 (1.89.0~rc1) stable; urgency=medium * New Synapse release 1.89.0rc1. diff --git a/pyproject.toml b/pyproject.toml index 89c5edb4db..8304d25221 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.89.0rc1" +version = "1.89.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 7cbb2a00d1ed07d42c6fa1fb226db512cd2a6b90 Mon Sep 17 00:00:00 2001 From: Jason Little Date: Tue, 1 Aug 2023 07:10:49 -0500 Subject: [PATCH 280/562] Add metrics tracking for eviction to ResponseCache (#16028) Track whether the ResponseCache is evicting due to invalidation or due to time. --- changelog.d/16028.misc | 1 + synapse/util/caches/response_cache.py | 10 ++++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16028.misc diff --git a/changelog.d/16028.misc b/changelog.d/16028.misc new file mode 100644 index 0000000000..3a1e9fef09 --- /dev/null +++ b/changelog.d/16028.misc @@ -0,0 +1 @@ +Collect additional metrics from `ResponseCache` for eviction. 
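For context on the diff below: the change splits the single removal path in two so each eviction reason can be counted separately, explicit invalidation versus the timed removal scheduled once a response has been produced. A toy sketch of that split (illustrative names only, not Synapse's `register_cache` metrics plumbing):

    from collections import Counter
    from typing import Any, Dict


    class TinyResponseCache:
        """Illustrative only: counts why entries leave the cache."""

        def __init__(self) -> None:
            self._result_cache: Dict[Any, Any] = {}
            self.evictions: Counter = Counter()

        def unset(self, key: Any) -> None:
            # Explicit invalidation, e.g. the result should not be cached.
            self.evictions["invalidation"] += 1
            self._result_cache.pop(key, None)

        def _entry_timeout(self, key: Any) -> None:
            # Timed expiry, scheduled via clock.call_later in the real code.
            self.evictions["time"] += 1
            self._result_cache.pop(key, None)
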
diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index 340e5e9145..0cb46700a9 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -36,7 +36,7 @@ from synapse.logging.opentracing import ( ) from synapse.util import Clock from synapse.util.async_helpers import AbstractObservableDeferred, ObservableDeferred -from synapse.util.caches import register_cache +from synapse.util.caches import EvictionReason, register_cache logger = logging.getLogger(__name__) @@ -167,7 +167,7 @@ class ResponseCache(Generic[KV]): # the should_cache bit, we leave it in the cache for now and schedule # its removal later. if self.timeout_sec and context.should_cache: - self.clock.call_later(self.timeout_sec, self.unset, key) + self.clock.call_later(self.timeout_sec, self._entry_timeout, key) else: # otherwise, remove the result immediately. self.unset(key) @@ -185,6 +185,12 @@ class ResponseCache(Generic[KV]): Args: key: key used to remove the cached value """ + self._metrics.inc_evictions(EvictionReason.invalidation) + self._result_cache.pop(key, None) + + def _entry_timeout(self, key: KV) -> None: + """For the call_later to remove from the cache""" + self._metrics.inc_evictions(EvictionReason.time) self._result_cache.pop(key, None) async def wrap( From 5eb3fd785bdbf2ae07031f13a6ac5fb578adc338 Mon Sep 17 00:00:00 2001 From: Mohit Rathee Date: Tue, 1 Aug 2023 18:44:02 +0530 Subject: [PATCH 281/562] Trim whitespace when setting display names (#16031) --- changelog.d/16031.bugfix | 1 + synapse/handlers/profile.py | 2 +- tests/rest/client/test_profile.py | 12 ++++++++++++ 3 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16031.bugfix diff --git a/changelog.d/16031.bugfix b/changelog.d/16031.bugfix new file mode 100644 index 0000000000..e48bf3975c --- /dev/null +++ b/changelog.d/16031.bugfix @@ -0,0 +1 @@ +Remove leading and trailing spaces when setting a display name. diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index c7fe101cd9..c2109036ec 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -163,7 +163,7 @@ class ProfileHandler: 400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,) ) - displayname_to_set: Optional[str] = new_displayname + displayname_to_set: Optional[str] = new_displayname.strip() if new_displayname == "": displayname_to_set = None diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py index 27c93ad761..ecae092b47 100644 --- a/tests/rest/client/test_profile.py +++ b/tests/rest/client/test_profile.py @@ -68,6 +68,18 @@ class ProfileTestCase(unittest.HomeserverTestCase): res = self._get_displayname() self.assertEqual(res, "test") + def test_set_displayname_with_extra_spaces(self) -> None: + channel = self.make_request( + "PUT", + "/profile/%s/displayname" % (self.owner,), + content={"displayname": " test "}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + res = self._get_displayname() + self.assertEqual(res, "test") + def test_set_displayname_noauth(self) -> None: channel = self.make_request( "PUT", From 90ad836ed8f4b701580213a89f2befb742c88b5e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 1 Aug 2023 10:36:33 -0400 Subject: [PATCH 282/562] Properly setup the additional sequences in the portdb script. 
(#16043) The un_partial_stated_event_stream_sequence and application_services_txn_id_seq were never properly configured in the portdb script, resulting in an error on start-up. --- changelog.d/16043.bugfix | 1 + synapse/_scripts/synapse_port_db.py | 18 +++++++++++++++--- 2 files changed, 16 insertions(+), 3 deletions(-) create mode 100644 changelog.d/16043.bugfix diff --git a/changelog.d/16043.bugfix b/changelog.d/16043.bugfix new file mode 100644 index 0000000000..78c0f3455a --- /dev/null +++ b/changelog.d/16043.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where the `synapse_port_db` failed to configure sequences for application services and partial stated rooms. diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 7c4aa0afa2..22c84fbd5b 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -761,7 +761,7 @@ class Porter: # Step 2. Set up sequences # - # We do this before porting the tables so that event if we fail half + # We do this before porting the tables so that even if we fail half # way through the postgres DB always have sequences that are greater # than their respective tables. If we don't then creating the # `DataStore` object will fail due to the inconsistency. @@ -769,6 +769,10 @@ class Porter: await self._setup_state_group_id_seq() await self._setup_user_id_seq() await self._setup_events_stream_seqs() + await self._setup_sequence( + "un_partial_stated_event_stream_sequence", + ("un_partial_stated_event_stream",), + ) await self._setup_sequence( "device_inbox_sequence", ("device_inbox", "device_federation_outbox") ) @@ -779,6 +783,11 @@ class Porter: await self._setup_sequence("receipts_sequence", ("receipts_linearized",)) await self._setup_sequence("presence_stream_sequence", ("presence_stream",)) await self._setup_auth_chain_sequence() + await self._setup_sequence( + "application_services_txn_id_seq", + ("application_services_txns",), + "txn_id", + ) # Step 3. Get tables. self.progress.set_state("Fetching tables") @@ -1083,7 +1092,10 @@ class Porter: ) async def _setup_sequence( - self, sequence_name: str, stream_id_tables: Iterable[str] + self, + sequence_name: str, + stream_id_tables: Iterable[str], + column_name: str = "stream_id", ) -> None: """Set a sequence to the correct value.""" current_stream_ids = [] @@ -1093,7 +1105,7 @@ class Porter: await self.sqlite_store.db_pool.simple_select_one_onecol( table=stream_id_table, keyvalues={}, - retcol="COALESCE(MAX(stream_id), 1)", + retcol=f"COALESCE(MAX({column_name}), 1)", allow_none=True, ), ) From 8fe1fd906a0e8895ba2291f03a52db5a0062f06a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 1 Aug 2023 11:55:58 -0400 Subject: [PATCH 283/562] Update certifi to 2023.7.22 and pygments to 2.15.1. (#16044) --- changelog.d/16044.misc | 1 + poetry.lock | 14 +++++++------- 2 files changed, 8 insertions(+), 7 deletions(-) create mode 100644 changelog.d/16044.misc diff --git a/changelog.d/16044.misc b/changelog.d/16044.misc new file mode 100644 index 0000000000..2e7137ccc2 --- /dev/null +++ b/changelog.d/16044.misc @@ -0,0 +1 @@ +Update certifi to 2023.7.22 and pygments to 2.15.1. diff --git a/poetry.lock b/poetry.lock index 94baffe496..ae92c8b9c1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -226,13 +226,13 @@ files = [ [[package]] name = "certifi" -version = "2022.12.7" +version = "2023.7.22" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, - {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, + {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, + {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, ] [[package]] @@ -1898,13 +1898,13 @@ requests = ">=2.14.0" [[package]] name = "pygments" -version = "2.14.0" +version = "2.15.1" description = "Pygments is a syntax highlighting package written in Python." optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "Pygments-2.14.0-py3-none-any.whl", hash = "sha256:fa7bd7bd2771287c0de303af8bfdfc731f51bd2c6a47ab69d117138893b82717"}, - {file = "Pygments-2.14.0.tar.gz", hash = "sha256:b3ed06a9e8ac9a9aae5a6f5dbe78a8a58655d17b43b93c078f094ddc476ae297"}, + {file = "Pygments-2.15.1-py3-none-any.whl", hash = "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1"}, + {file = "Pygments-2.15.1.tar.gz", hash = "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c"}, ] [package.extras] From a51b0862a127c3a6830ee3d2da79c86fb931de0a Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 2 Aug 2023 07:47:16 +0100 Subject: [PATCH 284/562] Update `flake.lock` to fix running the nix developer environment on MacOS (#16019) --- changelog.d/16019.misc | 1 + flake.lock | 60 +++++++++++++++++++++++++++--------------- flake.nix | 4 +-- 3 files changed, 42 insertions(+), 23 deletions(-) create mode 100644 changelog.d/16019.misc diff --git a/changelog.d/16019.misc b/changelog.d/16019.misc new file mode 100644 index 0000000000..0e583302ee --- /dev/null +++ b/changelog.d/16019.misc @@ -0,0 +1 @@ +Fix building the nix development environment on MacOS systems. 
\ No newline at end of file diff --git a/flake.lock b/flake.lock index eb5a65e445..084c40fe2f 100644 --- a/flake.lock +++ b/flake.lock @@ -8,11 +8,11 @@ "pre-commit-hooks": "pre-commit-hooks" }, "locked": { - "lastModified": 1683102061, - "narHash": "sha256-kOphT6V0uQUlFNBP3GBjs7DAU7fyZGGqCs9ue1gNY6E=", + "lastModified": 1690534632, + "narHash": "sha256-kOXS9x5y17VKliC7wZxyszAYrWdRl1JzggbQl0gyo94=", "owner": "cachix", "repo": "devenv", - "rev": "ff1f29e41756553174d596cafe3a9fa77595100b", + "rev": "6568e7e485a46bbf32051e4d6347fa1fed8b2f25", "type": "github" }, "original": { @@ -39,12 +39,15 @@ } }, "flake-utils": { + "inputs": { + "systems": "systems" + }, "locked": { - "lastModified": 1667395993, - "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", + "lastModified": 1685518550, + "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", "owner": "numtide", "repo": "flake-utils", - "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", + "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", "type": "github" }, "original": { @@ -55,7 +58,7 @@ }, "flake-utils_2": { "inputs": { - "systems": "systems" + "systems": "systems_2" }, "locked": { "lastModified": 1681202837, @@ -167,27 +170,27 @@ }, "nixpkgs-stable": { "locked": { - "lastModified": 1673800717, - "narHash": "sha256-SFHraUqLSu5cC6IxTprex/nTsI81ZQAtDvlBvGDWfnA=", + "lastModified": 1685801374, + "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "2f9fd351ec37f5d479556cd48be4ca340da59b8f", + "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-22.11", + "ref": "nixos-23.05", "repo": "nixpkgs", "type": "github" } }, "nixpkgs_2": { "locked": { - "lastModified": 1682519441, - "narHash": "sha256-Vsq/8NOtvW1AoC6shCBxRxZyMQ+LhvPuJT6ltbzuv+Y=", + "lastModified": 1690535733, + "narHash": "sha256-WgjUPscQOw3cB8yySDGlyzo6cZNihnRzUwE9kadv/5I=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "7a32a141db568abde9bc389845949dc2a454dfd3", + "rev": "8cacc05fbfffeaab910e8c2c9e2a7c6b32ce881a", "type": "github" }, "original": { @@ -228,11 +231,11 @@ "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1678376203, - "narHash": "sha256-3tyYGyC8h7fBwncLZy5nCUjTJPrHbmNwp47LlNLOHSM=", + "lastModified": 1688056373, + "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=", "owner": "cachix", "repo": "pre-commit-hooks.nix", - "rev": "1a20b9708962096ec2481eeb2ddca29ed747770a", + "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7", "type": "github" }, "original": { @@ -246,7 +249,7 @@ "devenv": "devenv", "nixpkgs": "nixpkgs_2", "rust-overlay": "rust-overlay", - "systems": "systems_2" + "systems": "systems_3" } }, "rust-overlay": { @@ -255,11 +258,11 @@ "nixpkgs": "nixpkgs_3" }, "locked": { - "lastModified": 1689302058, - "narHash": "sha256-yD74lcHTrw4niXcE9goJLbzsgyce48rQQoy5jK5ZK40=", + "lastModified": 1690510705, + "narHash": "sha256-6mjs3Gl9/xrseFh9iNcNq1u5yJ/MIoAmjoaG7SXZDIE=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "7b8dbbf4c67ed05a9bf3d9e658c12d4108bc24c8", + "rev": "851ae4c128905a62834d53ce7704ebc1ba481bea", "type": "github" }, "original": { @@ -297,6 +300,21 @@ "repo": "default", "type": "github" } + }, + "systems_3": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": 
"nix-systems", + "repo": "default", + "type": "github" + } } }, "root": "root", diff --git a/flake.nix b/flake.nix index bacb70f478..e70a41dfc2 100644 --- a/flake.nix +++ b/flake.nix @@ -39,8 +39,8 @@ { inputs = { - # Use the master/unstable branch of nixpkgs. The latest stable, 22.11, - # does not contain 'perl536Packages.NetAsyncHTTP', needed by Sytest. + # Use the master/unstable branch of nixpkgs. Used to fetch the latest + # available versions of packages. nixpkgs.url = "github:NixOS/nixpkgs/master"; # Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS). systems.url = "github:nix-systems/default"; From ca5d5de79b203f41e0fafa5ef1e7c8d28f63049d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Aug 2023 09:46:32 +0000 Subject: [PATCH 285/562] Bump cryptography from 41.0.2 to 41.0.3 (#16048) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/poetry.lock b/poetry.lock index ae92c8b9c1..75eac9dc7f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -460,34 +460,34 @@ files = [ [[package]] name = "cryptography" -version = "41.0.2" +version = "41.0.3" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-41.0.2-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:01f1d9e537f9a15b037d5d9ee442b8c22e3ae11ce65ea1f3316a41c78756b711"}, - {file = "cryptography-41.0.2-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:079347de771f9282fbfe0e0236c716686950c19dee1b76240ab09ce1624d76d7"}, - {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:439c3cc4c0d42fa999b83ded80a9a1fb54d53c58d6e59234cfe97f241e6c781d"}, - {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f14ad275364c8b4e525d018f6716537ae7b6d369c094805cae45300847e0894f"}, - {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:84609ade00a6ec59a89729e87a503c6e36af98ddcd566d5f3be52e29ba993182"}, - {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:49c3222bb8f8e800aead2e376cbef687bc9e3cb9b58b29a261210456a7783d83"}, - {file = "cryptography-41.0.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:d73f419a56d74fef257955f51b18d046f3506270a5fd2ac5febbfa259d6c0fa5"}, - {file = "cryptography-41.0.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:2a034bf7d9ca894720f2ec1d8b7b5832d7e363571828037f9e0c4f18c1b58a58"}, - {file = "cryptography-41.0.2-cp37-abi3-win32.whl", hash = "sha256:d124682c7a23c9764e54ca9ab5b308b14b18eba02722b8659fb238546de83a76"}, - {file = "cryptography-41.0.2-cp37-abi3-win_amd64.whl", hash = "sha256:9c3fe6534d59d071ee82081ca3d71eed3210f76ebd0361798c74abc2bcf347d4"}, - {file = "cryptography-41.0.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a719399b99377b218dac6cf547b6ec54e6ef20207b6165126a280b0ce97e0d2a"}, - {file = "cryptography-41.0.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:182be4171f9332b6741ee818ec27daff9fb00349f706629f5cbf417bd50e66fd"}, - {file = "cryptography-41.0.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:7a9a3bced53b7f09da251685224d6a260c3cb291768f54954e28f03ef14e3766"}, - {file = 
"cryptography-41.0.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f0dc40e6f7aa37af01aba07277d3d64d5a03dc66d682097541ec4da03cc140ee"}, - {file = "cryptography-41.0.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:674b669d5daa64206c38e507808aae49904c988fa0a71c935e7006a3e1e83831"}, - {file = "cryptography-41.0.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7af244b012711a26196450d34f483357e42aeddb04128885d95a69bd8b14b69b"}, - {file = "cryptography-41.0.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9b6d717393dbae53d4e52684ef4f022444fc1cce3c48c38cb74fca29e1f08eaa"}, - {file = "cryptography-41.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:192255f539d7a89f2102d07d7375b1e0a81f7478925b3bc2e0549ebf739dae0e"}, - {file = "cryptography-41.0.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f772610fe364372de33d76edcd313636a25684edb94cee53fd790195f5989d14"}, - {file = "cryptography-41.0.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:b332cba64d99a70c1e0836902720887fb4529ea49ea7f5462cf6640e095e11d2"}, - {file = "cryptography-41.0.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9a6673c1828db6270b76b22cc696f40cde9043eb90373da5c2f8f2158957f42f"}, - {file = "cryptography-41.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:342f3767e25876751e14f8459ad85e77e660537ca0a066e10e75df9c9e9099f0"}, - {file = "cryptography-41.0.2.tar.gz", hash = "sha256:7d230bf856164de164ecb615ccc14c7fc6de6906ddd5b491f3af90d3514c925c"}, + {file = "cryptography-41.0.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507"}, + {file = "cryptography-41.0.3-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922"}, + {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81"}, + {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd"}, + {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47"}, + {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116"}, + {file = "cryptography-41.0.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c"}, + {file = "cryptography-41.0.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae"}, + {file = "cryptography-41.0.3-cp37-abi3-win32.whl", hash = "sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306"}, + {file = "cryptography-41.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574"}, + {file = "cryptography-41.0.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087"}, + {file = "cryptography-41.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858"}, + {file = "cryptography-41.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906"}, + {file = 
"cryptography-41.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e"}, + {file = "cryptography-41.0.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd"}, + {file = "cryptography-41.0.3-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207"}, + {file = "cryptography-41.0.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84"}, + {file = "cryptography-41.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7"}, + {file = "cryptography-41.0.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d"}, + {file = "cryptography-41.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de"}, + {file = "cryptography-41.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1"}, + {file = "cryptography-41.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4"}, + {file = "cryptography-41.0.3.tar.gz", hash = "sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34"}, ] [package.dependencies] From 01a45869f034265b9757992aa1a5eb7a0923351c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 2 Aug 2023 08:41:32 -0400 Subject: [PATCH 286/562] Update MSC3958 support to interact with intentional mentions. (#15992) * Updates the rule ID. * Use `event_property_is` instead of `event_match`. This updates the implementation of MSC3958 to match the latest text from the MSC. --- changelog.d/15992.misc | 1 + rust/benches/evaluator.rs | 27 ++++++++------- rust/src/push/base_rules.rs | 37 ++++++++++----------- rust/src/push/evaluator.rs | 14 ++++---- rust/src/push/mod.rs | 6 ++-- tests/push/test_bulk_push_rule_evaluator.py | 21 ++++++++++-- 6 files changed, 64 insertions(+), 42 deletions(-) create mode 100644 changelog.d/15992.misc diff --git a/changelog.d/15992.misc b/changelog.d/15992.misc new file mode 100644 index 0000000000..539f55b475 --- /dev/null +++ b/changelog.d/15992.misc @@ -0,0 +1 @@ +Update support for [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958) to match the latest revision of the MSC. diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index c2f33258a4..6e1eab2a3b 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -13,6 +13,9 @@ // limitations under the License. 
#![feature(test)] + +use std::borrow::Cow; + use synapse::push::{ evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, JsonValue, PushRules, SimpleJsonValue, @@ -26,15 +29,15 @@ fn bench_match_exact(b: &mut Bencher) { let flattened_keys = [ ( "type".to_string(), - JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())), + JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))), ), ( "room_id".to_string(), - JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())), + JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))), ), ( "content.body".to_string(), - JsonValue::Value(SimpleJsonValue::Str("test message".to_string())), + JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))), ), ] .into_iter() @@ -71,15 +74,15 @@ fn bench_match_word(b: &mut Bencher) { let flattened_keys = [ ( "type".to_string(), - JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())), + JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))), ), ( "room_id".to_string(), - JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())), + JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))), ), ( "content.body".to_string(), - JsonValue::Value(SimpleJsonValue::Str("test message".to_string())), + JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))), ), ] .into_iter() @@ -116,15 +119,15 @@ fn bench_match_word_miss(b: &mut Bencher) { let flattened_keys = [ ( "type".to_string(), - JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())), + JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))), ), ( "room_id".to_string(), - JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())), + JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))), ), ( "content.body".to_string(), - JsonValue::Value(SimpleJsonValue::Str("test message".to_string())), + JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))), ), ] .into_iter() @@ -161,15 +164,15 @@ fn bench_eval_message(b: &mut Bencher) { let flattened_keys = [ ( "type".to_string(), - JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())), + JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))), ), ( "room_id".to_string(), - JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())), + JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))), ), ( "content.body".to_string(), - JsonValue::Value(SimpleJsonValue::Str("test message".to_string())), + JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))), ), ] .into_iter() diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index 7eea9313f0..00baceda91 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -63,22 +63,6 @@ pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule { }]; pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ - // We don't want to notify on edits. Not only can this be confusing in real - // time (2 notifications, one message) but it's especially confusing - // if a bridge needs to edit a previously backfilled message. 
- PushRule { - rule_id: Cow::Borrowed("global/override/.com.beeper.suppress_edits"), - priority_class: 5, - conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( - EventMatchCondition { - key: Cow::Borrowed("content.m\\.relates_to.rel_type"), - pattern: Cow::Borrowed("m.replace"), - }, - ))]), - actions: Cow::Borrowed(&[]), - default: true, - default_enabled: true, - }, PushRule { rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"), priority_class: 5, @@ -146,7 +130,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ priority_class: 5, conditions: Cow::Borrowed(&[Condition::Known( KnownCondition::ExactEventPropertyContainsType(EventPropertyIsTypeCondition { - key: Cow::Borrowed("content.m\\.mentions.user_ids"), + key: Cow::Borrowed(r"content.m\.mentions.user_ids"), value_type: Cow::Borrowed(&EventMatchPatternType::UserId), }), )]), @@ -167,8 +151,8 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ priority_class: 5, conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition { - key: Cow::Borrowed("content.m\\.mentions.room"), - value: Cow::Borrowed(&SimpleJsonValue::Bool(true)), + key: Cow::Borrowed(r"content.m\.mentions.room"), + value: Cow::Owned(SimpleJsonValue::Bool(true)), })), Condition::Known(KnownCondition::SenderNotificationPermission { key: Cow::Borrowed("room"), @@ -241,6 +225,21 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ default: true, default_enabled: true, }, + // We don't want to notify on edits *unless* the edit directly mentions a + // user, which is handled above. + PushRule { + rule_id: Cow::Borrowed("global/override/.org.matrix.msc3958.suppress_edits"), + priority_class: 5, + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventPropertyIs( + EventPropertyIsCondition { + key: Cow::Borrowed(r"content.m\.relates_to.rel_type"), + value: Cow::Owned(SimpleJsonValue::Str(Cow::Borrowed("m.replace"))), + }, + ))]), + actions: Cow::Borrowed(&[]), + default: true, + default_enabled: true, + }, PushRule { rule_id: Cow::Borrowed("global/override/.org.matrix.msc3930.rule.poll_response"), priority_class: 5, diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 59c53b1776..48e670478b 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -117,7 +117,7 @@ impl PushRuleEvaluator { msc3931_enabled: bool, ) -> Result { let body = match flattened_keys.get("content.body") { - Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone(), + Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone().into_owned(), _ => String::new(), }; @@ -313,13 +313,15 @@ impl PushRuleEvaluator { }; let pattern = match &*exact_event_match.value_type { - EventMatchPatternType::UserId => user_id, - EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?, + EventMatchPatternType::UserId => user_id.to_owned(), + EventMatchPatternType::UserLocalpart => { + get_localpart_from_id(user_id)?.to_owned() + } }; self.match_event_property_contains( exact_event_match.key.clone(), - Cow::Borrowed(&SimpleJsonValue::Str(pattern.to_string())), + Cow::Borrowed(&SimpleJsonValue::Str(Cow::Owned(pattern))), )? 
} KnownCondition::ContainsDisplayName => { @@ -494,7 +496,7 @@ fn push_rule_evaluator() { let mut flattened_keys = BTreeMap::new(); flattened_keys.insert( "content.body".to_string(), - JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())), + JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("foo bar bob hello"))), ); let evaluator = PushRuleEvaluator::py_new( flattened_keys, @@ -522,7 +524,7 @@ fn test_requires_room_version_supports_condition() { let mut flattened_keys = BTreeMap::new(); flattened_keys.insert( "content.body".to_string(), - JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())), + JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("foo bar bob hello"))), ); let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()]; let evaluator = PushRuleEvaluator::py_new( diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index 514980579b..829fb79d0e 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -256,7 +256,7 @@ impl<'de> Deserialize<'de> for Action { #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] #[serde(untagged)] pub enum SimpleJsonValue { - Str(String), + Str(Cow<'static, str>), Int(i64), Bool(bool), Null, @@ -265,7 +265,7 @@ pub enum SimpleJsonValue { impl<'source> FromPyObject<'source> for SimpleJsonValue { fn extract(ob: &'source PyAny) -> PyResult { if let Ok(s) = ::try_from(ob) { - Ok(SimpleJsonValue::Str(s.to_string())) + Ok(SimpleJsonValue::Str(Cow::Owned(s.to_string()))) // A bool *is* an int, ensure we try bool first. } else if let Ok(b) = ::try_from(ob) { Ok(SimpleJsonValue::Bool(b.extract()?)) @@ -585,7 +585,7 @@ impl FilteredPushRules { } if !self.msc3958_suppress_edits_enabled - && rule.rule_id == "global/override/.com.beeper.suppress_edits" + && rule.rule_id == "global/override/.org.matrix.msc3958.suppress_edits" { return false; } diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index 1e06f86071..829b9df83d 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -409,12 +409,12 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): ) ) - # Room mentions from those without power should not notify. + # The edit should not cause a notification. self.assertFalse( self._create_and_process( bulk_evaluator, { - "body": self.alice, + "body": "Test message", "m.relates_to": { "rel_type": RelationTypes.REPLACE, "event_id": event.event_id, @@ -422,3 +422,20 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): }, ) ) + + # An edit which is a mention will cause a notification. + self.assertTrue( + self._create_and_process( + bulk_evaluator, + { + "body": "Test message", + "m.relates_to": { + "rel_type": RelationTypes.REPLACE, + "event_id": event.event_id, + }, + "m.mentions": { + "user_ids": [self.alice], + }, + }, + ) + ) From 4f5bccbbba13ba10412497cb92a1460535cf7a25 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 2 Aug 2023 11:35:54 -0400 Subject: [PATCH 287/562] Add forward-compatibility for the redacts property (MSC2174). (#16013) The location of the redacts field changes in room version 11. Ensure it is copied to the *new* location for *old* room versions for forwards-compatibility with clients. Note that copying it to the *old* location for the *new* room version was previously handled. 
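For readers skimming the diff below: the serializer change boils down to mirroring the redacted event ID into whichever location is not canonical for the room version. The following is a condensed, illustrative sketch of that logic; the plain dicts and the bare `updated_redaction_rules` flag are simplified stand-ins for Synapse's event and room-version types, not the real `serialize_event` signature.

```python
def copy_redacts_for_compat(
    serialized: dict, redacts: str, updated_redaction_rules: bool
) -> dict:
    """Mirror the redacted event ID into the non-canonical location."""
    if updated_redaction_rules:
        # Room version 11: content["redacts"] is canonical, so copy it to the
        # deprecated top-level key for older clients.
        serialized["redacts"] = redacts
    else:
        # Room versions 1-10: the top-level key is canonical, so copy it into
        # the content, duplicating the dict first to avoid mutating a cached
        # event body.
        serialized["content"] = dict(serialized.get("content", {}))
        serialized["content"]["redacts"] = redacts
    return serialized


# Old room version: the ID ends up in both places.
old_style = copy_redacts_for_compat({"content": {}, "redacts": "$abc"}, "$abc", False)
assert old_style["content"]["redacts"] == "$abc"
```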
--- changelog.d/16013.misc | 1 + synapse/events/utils.py | 18 ++++---- tests/rest/client/test_redactions.py | 67 +++++++++++++++++++++------- 3 files changed, 61 insertions(+), 25 deletions(-) create mode 100644 changelog.d/16013.misc diff --git a/changelog.d/16013.misc b/changelog.d/16013.misc new file mode 100644 index 0000000000..bd161e13ed --- /dev/null +++ b/changelog.d/16013.misc @@ -0,0 +1 @@ +Properly overwrite the `redacts` content-property for forwards-compatibility with room versions 1 through 10. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index c890833b1d..967a6c245b 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -475,14 +475,16 @@ def serialize_event( if config.as_client_event: d = config.event_format(d) - # If the event is a redaction, copy the redacts field from the content to - # top-level for backwards compatibility. - if ( - e.type == EventTypes.Redaction - and e.room_version.updated_redaction_rules - and e.redacts is not None - ): - d["redacts"] = e.redacts + # If the event is a redaction, the field with the redacted event ID appears + # in a different location depending on the room version. e.redacts handles + # fetching from the proper location; copy it to the other location for forwards- + # and backwards-compatibility with clients. + if e.type == EventTypes.Redaction and e.redacts is not None: + if e.room_version.updated_redaction_rules: + d["redacts"] = e.redacts + else: + d["content"] = dict(d["content"]) + d["content"]["redacts"] = e.redacts only_event_fields = config.only_event_fields if only_event_fields: diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py index 6028886bd6..180b635ea6 100644 --- a/tests/rest/client/test_redactions.py +++ b/tests/rest/client/test_redactions.py @@ -13,10 +13,12 @@ # limitations under the License. from typing import List, Optional +from parameterized import parameterized + from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes, RelationTypes -from synapse.api.room_versions import RoomVersions +from synapse.api.room_versions import RoomVersion, RoomVersions from synapse.rest import admin from synapse.rest.client import login, room, sync from synapse.server import HomeServer @@ -569,50 +571,81 @@ class RedactionsTestCase(HomeserverTestCase): self.assertIn("body", event_dict["content"], event_dict) self.assertEqual("I'm in a thread!", event_dict["content"]["body"]) - def test_content_redaction(self) -> None: - """MSC2174 moved the redacts property to the content.""" + @parameterized.expand( + [ + # Tuples of: + # Room version + # Boolean: True if the redaction event content should include the event ID. + # Boolean: true if the resulting redaction event is expected to include the + # event ID in the content. + (RoomVersions.V10, False, False), + (RoomVersions.V11, True, True), + (RoomVersions.V11, False, True), + ] + ) + def test_redaction_content( + self, room_version: RoomVersion, include_content: bool, expect_content: bool + ) -> None: + """ + Room version 11 moved the redacts property to the content. + + Ensure that the event gets created properly and that the Client-Server + API serves the proper backwards-compatible version. + """ # Create a room with the newer room version. room_id = self.helper.create_room_as( self.mod_user_id, tok=self.mod_access_token, - room_version=RoomVersions.V11.identifier, + room_version=room_version.identifier, ) # Create an event.
b = self.helper.send(room_id=room_id, tok=self.mod_access_token) event_id = b["event_id"] - # Attempt to redact it with a bogus event ID. - self._redact_event( + # Ensure the event ID in the URL and the content must match. + if include_content: + self._redact_event( + self.mod_access_token, + room_id, + event_id, + expect_code=400, + content={"redacts": "foo"}, + ) + + # Redact it for real. + result = self._redact_event( self.mod_access_token, room_id, event_id, - expect_code=400, - content={"redacts": "foo"}, + content={"redacts": event_id} if include_content else {}, ) - - # Redact it for real. - self._redact_event(self.mod_access_token, room_id, event_id) + redaction_event_id = result["event_id"] # Sync the room, to get the id of the create event timeline = self._sync_room_timeline(self.mod_access_token, room_id) redact_event = timeline[-1] self.assertEqual(redact_event["type"], EventTypes.Redaction) - # The redacts key should be in the content. + # The redacts key should be in the content and the redacts keys. self.assertEquals(redact_event["content"]["redacts"], event_id) - - # It should also be copied as the top-level redacts field for backwards - # compatibility. self.assertEquals(redact_event["redacts"], event_id) # But it isn't actually part of the event. def get_event(txn: LoggingTransaction) -> JsonDict: return db_to_json( - main_datastore._fetch_event_rows(txn, [event_id])[event_id].json + main_datastore._fetch_event_rows(txn, [redaction_event_id])[ + redaction_event_id + ].json ) main_datastore = self.hs.get_datastores().main event_json = self.get_success( main_datastore.db_pool.runInteraction("get_event", get_event) ) - self.assertNotIn("redacts", event_json) + self.assertEquals(event_json["type"], EventTypes.Redaction) + if expect_content: + self.assertNotIn("redacts", event_json) + self.assertEquals(event_json["content"]["redacts"], event_id) + else: + self.assertEquals(event_json["redacts"], event_id) + self.assertNotIn("redacts", event_json["content"]) From 9c462f18a4b6969f627349d956b9161968ab8252 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 3 Aug 2023 12:42:19 +0000 Subject: [PATCH 288/562] Allow modules to check whether the current worker is configured to run background tasks. (#15991) --- changelog.d/15991.misc | 1 + synapse/module_api/__init__.py | 12 ++++++++++++ 2 files changed, 13 insertions(+) create mode 100644 changelog.d/15991.misc diff --git a/changelog.d/15991.misc b/changelog.d/15991.misc new file mode 100644 index 0000000000..18f388cff8 --- /dev/null +++ b/changelog.d/15991.misc @@ -0,0 +1 @@ +Allow modules to check whether the current worker is configured to run background tasks. \ No newline at end of file diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 95f7800111..ba1a925003 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -1230,6 +1230,18 @@ class ModuleApi: f, ) + def should_run_background_tasks(self) -> bool: + """ + Return true if and only if the current worker is configured to run + background tasks. + There should only be one worker configured to run background tasks, so + this is helpful when you need to only run a task on one worker but don't + have any other good way to choose which one. + + Added in Synapse v1.89.0. + """ + return self._hs.config.worker.run_background_tasks + async def sleep(self, seconds: float) -> None: """Sleeps for the given number of seconds. 
From f0a860908ba0309c89c9dba452d99b4f9c6928f7 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Thu, 3 Aug 2023 20:36:55 +0200 Subject: [PATCH 289/562] Allow config of the backoff algorithm for the federation client. (#15754) Adds three new configuration variables: * destination_min_retry_interval is identical to before (10 minutes). * destination_retry_multiplier is now 2 instead of 5, so the maximum value is reached more slowly. * destination_max_retry_interval is now one week instead of (essentially) infinity. Capping this will cause destinations to continue to be retried sometimes instead of being lost forever. The previous value was 2 ^ 62 milliseconds. --- changelog.d/15754.misc | 1 + .../configuration/config_documentation.md | 11 +++++++ synapse/config/federation.py | 18 ++++++++++++ synapse/util/retryutils.py | 29 ++++++++++--------- tests/storage/test_transactions.py | 9 ++++-- tests/util/test_retryutils.py | 22 +++++++------- 6 files changed, 64 insertions(+), 26 deletions(-) create mode 100644 changelog.d/15754.misc diff --git a/changelog.d/15754.misc b/changelog.d/15754.misc new file mode 100644 index 0000000000..4314d415a3 --- /dev/null +++ b/changelog.d/15754.misc @@ -0,0 +1 @@ +Allow for the configuration of the backoff algorithm for federation destinations. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 4e6fcd085a..c32608da2b 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1242,6 +1242,14 @@ like sending a federation transaction. * `max_short_retries`: maximum number of retries for the short retry algo. Default to 3 attempts. * `max_long_retries`: maximum number of retries for the long retry algo. Default to 10 attempts. +The following options control the retry logic when communicating with a specific homeserver destination. +Unlike the previous configuration options, these values apply across all requests +for a given destination and the state of the backoff is stored in the database. + +* `destination_min_retry_interval`: the initial backoff, after the first request fails. Defaults to 10m. +* `destination_retry_multiplier`: how much we multiply the backoff by after each subsequent fail. Defaults to 2. +* `destination_max_retry_interval`: a cap on the backoff. Defaults to a week. + Example configuration: ```yaml federation: @@ -1250,6 +1258,9 @@ federation: max_long_retry_delay: 100s max_short_retries: 5 max_long_retries: 20 + destination_min_retry_interval: 30s + destination_retry_multiplier: 5 + destination_max_retry_interval: 12h ``` --- ## Caching diff --git a/synapse/config/federation.py b/synapse/config/federation.py index 0e1cb8b6e3..97636039b8 100644 --- a/synapse/config/federation.py +++ b/synapse/config/federation.py @@ -65,5 +65,23 @@ class FederationConfig(Config): self.max_long_retries = federation_config.get("max_long_retries", 10) self.max_short_retries = federation_config.get("max_short_retries", 3) + # Allow for the configuration of the backoff algorithm used + # when trying to reach an unavailable destination. + # Unlike the previous configuration options, these values apply across + # multiple requests and the state of the backoff is stored in the database.
+ self.destination_min_retry_interval_ms = Config.parse_duration( + federation_config.get("destination_min_retry_interval", "10m") + ) + self.destination_retry_multiplier = federation_config.get( + "destination_retry_multiplier", 2 + ) + self.destination_max_retry_interval_ms = min( + Config.parse_duration( + federation_config.get("destination_max_retry_interval", "7d") + ), + # Set a hard-limit to not overflow the database column. + 2**62, + ) + _METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}} diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index dcc037b982..27e9fc976c 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -27,15 +27,6 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -# the initial backoff, after the first transaction fails -MIN_RETRY_INTERVAL = 10 * 60 * 1000 - -# how much we multiply the backoff by after each subsequent fail -RETRY_MULTIPLIER = 5 - -# a cap on the backoff. (Essentially none) -MAX_RETRY_INTERVAL = 2**62 - class NotRetryingDestination(Exception): def __init__(self, retry_last_ts: int, retry_interval: int, destination: str): @@ -169,6 +160,16 @@ class RetryDestinationLimiter: self.notifier = notifier self.replication_client = replication_client + self.destination_min_retry_interval_ms = ( + self.store.hs.config.federation.destination_min_retry_interval_ms + ) + self.destination_retry_multiplier = ( + self.store.hs.config.federation.destination_retry_multiplier + ) + self.destination_max_retry_interval_ms = ( + self.store.hs.config.federation.destination_max_retry_interval_ms + ) + def __enter__(self) -> None: pass @@ -220,13 +221,15 @@ class RetryDestinationLimiter: # We couldn't connect. if self.retry_interval: self.retry_interval = int( - self.retry_interval * RETRY_MULTIPLIER * random.uniform(0.8, 1.4) + self.retry_interval + * self.destination_retry_multiplier + * random.uniform(0.8, 1.4) ) - if self.retry_interval >= MAX_RETRY_INTERVAL: - self.retry_interval = MAX_RETRY_INTERVAL + if self.retry_interval >= self.destination_max_retry_interval_ms: + self.retry_interval = self.destination_max_retry_interval_ms else: - self.retry_interval = MIN_RETRY_INTERVAL + self.retry_interval = self.destination_min_retry_interval_ms logger.info( "Connection to %s was unsuccessful (%s(%s)); backoff now %i", diff --git a/tests/storage/test_transactions.py b/tests/storage/test_transactions.py index 2fab84a529..ef06b50dbb 100644 --- a/tests/storage/test_transactions.py +++ b/tests/storage/test_transactions.py @@ -17,7 +17,6 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.server import HomeServer from synapse.storage.databases.main.transactions import DestinationRetryTimings from synapse.util import Clock -from synapse.util.retryutils import MAX_RETRY_INTERVAL from tests.unittest import HomeserverTestCase @@ -57,8 +56,14 @@ class TransactionStoreTestCase(HomeserverTestCase): self.get_success(d) def test_large_destination_retry(self) -> None: + max_retry_interval_ms = ( + self.hs.config.federation.destination_max_retry_interval_ms + ) d = self.store.set_destination_retry_timings( - "example.com", MAX_RETRY_INTERVAL, MAX_RETRY_INTERVAL, MAX_RETRY_INTERVAL + "example.com", + max_retry_interval_ms, + max_retry_interval_ms, + max_retry_interval_ms, ) self.get_success(d) diff --git a/tests/util/test_retryutils.py b/tests/util/test_retryutils.py index 5f8f4e76b5..1277e1a865 100644 --- a/tests/util/test_retryutils.py +++ b/tests/util/test_retryutils.py @@ -11,12 +11,7 @@ # WITHOUT 
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from synapse.util.retryutils import ( - MIN_RETRY_INTERVAL, - RETRY_MULTIPLIER, - NotRetryingDestination, - get_retry_limiter, -) +from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter from tests.unittest import HomeserverTestCase @@ -42,6 +37,11 @@ class RetryLimiterTestCase(HomeserverTestCase): limiter = self.get_success(get_retry_limiter("test_dest", self.clock, store)) + min_retry_interval_ms = ( + self.hs.config.federation.destination_min_retry_interval_ms + ) + retry_multiplier = self.hs.config.federation.destination_retry_multiplier + self.pump(1) try: with limiter: @@ -57,7 +57,7 @@ class RetryLimiterTestCase(HomeserverTestCase): assert new_timings is not None self.assertEqual(new_timings.failure_ts, failure_ts) self.assertEqual(new_timings.retry_last_ts, failure_ts) - self.assertEqual(new_timings.retry_interval, MIN_RETRY_INTERVAL) + self.assertEqual(new_timings.retry_interval, min_retry_interval_ms) # now if we try again we should get a failure self.get_failure( @@ -68,7 +68,7 @@ class RetryLimiterTestCase(HomeserverTestCase): # advance the clock and try again # - self.pump(MIN_RETRY_INTERVAL) + self.pump(min_retry_interval_ms) limiter = self.get_success(get_retry_limiter("test_dest", self.clock, store)) self.pump(1) @@ -87,16 +87,16 @@ class RetryLimiterTestCase(HomeserverTestCase): self.assertEqual(new_timings.failure_ts, failure_ts) self.assertEqual(new_timings.retry_last_ts, retry_ts) self.assertGreaterEqual( - new_timings.retry_interval, MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 0.5 + new_timings.retry_interval, min_retry_interval_ms * retry_multiplier * 0.5 ) self.assertLessEqual( - new_timings.retry_interval, MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 2.0 + new_timings.retry_interval, min_retry_interval_ms * retry_multiplier * 2.0 ) # # one more go, with success # - self.reactor.advance(MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 2.0) + self.reactor.advance(min_retry_interval_ms * retry_multiplier * 2.0) limiter = self.get_success(get_retry_limiter("test_dest", self.clock, store)) self.pump(1) From 0a5f4f766514b84aff84ff17dffd5301a437c797 Mon Sep 17 00:00:00 2001 From: Shay Date: Thu, 3 Aug 2023 11:43:51 -0700 Subject: [PATCH 290/562] Move support for application service query parameter authorization behind a configuration option (#16017) --- changelog.d/16017.removal | 1 + docs/upgrade.md | 16 +++- .../configuration/config_documentation.md | 14 +++ synapse/appservice/api.py | 34 ++++++-- synapse/config/appservice.py | 8 ++ tests/appservice/test_api.py | 85 ++++++++++++++++++- 6 files changed, 144 insertions(+), 14 deletions(-) create mode 100644 changelog.d/16017.removal diff --git a/changelog.d/16017.removal b/changelog.d/16017.removal new file mode 100644 index 0000000000..6b72442892 --- /dev/null +++ b/changelog.d/16017.removal @@ -0,0 +1 @@ +Move support for application service query parameter authorization behind a configuration option. 
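Before the diff itself, a minimal, hypothetical sketch of the request-building pattern this patch applies to each appservice client call; `service`, `get_json`, and the example user ID are illustrative stand-ins rather than the real Synapse objects:

```python
async def query_user(service, get_json, use_appservice_legacy_authorization: bool):
    # The token is always sent via the Authorization header, which is the
    # secure, spec-compliant mechanism.
    headers = {"Authorization": [f"Bearer {service.hs_token}"]}
    args = None
    if use_appservice_legacy_authorization:
        # Deprecated opt-in: additionally expose the token as a query
        # parameter, which can leak into logs and proxies.
        args = {"access_token": service.hs_token}
    return await get_json(
        f"{service.url}/_matrix/app/v1/users/@alice:example.org",
        args,
        headers=headers,
    )
```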
diff --git a/docs/upgrade.md b/docs/upgrade.md index 5dde6c769e..f50a279e98 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -88,6 +88,21 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.90.0 + +## App service query parameter authorization is now a configuration option + +Synapse v1.81.0 deprecated application service authorization via query parameters as this is +considered insecure - and from Synapse v1.71.0 forwards the application service token has also been sent via +[the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization), making the insecure +query parameter authorization redundant. Since removing the ability to continue to use query parameters could break +backwards compatibility, it has now been put behind a configuration option, `use_appservice_legacy_authorization`. +This option defaults to false, but can be activated by adding +```yaml +use_appservice_legacy_authorization: true +``` +to your configuration. + # Upgrading to v1.89.0 ## Removal of unspecced `user` property for `/register` @@ -97,7 +112,6 @@ The standard `username` property should be used instead. See the [Application Service specification](https://spec.matrix.org/v1.7/application-service-api/#server-admin-style-permissions) for more information. - # Upgrading to v1.88.0 ## Minimum supported Python version diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index c32608da2b..2987c9332d 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2848,6 +2848,20 @@ Example configuration: ```yaml track_appservice_user_ips: true ``` +--- +### `use_appservice_legacy_authorization` + +Whether to send the application service access tokens via the `access_token` query parameter +per older versions of the Matrix specification. Defaults to false. Set to true to enable sending +access tokens via a query parameter. + +**Enabling this option is considered insecure and is not recommended.
** + +Example configuration: +```yaml +use_appservice_legacy_authorization: true +``` + --- ### `macaroon_secret_key` diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 359999f680..de7a94bf26 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -16,7 +16,6 @@ import logging import urllib.parse from typing import ( TYPE_CHECKING, - Any, Dict, Iterable, List, @@ -25,6 +24,7 @@ from typing import ( Sequence, Tuple, TypeVar, + Union, ) from prometheus_client import Counter @@ -119,6 +119,7 @@ class ApplicationServiceApi(SimpleHttpClient): def __init__(self, hs: "HomeServer"): super().__init__(hs) self.clock = hs.get_clock() + self.config = hs.config.appservice self.protocol_meta_cache: ResponseCache[Tuple[str, str]] = ResponseCache( hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS @@ -132,9 +133,12 @@ class ApplicationServiceApi(SimpleHttpClient): assert service.hs_token is not None try: + args = None + if self.config.use_appservice_legacy_authorization: + args = {"access_token": service.hs_token} response = await self.get_json( f"{service.url}{APP_SERVICE_PREFIX}/users/{urllib.parse.quote(user_id)}", - {"access_token": service.hs_token}, + args, headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) if response is not None: # just an empty json object @@ -155,9 +159,12 @@ class ApplicationServiceApi(SimpleHttpClient): assert service.hs_token is not None try: + args = None + if self.config.use_appservice_legacy_authorization: + args = {"access_token": service.hs_token} response = await self.get_json( f"{service.url}{APP_SERVICE_PREFIX}/rooms/{urllib.parse.quote(alias)}", - {"access_token": service.hs_token}, + args, headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) if response is not None: # just an empty json object @@ -190,10 +197,12 @@ class ApplicationServiceApi(SimpleHttpClient): assert service.hs_token is not None try: - args: Mapping[Any, Any] = { - **fields, - b"access_token": service.hs_token, - } + args: Mapping[bytes, Union[List[bytes], str]] = fields + if self.config.use_appservice_legacy_authorization: + args = { + **fields, + b"access_token": service.hs_token, + } response = await self.get_json( f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/{kind}/{urllib.parse.quote(protocol)}", args=args, @@ -231,9 +240,12 @@ class ApplicationServiceApi(SimpleHttpClient): # This is required by the configuration. 
assert service.hs_token is not None try: + args = None + if self.config.use_appservice_legacy_authorization: + args = {"access_token": service.hs_token} info = await self.get_json( f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/protocol/{urllib.parse.quote(protocol)}", - {"access_token": service.hs_token}, + args, headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) @@ -344,10 +356,14 @@ class ApplicationServiceApi(SimpleHttpClient): } try: + args = None + if self.config.use_appservice_legacy_authorization: + args = {"access_token": service.hs_token} + await self.put_json( f"{service.url}{APP_SERVICE_PREFIX}/transactions/{urllib.parse.quote(str(txn_id))}", json_body=body, - args={"access_token": service.hs_token}, + args=args, headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) if logger.isEnabledFor(logging.DEBUG): diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index c2710fdf04..919f81a9b7 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -43,6 +43,14 @@ class AppServiceConfig(Config): ) self.track_appservice_user_ips = config.get("track_appservice_user_ips", False) + self.use_appservice_legacy_authorization = config.get( + "use_appservice_legacy_authorization", False + ) + if self.use_appservice_legacy_authorization: + logger.warning( + "The use of appservice legacy authorization via query params is deprecated" + " and should be considered insecure." + ) def load_appservices( diff --git a/tests/appservice/test_api.py b/tests/appservice/test_api.py index 807dc2f21c..3c635e3dcb 100644 --- a/tests/appservice/test_api.py +++ b/tests/appservice/test_api.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, List, Mapping, Sequence, Union +from typing import Any, List, Mapping, Optional, Sequence, Union from unittest.mock import Mock from twisted.test.proto_helpers import MemoryReactor @@ -22,6 +22,7 @@ from synapse.types import JsonDict from synapse.util import Clock from tests import unittest +from tests.unittest import override_config PROTOCOL = "myproto" TOKEN = "myastoken" @@ -39,7 +40,7 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase): hs_token=TOKEN, ) - def test_query_3pe_authenticates_token(self) -> None: + def test_query_3pe_authenticates_token_via_header(self) -> None: """ Tests that 3pe queries to the appservice are authenticated with the appservice's token. @@ -74,12 +75,88 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase): args: Mapping[Any, Any], headers: Mapping[Union[str, bytes], Sequence[Union[str, bytes]]], ) -> List[JsonDict]: - # Ensure the access token is passed as both a header and query arg. - if not headers.get("Authorization") or not args.get(b"access_token"): + # Ensure the access token is passed as a header. + if not headers or not headers.get("Authorization"): raise RuntimeError("Access token not provided") + # ... and not as a query param + if b"access_token" in args: + raise RuntimeError( + "Access token should not be passed as a query param." + ) self.assertEqual(headers.get("Authorization"), [f"Bearer {TOKEN}"]) + self.request_url = url + if url == URL_USER: + return SUCCESS_RESULT_USER + elif url == URL_LOCATION: + return SUCCESS_RESULT_LOCATION + else: + raise RuntimeError( + "URL provided was invalid. This should never be seen." 
+ ) + + # We assign to a method, which mypy doesn't like. + self.api.get_json = Mock(side_effect=get_json) # type: ignore[assignment] + + result = self.get_success( + self.api.query_3pe(self.service, "user", PROTOCOL, {b"some": [b"field"]}) + ) + self.assertEqual(self.request_url, URL_USER) + self.assertEqual(result, SUCCESS_RESULT_USER) + result = self.get_success( + self.api.query_3pe( + self.service, "location", PROTOCOL, {b"some": [b"field"]} + ) + ) + self.assertEqual(self.request_url, URL_LOCATION) + self.assertEqual(result, SUCCESS_RESULT_LOCATION) + + @override_config({"use_appservice_legacy_authorization": True}) + def test_query_3pe_authenticates_token_via_param(self) -> None: + """ + Tests that 3pe queries to the appservice are authenticated + with the appservice's token. + """ + + SUCCESS_RESULT_USER = [ + { + "protocol": PROTOCOL, + "userid": "@a:user", + "fields": { + "more": "fields", + }, + } + ] + SUCCESS_RESULT_LOCATION = [ + { + "protocol": PROTOCOL, + "alias": "#a:room", + "fields": { + "more": "fields", + }, + } + ] + + URL_USER = f"{URL}/_matrix/app/v1/thirdparty/user/{PROTOCOL}" + URL_LOCATION = f"{URL}/_matrix/app/v1/thirdparty/location/{PROTOCOL}" + + self.request_url = None + + async def get_json( + url: str, + args: Mapping[Any, Any], + headers: Optional[ + Mapping[Union[str, bytes], Sequence[Union[str, bytes]]] + ] = None, + ) -> List[JsonDict]: + # Ensure the access token is passed as both a query param and in the headers. + if not args.get(b"access_token"): + raise RuntimeError("Access token should be provided in query params.") + if not headers or not headers.get("Authorization"): + raise RuntimeError("Access token should be provided in auth headers.") + self.assertEqual(args.get(b"access_token"), TOKEN) + self.assertEqual(headers.get("Authorization"), [f"Bearer {TOKEN}"]) self.request_url = url if url == URL_USER: return SUCCESS_RESULT_USER From d98a43d9226cbb4b9ab5ad3abd9b630548c2f09f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 4 Aug 2023 07:47:18 -0400 Subject: [PATCH 291/562] Stabilize support for MSC3970: updated transaction semantics (scope to `device_id`) (#15629) For now this maintains compatibility with old Synapses by falling back to using transaction semantics on a per-access-token basis. A future version of Synapse will drop support for this. --- changelog.d/15629.feature | 1 + synapse/config/experimental.py | 9 ----- synapse/events/utils.py | 42 ++++++++++++------------ synapse/handlers/message.py | 12 +++---- synapse/rest/client/transactions.py | 12 +++---- synapse/server.py | 4 +-- synapse/storage/databases/main/events.py | 15 ++++----- synapse/storage/schema/__init__.py | 5 ++- synapse/types/__init__.py | 7 ++-- 9 files changed, 48 insertions(+), 59 deletions(-) create mode 100644 changelog.d/15629.feature
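To summarise the new scoping rules before the diff: a simplified, illustrative sketch of the transaction-key derivation (condensed from the `HttpTransactionCache._get_transaction_key` hunk below; the `requester` attributes mirror `synapse.types.Requester`):

```python
from typing import Hashable


def transaction_key(path: str, requester) -> Hashable:
    if requester.app_service is not None:
        # Appservice requests carry no device ID; scope by appservice ID.
        return (path, "appservice", requester.app_service.id)
    if requester.device_id:
        # Spec semantics: transaction IDs are scoped to the user's device.
        return (path, "user", requester.user, requester.device_id)
    # Tokens without a device, e.g. those minted via the admin API, fall
    # back to being scoped by the access token ID.
    return (path, "user_admin", requester.access_token_id)
```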
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 1695ed8ca3..ac9449b18f 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -216,12 +216,6 @@ class MSC3861: ("session_lifetime",), ) - if not root.experimental.msc3970_enabled: - raise ConfigError( - "experimental_features.msc3970_enabled must be 'true' when OAuth delegation is enabled", - ("experimental_features", "msc3970_enabled"), - ) - @attr.s(auto_attribs=True, frozen=True, slots=True) class MSC3866Config: @@ -397,9 +391,6 @@ class ExperimentalConfig(Config): "Invalid MSC3861 configuration", ("experimental", "msc3861") ) from exc - # MSC3970: Scope transaction IDs to devices - self.msc3970_enabled = experimental.get("msc3970_enabled", self.msc3861.enabled) - # Check that none of the other config options conflict with MSC3861 when enabled self.msc3861.check_config_conflicts(self.root) diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 967a6c245b..52acb21955 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -394,7 +394,6 @@ def serialize_event( time_now_ms: int, *, config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG, - msc3970_enabled: bool = False, ) -> JsonDict: """Serialize event for clients @@ -402,8 +401,6 @@ def serialize_event( e time_now_ms config: Event serialization config - msc3970_enabled: Whether MSC3970 is enabled. It changes whether we should - include the `transaction_id` in the event's `unsigned` section. Returns: The serialized event dictionary. @@ -429,38 +426,46 @@ def serialize_event( e.unsigned["redacted_because"], time_now_ms, config=config, - msc3970_enabled=msc3970_enabled, ) # If we have a txn_id saved in the internal_metadata, we should include it in the # unsigned section of the event if it was sent by the same session as the one # requesting the event. txn_id: Optional[str] = getattr(e.internal_metadata, "txn_id", None) - if txn_id is not None and config.requester is not None: - # For the MSC3970 rules to be applied, we *need* to have the device ID in the - # event internal metadata. Since we were not recording them before, if it hasn't - # been recorded, we fallback to the old behaviour. + if ( + txn_id is not None + and config.requester is not None + and config.requester.user.to_string() == e.sender + ): + # Some events do not have the device ID stored in the internal metadata, + # this includes old events as well as those created by appservice, guests, + # or with tokens minted with the admin API. For those events, fallback + # to using the access token instead. event_device_id: Optional[str] = getattr(e.internal_metadata, "device_id", None) - if msc3970_enabled and event_device_id is not None: + if event_device_id is not None: if event_device_id == config.requester.device_id: d["unsigned"]["transaction_id"] = txn_id else: - # The pre-MSC3970 behaviour is to only include the transaction ID if the - # event was sent from the same access token. For regular users, we can use - # the access token ID to determine this. For guests, we can't, but since - # each guest only has one access token, we can just check that the event was - # sent by the same user as the one requesting the event. + # Fallback behaviour: only include the transaction ID if the event + # was sent from the same access token. + # + # For regular users, the access token ID can be used to determine this. + # This includes access tokens minted with the admin API. 
+ # + # For guests and appservice users, we can't check the access token ID + # so assume it is the same session. event_token_id: Optional[int] = getattr( e.internal_metadata, "token_id", None ) - if config.requester.user.to_string() == e.sender and ( + if ( ( event_token_id is not None and config.requester.access_token_id is not None and event_token_id == config.requester.access_token_id ) or config.requester.is_guest + or config.requester.app_service ): d["unsigned"]["transaction_id"] = txn_id @@ -504,9 +509,6 @@ class EventClientSerializer: clients. """ - def __init__(self, *, msc3970_enabled: bool = False): - self._msc3970_enabled = msc3970_enabled - def serialize_event( self, event: Union[JsonDict, EventBase], @@ -531,9 +533,7 @@ class EventClientSerializer: if not isinstance(event, EventBase): return event - serialized_event = serialize_event( - event, time_now, config=config, msc3970_enabled=self._msc3970_enabled - ) + serialized_event = serialize_event(event, time_now, config=config) # Check if there are any bundled aggregations to include with the event. if bundle_aggregations: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index c656e07d37..d485f21e49 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -561,8 +561,6 @@ class EventCreationHandler: expiry_ms=30 * 60 * 1000, ) - self._msc3970_enabled = hs.config.experimental.msc3970_enabled - async def create_event( self, requester: Requester, @@ -897,9 +895,8 @@ class EventCreationHandler: """ existing_event_id = None - if self._msc3970_enabled and requester.device_id: - # When MSC3970 is enabled, we lookup for events sent by the same device first, - # and fallback to the old behaviour if none were found. + # According to the spec, transactions are scoped to a user's device ID. + if requester.device_id: existing_event_id = ( await self.store.get_event_id_from_transaction_id_and_device_id( room_id, @@ -911,8 +908,9 @@ class EventCreationHandler: if existing_event_id: return existing_event_id - # Pre-MSC3970, we looked up for events that were sent by the same session by - # using the access token ID. + # Some requesters don't have device IDs (appservice, guests, and access + # tokens minted with the admin API), fallback to checking the access token + # ID, which should be close enough. if requester.access_token_id: existing_event_id = ( await self.store.get_event_id_from_transaction_id_and_token_id( diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 0d8a63d8be..3d814c404d 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -50,8 +50,6 @@ class HttpTransactionCache: # for at *LEAST* 30 mins, and at *MOST* 60 mins. self.cleaner = self.clock.looping_call(self._cleanup, CLEANUP_PERIOD_MS) - self._msc3970_enabled = hs.config.experimental.msc3970_enabled - def _get_transaction_key(self, request: IRequest, requester: Requester) -> Hashable: """A helper function which returns a transaction key that can be used with TransactionCache for idempotent requests. @@ -78,18 +76,20 @@ class HttpTransactionCache: elif requester.app_service is not None: return (path, "appservice", requester.app_service.id) - # With MSC3970, we use the user ID and device ID as the transaction key - elif self._msc3970_enabled: + # Use the user ID and device ID as the transaction key.
+ elif requester.device_id: assert requester.user, "Requester must have a user" assert requester.device_id, "Requester must have a device_id" return (path, "user", requester.user, requester.device_id) - # Otherwise, the pre-MSC3970 behaviour is to use the access token ID + # Some requsters don't have device IDs, these are mostly handled above + # (appservice and guest users), but does not cover access tokens minted + # by the admin API. Use the access token ID instead. else: assert ( requester.access_token_id is not None ), "Requester must have an access_token_id" - return (path, "user", requester.access_token_id) + return (path, "user_admin", requester.access_token_id) def fetch_or_execute_request( self, diff --git a/synapse/server.py b/synapse/server.py index 8430f99ef2..e753ff0377 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -785,9 +785,7 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_event_client_serializer(self) -> EventClientSerializer: - return EventClientSerializer( - msc3970_enabled=self.config.experimental.msc3970_enabled - ) + return EventClientSerializer() @cache_in_self def get_password_policy_handler(self) -> PasswordPolicyHandler: diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index bd3f14fb71..c1353b18c1 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -127,8 +127,6 @@ class PersistEventsStore: self._backfill_id_gen: AbstractStreamIdGenerator = self.store._backfill_id_gen self._stream_id_gen: AbstractStreamIdGenerator = self.store._stream_id_gen - self._msc3970_enabled = hs.config.experimental.msc3970_enabled - @trace async def _persist_events_and_state_updates( self, @@ -1012,9 +1010,11 @@ class PersistEventsStore: ) ) - # Pre-MSC3970, we rely on the access_token_id to scope the txn_id for events. - # Since this is an experimental flag, we still store the mapping even if the - # flag is disabled. + # Synapse usually relies on the device_id to scope transactions for events, + # except for users without device IDs (appservice, guests, and access + # tokens minted with the admin API) which use the access token ID instead. + # + # TODO https://github.com/matrix-org/synapse/issues/16042 if to_insert_token_id: self.db_pool.simple_insert_many_txn( txn, @@ -1030,10 +1030,7 @@ class PersistEventsStore: values=to_insert_token_id, ) - # With MSC3970, we rely on the device_id instead to scope the txn_id for events. - # We're only inserting if MSC3970 is *enabled*, because else the pre-MSC3970 - # behaviour would allow for a UNIQUE constraint violation on this table - if to_insert_device_id and self._msc3970_enabled: + if to_insert_device_id: self.db_pool.simple_insert_many_txn( txn, table="event_txn_id_device_id", diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index d3ec648f6d..7de9949a5b 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-SCHEMA_VERSION = 79 # remember to update the list below when updating +SCHEMA_VERSION = 80 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -110,6 +110,9 @@ Changes in SCHEMA_VERSION = 78 Changes in SCHEMA_VERSION = 79 - Add tables to handle in DB read-write locks. - Add some mitigations for a painful race between foreground and background updates, cf #15677. + +Changes in SCHEMA_VERSION = 80 + - The event_txn_id_device_id is always written to for new events. """ diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index fdfd465c8d..39a1ae4ac3 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -117,11 +117,12 @@ class Requester: Attributes: user: id of the user making the request - access_token_id: *ID* of the access token used for this - request, or None if it came via the appservice API or similar + access_token_id: *ID* of the access token used for this request, or + None for appservices, guests, and tokens generated by the admin API is_guest: True if the user making this request is a guest user shadow_banned: True if the user making this request has been shadow-banned. - device_id: device_id which was set at authentication time + device_id: device_id which was set at authentication time, or + None for appservices, guests, and tokens generated by the admin API app_service: the AS requesting on behalf of the user authenticated_entity: The entity that authenticated when making the request. This is different to the user_id when an admin user or the server is From 84ae2e3f6fb86115df767bb2f1fb16ac2fbaa7c3 Mon Sep 17 00:00:00 2001 From: Shay Date: Fri, 4 Aug 2023 10:49:54 -0700 Subject: [PATCH 292/562] Fix deletion for Dehydrated Devices (#16046) --- changelog.d/16046.bugfix | 1 + synapse/handlers/device.py | 16 ++++ synapse/rest/client/devices.py | 14 ++- tests/rest/client/test_devices.py | 139 +++++++++++++++++++++++++++++- 4 files changed, 165 insertions(+), 5 deletions(-) create mode 100644 changelog.d/16046.bugfix diff --git a/changelog.d/16046.bugfix b/changelog.d/16046.bugfix new file mode 100644 index 0000000000..ce5a9ae4b5 --- /dev/null +++ b/changelog.d/16046.bugfix @@ -0,0 +1 @@ +Fix deletion in dehydrated devices v2. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index f3a713f5fa..b7bf70a72d 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -722,6 +722,22 @@ class DeviceHandler(DeviceWorkerHandler): return {"success": True} + async def delete_dehydrated_device(self, user_id: str, device_id: str) -> None: + """ + Delete a stored dehydrated device. 
+ + Args: + user_id: the user_id to delete the device from + device_id: id of the dehydrated device to delete + """ + success = await self.store.remove_dehydrated_device(user_id, device_id) + + if not success: + raise errors.NotFoundError() + + await self.delete_devices(user_id, [device_id]) + await self.store.delete_e2e_keys_by_device(user_id=user_id, device_id=device_id) + @wrap_as_background_process("_handle_new_device_update_async") async def _handle_new_device_update_async(self) -> None: """Called when we have a new local device list update that we need to diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index 690d2ec406..dd3f7fd666 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -513,10 +513,8 @@ class DehydratedDeviceV2Servlet(RestServlet): if dehydrated_device is not None: (device_id, device_data) = dehydrated_device - result = await self.device_handler.rehydrate_device( - requester.user.to_string(), - self.auth.get_access_token_from_request(request), - device_id, + await self.device_handler.delete_dehydrated_device( + requester.user.to_string(), device_id ) result = {"device_id": device_id} @@ -538,6 +536,14 @@ class DehydratedDeviceV2Servlet(RestServlet): requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() + old_dehydrated_device = await self.device_handler.get_dehydrated_device(user_id) + + # if an old device exists, delete it before creating a new one + if old_dehydrated_device: + await self.device_handler.delete_dehydrated_device( + user_id, old_dehydrated_device[0] + ) + device_info = submission.dict() if "device_keys" not in device_info.keys(): raise SynapseError( diff --git a/tests/rest/client/test_devices.py b/tests/rest/client/test_devices.py index b7d420cfec..3cf29c10ea 100644 --- a/tests/rest/client/test_devices.py +++ b/tests/rest/client/test_devices.py @@ -379,4 +379,141 @@ class DehydratedDeviceTestCase(unittest.HomeserverTestCase): access_token=token, shorthand=False, ) - self.assertEqual(channel.code, 404) + self.assertEqual(channel.code, 401) + + @unittest.override_config( + {"experimental_features": {"msc2697_enabled": False, "msc3814_enabled": True}} + ) + def test_msc3814_dehydrated_device_delete_works(self) -> None: + user = self.register_user("mikey", "pass") + token = self.login(user, "pass", device_id="device1") + content: JsonDict = { + "device_data": { + "algorithm": "m.dehydration.v1.olm", + }, + "device_id": "device2", + "initial_device_display_name": "foo bar", + "device_keys": { + "user_id": "@mikey:test", + "device_id": "device2", + "valid_until_ts": "80", + "algorithms": [ + "m.olm.curve25519-aes-sha2", + ], + "keys": { + ":": "", + }, + "signatures": { + "": {":": ""} + }, + }, + } + channel = self.make_request( + "PUT", + "_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device", + content=content, + access_token=token, + shorthand=False, + ) + self.assertEqual(channel.code, 200) + device_id = channel.json_body.get("device_id") + assert device_id is not None + self.assertIsInstance(device_id, str) + self.assertEqual("device2", device_id) + + # ensure that keys were uploaded and available + channel = self.make_request( + "POST", + "/_matrix/client/r0/keys/query", + { + "device_keys": { + user: ["device2"], + }, + }, + token, + ) + self.assertEqual( + channel.json_body["device_keys"][user]["device2"]["keys"], + { + ":": "", + }, + ) + + # delete the dehydrated device + channel = self.make_request( + "DELETE", + 
"_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device", + access_token=token, + shorthand=False, + ) + self.assertEqual(channel.code, 200) + + # ensure that keys are no longer available for deleted device + channel = self.make_request( + "POST", + "/_matrix/client/r0/keys/query", + { + "device_keys": { + user: ["device2"], + }, + }, + token, + ) + self.assertEqual(channel.json_body["device_keys"], {"@mikey:test": {}}) + + # check that an old device is deleted when user PUTs a new device + # First, create a device + content["device_id"] = "device3" + content["device_keys"]["device_id"] = "device3" + channel = self.make_request( + "PUT", + "_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device", + content=content, + access_token=token, + shorthand=False, + ) + self.assertEqual(channel.code, 200) + device_id = channel.json_body.get("device_id") + assert device_id is not None + self.assertIsInstance(device_id, str) + self.assertEqual("device3", device_id) + + # create a second device without deleting first device + content["device_id"] = "device4" + content["device_keys"]["device_id"] = "device4" + channel = self.make_request( + "PUT", + "_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device", + content=content, + access_token=token, + shorthand=False, + ) + self.assertEqual(channel.code, 200) + device_id = channel.json_body.get("device_id") + assert device_id is not None + self.assertIsInstance(device_id, str) + self.assertEqual("device4", device_id) + + # check that the second device that was created is what is returned when we GET + channel = self.make_request( + "GET", + "_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device", + access_token=token, + shorthand=False, + ) + self.assertEqual(channel.code, 200) + returned_device_id = channel.json_body["device_id"] + self.assertEqual(returned_device_id, "device4") + + # and that if we query the keys for the first device they are not there + channel = self.make_request( + "POST", + "/_matrix/client/r0/keys/query", + { + "device_keys": { + user: ["device3"], + }, + }, + token, + ) + self.assertEqual(channel.json_body["device_keys"], {"@mikey:test": {}}) From 4f6da0dba01858e2114ef292dc90bef5cb3576a4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Aug 2023 10:52:04 +0000 Subject: [PATCH 293/562] Bump phonenumbers from 8.13.14 to 8.13.18 (#16076) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 75eac9dc7f..ec01a663e5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1610,13 +1610,13 @@ files = [ [[package]] name = "phonenumbers" -version = "8.13.14" +version = "8.13.18" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." 
optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.14-py2.py3-none-any.whl", hash = "sha256:a4b20b6ba7dd402728f5cc8e86e1f29b1a873af45f5381dbee7e3083af497ff6"}, - {file = "phonenumbers-8.13.14.tar.gz", hash = "sha256:5fa952b4abf9fccdaf1f130d96114a520c48890d4091b50a064e22c0fdc12dec"}, + {file = "phonenumbers-8.13.18-py2.py3-none-any.whl", hash = "sha256:3d802739a22592e4127139349937753dee9b6a20bdd5d56847cd885bdc766b1f"}, + {file = "phonenumbers-8.13.18.tar.gz", hash = "sha256:b360c756252805d44b447b5bca6d250cf6bd6c69b6f0f4258f3bfe5ab81bef69"}, ] [[package]] From ec8499206efd340287234e8caf76b56600038e89 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Aug 2023 10:59:04 +0000 Subject: [PATCH 294/562] Bump types-setuptools from 68.0.0.0 to 68.0.0.3 (#16079) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index ec01a663e5..a8951a42ab 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3052,13 +3052,13 @@ types-urllib3 = "*" [[package]] name = "types-setuptools" -version = "68.0.0.0" +version = "68.0.0.3" description = "Typing stubs for setuptools" optional = false python-versions = "*" files = [ - {file = "types-setuptools-68.0.0.0.tar.gz", hash = "sha256:fc958b4123b155ffc069a66d3af5fe6c1f9d0600c35c0c8444b2ab4147112641"}, - {file = "types_setuptools-68.0.0.0-py3-none-any.whl", hash = "sha256:cc00e09ba8f535362cbe1ea8b8407d15d14b59c57f4190cceaf61a9e57616446"}, + {file = "types-setuptools-68.0.0.3.tar.gz", hash = "sha256:d57ae6076100b5704b3cc869fdefc671e1baf4c2cd6643f84265dfc0b955bf05"}, + {file = "types_setuptools-68.0.0.3-py3-none-any.whl", hash = "sha256:fec09e5c18264c5c09351c00be01a34456fb7a88e457abe97401325f84ad9d36"}, ] [[package]] From 34b5db1fbc583b53ae89b3b3207144e439548593 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Aug 2023 08:15:00 -0400 Subject: [PATCH 295/562] Bump furo from 2023.5.20 to 2023.7.26 (#16077) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index a8951a42ab..78516c9dc0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -558,13 +558,13 @@ dev = ["Sphinx", "coverage", "flake8", "lxml", "lxml-stubs", "memory-profiler", [[package]] name = "furo" -version = "2023.5.20" +version = "2023.7.26" description = "A clean customisable Sphinx documentation theme." 
optional = false python-versions = ">=3.7" files = [ - {file = "furo-2023.5.20-py3-none-any.whl", hash = "sha256:594a8436ddfe0c071f3a9e9a209c314a219d8341f3f1af33fdf7c69544fab9e6"}, - {file = "furo-2023.5.20.tar.gz", hash = "sha256:40e09fa17c6f4b22419d122e933089226dcdb59747b5b6c79363089827dea16f"}, + {file = "furo-2023.7.26-py3-none-any.whl", hash = "sha256:1c7936929ec57c5ddecc7c85f07fa8b2ce536b5c89137764cca508be90e11efd"}, + {file = "furo-2023.7.26.tar.gz", hash = "sha256:257f63bab97aa85213a1fa24303837a3c3f30be92901ec732fea74290800f59e"}, ] [package.dependencies] From eca592b121895b9c4e285523ddb8fdeb9380eea4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Aug 2023 12:19:45 +0000 Subject: [PATCH 296/562] Bump types-opentracing from 2.4.10.5 to 2.4.10.6 (#16078) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 78516c9dc0..2aae6c6b4e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2980,13 +2980,13 @@ files = [ [[package]] name = "types-opentracing" -version = "2.4.10.5" +version = "2.4.10.6" description = "Typing stubs for opentracing" optional = false python-versions = "*" files = [ - {file = "types-opentracing-2.4.10.5.tar.gz", hash = "sha256:852d13ab1324832835d50c00cfd58b9267f0e79ec3189e5664c2a90c26880fd4"}, - {file = "types_opentracing-2.4.10.5-py3-none-any.whl", hash = "sha256:8f12ab4dce3e298a8e6655da9a6d52171e7a275357eae4cec22a1663d94023a7"}, + {file = "types-opentracing-2.4.10.6.tar.gz", hash = "sha256:87a1bdfce9de5e555e30497663583b9b9c3bb494d029ef9806aa1f137c19e744"}, + {file = "types_opentracing-2.4.10.6-py3-none-any.whl", hash = "sha256:25914c834db033a4a38fc322df0b5e5e14503b0ac97f78304ae180d721555e97"}, ] [[package]] From 8da3c2185baa3f8fd2cb4e108e5a0a3972ba900c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Aug 2023 08:23:20 -0400 Subject: [PATCH 297/562] Bump regex from 1.9.1 to 1.9.3 (#16073) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d28fe3c228..ef1de44937 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -291,9 +291,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.1" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" +checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" dependencies = [ "aho-corasick", "memchr", @@ -303,9 +303,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.2" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf" +checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" dependencies = [ "aho-corasick", "memchr", @@ -314,9 +314,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846" +checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" [[package]] name = "ryu" From 340f08c6f784439bce2e220c1cfb060e8f41c29f Mon Sep 17 
00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Aug 2023 08:27:55 -0400 Subject: [PATCH 298/562] Bump serde from 1.0.179 to 1.0.183 (#16074) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ef1de44937..45e0f116e6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -332,22 +332,22 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.179" +version = "1.0.183" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a5bf42b8d227d4abf38a1ddb08602e229108a517cd4e5bb28f9c7eaafdce5c0" +checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.179" +version = "1.0.183" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "741e124f5485c7e60c03b043f79f320bff3527f4bbf12cf3831750dc46a0ec2c" +checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816" dependencies = [ "proc-macro2", "quote", - "syn 2.0.25", + "syn 2.0.28", ] [[package]] @@ -386,9 +386,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.25" +version = "2.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2" +checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" dependencies = [ "proc-macro2", "quote", From b57630c507611a5e2d3ee13a966877199624d29a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Aug 2023 14:18:09 +0000 Subject: [PATCH 299/562] Bump jsonschema from 4.18.3 to 4.19.0 (#16081) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 2aae6c6b4e..71b47a5805 100644 --- a/poetry.lock +++ b/poetry.lock @@ -973,13 +973,13 @@ i18n = ["Babel (>=2.7)"] [[package]] name = "jsonschema" -version = "4.18.3" +version = "4.19.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" files = [ - {file = "jsonschema-4.18.3-py3-none-any.whl", hash = "sha256:aab78b34c2de001c6b692232f08c21a97b436fe18e0b817bf0511046924fceef"}, - {file = "jsonschema-4.18.3.tar.gz", hash = "sha256:64b7104d72efe856bea49ca4af37a14a9eba31b40bb7238179f3803130fd34d9"}, + {file = "jsonschema-4.19.0-py3-none-any.whl", hash = "sha256:043dc26a3845ff09d20e4420d6012a9c91c9aa8999fa184e7efcfeccb41e32cb"}, + {file = "jsonschema-4.19.0.tar.gz", hash = "sha256:6e1e7569ac13be8139b2dd2c21a55d350066ee3f80df06c608b398cdc6f30e8f"}, ] [package.dependencies] From 9d3713d6d512d30a42456c9af25a3ab1a8865406 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 7 Aug 2023 18:36:04 +0100 Subject: [PATCH 300/562] Add notes describing Synapse's streams (#16015) Co-authored-by: Patrick Cloke --- changelog.d/16015.doc | 1 + docs/SUMMARY.md | 1 + .../synapse_architecture/streams.md | 157 ++++++++++++++++++ 3 files changed, 159 insertions(+) create mode 100644 changelog.d/16015.doc create mode 100644 docs/development/synapse_architecture/streams.md diff --git a/changelog.d/16015.doc b/changelog.d/16015.doc new file mode 100644 index 0000000000..1113d00dc6 --- 
/dev/null
+++ b/changelog.d/16015.doc
@@ -0,0 +1 @@
+Add an internal documentation page describing the ["streams" used within Synapse](https://matrix-org.github.io/synapse/v1.90/development/synapse_architecture/streams.html).
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index a8e5ddad9d..31b3032029 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -97,6 +97,7 @@
     - [Cancellation](development/synapse_architecture/cancellation.md)
   - [Log Contexts](log_contexts.md)
   - [Replication](replication.md)
+  - [Streams](development/synapse_architecture/streams.md)
   - [TCP Replication](tcp_replication.md)
   - [Faster remote joins](development/synapse_architecture/faster_joins.md)
 - [Internal Documentation](development/internal_documentation/README.md)
diff --git a/docs/development/synapse_architecture/streams.md b/docs/development/synapse_architecture/streams.md
new file mode 100644
index 0000000000..bee0b8a8c0
--- /dev/null
+++ b/docs/development/synapse_architecture/streams.md
@@ -0,0 +1,157 @@
+## Streams
+
+Synapse has a concept of "streams", which are roughly described in [`id_generators.py`](
+    https://github.com/matrix-org/synapse/blob/develop/synapse/storage/util/id_generators.py
+).
+Generally speaking, streams are a series of notifications that something in Synapse's database has changed that the application might need to respond to.
+For example:
+
+- The events stream reports new events (PDUs) that Synapse creates, or that Synapse accepts from another homeserver.
+- The account data stream reports changes to users' [account data](https://spec.matrix.org/v1.7/client-server-api/#client-config).
+- The to-device stream reports when a device has a new [to-device message](https://spec.matrix.org/v1.7/client-server-api/#send-to-device-messaging).
+
+See [`synapse.replication.tcp.streams`](
+    https://github.com/matrix-org/synapse/blob/develop/synapse/replication/tcp/streams/__init__.py
+) for the full list of streams.
+
+It is very helpful to understand the streams mechanism when working on any part of Synapse that needs to respond to changes, especially if those changes are made by different workers.
+To that end, let's describe streams formally, paraphrasing from the docstring of [`AbstractStreamIdGenerator`](
+    https://github.com/matrix-org/synapse/blob/a719b703d9bd0dade2565ddcad0e2f3a7a9d4c37/synapse/storage/util/id_generators.py#L96
+).
+
+### Definition
+
+A stream is an append-only log `T1, T2, ..., Tn, ...` of facts[^1] which grows over time.
+Only "writers" can add facts to a stream, and there may be multiple writers.
+
+Each fact has an ID, called its "stream ID".
+Readers should only process facts in ascending stream ID order.
+
+Roughly speaking, each stream is backed by a database table.
+It should have a `stream_id` (or similar) bigint column holding stream IDs, plus additional columns as necessary to describe the fact.
+Typically, a fact is expressed with a single row in its backing table.[^2]
+Within a stream, no two facts may have the same stream_id.
+
+> _Aside_. Some additional notes on streams' backing tables.
+>
+> 1. Rich would like to [ditch the backing tables](https://github.com/matrix-org/synapse/issues/13456).
+> 2. The backing tables may have other uses.
+     > For example, the events table backs the events stream, and is read when processing new events.
+     > But old rows are read from the table all the time, whenever Synapse needs to look up some facts about an event.
+> 3. Rich suspects that sometimes the stream is backed by multiple tables, so the stream proper is the union of those tables.
+
+Stream writers can "reserve" a stream ID, and then later mark it as having been completed.
+Stream writers need to track the completion of each stream fact.
+In the happy case, completion means a fact has been written to the stream table.
+But unhappy cases (e.g. transaction rollback due to an error) also count as completion.
+Once completed, the rows written with that stream ID are fixed, and no new rows
+will be inserted with that ID.
+
+### Current stream ID
+
+For any given stream reader (including writers themselves), we may define a per-writer current stream ID:
+
+> The current stream ID _for a writer W_ is the largest stream ID such that
+> all transactions added by W with equal or smaller ID have completed.
+
+Similarly, there is a "linear" notion of current stream ID:
+
+> The "linear" current stream ID is the largest stream ID such that
+> all facts (added by any writer) with equal or smaller ID have completed.
+
+Because different stream readers A and B learn about new facts at different times, A and B may disagree about current stream IDs.
+Put differently: we should think of stream readers as being independent of each other, proceeding through a stream of facts at different rates.
+
+**NB.** For both senses of "current", note that if a writer opens a transaction that never completes, the current stream ID will never advance beyond that writer's last written stream ID.
+
+For single-writer streams, the per-writer current ID and the linear current ID are the same.
+Both senses of current ID are monotonic, but they may "skip" or jump over IDs because facts complete out of order.
+
+
+_Example_.
+Consider a single-writer stream which is initially at ID 1.
+
+| Action     | Current stream ID | Notes                                           |
+|------------|-------------------|-------------------------------------------------|
+|            | 1                 |                                                 |
+| Reserve 2  | 1                 |                                                 |
+| Reserve 3  | 1                 |                                                 |
+| Complete 3 | 1                 | current ID unchanged, waiting for 2 to complete |
+| Complete 2 | 3                 | current ID jumps from 1 -> 3                    |
+| Reserve 4  | 3                 |                                                 |
+| Reserve 5  | 3                 |                                                 |
+| Reserve 6  | 3                 |                                                 |
+| Complete 5 | 3                 |                                                 |
+| Complete 4 | 5                 | current ID jumps 3->5, even though 6 is pending |
+| Complete 6 | 6                 |                                                 |
+
+
+### Multi-writer streams
+
+There are two ways to view a multi-writer stream.
+
+1. Treat it as a collection of distinct single-writer streams, one
+   for each writer.
+2. Treat it as a single stream.
+
+The single stream (option 2) is conceptually simpler, and easier to represent (a single stream id).
+However, it requires each reader to know about the entire set of writers, to ensure that readers don't erroneously advance their current stream position too early and miss a fact from an unknown writer.
+In contrast, multiple parallel streams (option 1) are more complex, requiring more state to represent (map from writer to stream id).
+The payoff for doing so is that readers can "peek" ahead to facts that completed on one writer no matter the state of the others, reducing latency.
+
+Note that a multi-writer stream can be viewed in both ways.
+For example, the events stream is treated as multiple single-writer streams (option 1) by the sync handler, so that events are sent to clients as soon as possible.
+But the background process that works through events treats them as a single linear stream.
+
+Another useful example is the cache invalidation stream.
+The facts this stream holds are instructions to "you should now invalidate these cache entries".
+We only ever treat this as multiple single-writer streams as there is no important ordering between cache invalidations.
+(Invalidations are self-contained facts; and the invalidations commute/are idempotent).
+
+### Writing to streams
+
+Writers need to track:
+ - their current position (i.e. their own per-writer stream ID).
+ - their facts currently awaiting completion.
+
+At startup,
+ - the current position of that writer can be found by querying the database (which suggests that facts need to be written to the database atomically, in a transaction); and
+ - there are no facts awaiting completion.
+
+To reserve a stream ID, call [`nextval`](https://www.postgresql.org/docs/current/functions-sequence.html) on the appropriate postgres sequence.
+
+To write a fact to the stream: insert the appropriate rows to the appropriate backing table.
+
+To complete a fact, first remove it from your map of facts currently awaiting completion.
+Then, if no earlier fact is awaiting completion, the writer can advance its current position in that stream.
+Upon doing so it should emit an `RDATA` message[^3], once for every fact between the old and the new stream ID.
+
+### Subscribing to streams
+
+Readers need to track the current position of every writer.
+
+At startup, they can find this by contacting each writer with a `REPLICATE` message,
+requesting that all writers reply describing their current position in their streams.
+Writers reply with a `POSITION` message.
+
+To learn about new facts, readers should listen for `RDATA` messages and process them to respond to the new fact.
+The `RDATA` itself is not a self-contained representation of the fact;
+readers will have to query the stream tables for the full details.
+Readers must also advance their record of the writer's current position for that stream.
+
+# Summary
+
+In a nutshell: we have an append-only log with a "buffer/scratchpad" at the end where we have to wait for the sequence to be linear and contiguous.
+
+
+---
+
+[^1]: we use the word _fact_ here for two reasons.
+Firstly, the word "event" is already heavily overloaded (PDUs, EDUs, account data, ...) and we don't need to make that worse.
+Secondly, "fact" emphasises that the things we append to a stream cannot change after the fact.
+
+[^2]: A fact might be expressed with 0 rows, e.g. if we opened a transaction to persist an event, but failed and rolled the transaction back before marking the fact as completed.
+In principle a fact might be expressed with 2 or more rows; if so, each of those rows should share the fact's stream ID.
+
+[^3]: This communication used to happen directly with the writers [over TCP](../../tcp_replication.md);
+nowadays it's done via Redis's Pubsub.
From 81a6f8c9ae0241afa9973da2f53efc2467e61c6b Mon Sep 17 00:00:00 2001
From: Shay
Date: Mon, 7 Aug 2023 10:37:08 -0700
Subject: [PATCH 301/562] Drop backwards compat hack for event serialization
 (#16069)

---
 changelog.d/16069.misc     |  1 +
 synapse/events/snapshot.py | 12 +-----------
 2 files changed, 2 insertions(+), 11 deletions(-)
 create mode 100644 changelog.d/16069.misc

diff --git a/changelog.d/16069.misc b/changelog.d/16069.misc
new file mode 100644
index 0000000000..f59ead8638
--- /dev/null
+++ b/changelog.d/16069.misc
@@ -0,0 +1 @@
+Drop backwards compat hack for event serialization.
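[Editor's note: the "current stream ID" bookkeeping described in the streams documentation above can be made concrete with a minimal sketch of the single-writer case. The names here (`SingleWriterPosition`, `reserve`, `complete`) are invented for illustration and do not appear in Synapse; the real generators live in `synapse/storage/util/id_generators.py` and must additionally persist positions in the database and reserve IDs via Postgres sequences, as the documentation explains.]

```python
from typing import Set


class SingleWriterPosition:
    """Sketch: IDs are reserved in order but may complete in any order;
    the current position only advances past an ID once every smaller
    reserved ID has also completed."""

    def __init__(self, current: int = 1) -> None:
        self.current = current  # largest ID with no incomplete predecessor
        self._next_id = current + 1  # next ID to hand out
        self._unfinished: Set[int] = set()  # reserved but not yet completed

    def reserve(self) -> int:
        stream_id = self._next_id
        self._next_id += 1
        self._unfinished.add(stream_id)
        return stream_id

    def complete(self, stream_id: int) -> None:
        self._unfinished.discard(stream_id)
        # Advance to just below the smallest unfinished ID, or all the way
        # to the last reserved ID if everything has completed.
        if self._unfinished:
            self.current = min(self._unfinished) - 1
        else:
            self.current = self._next_id - 1


# Reproduces the first rows of the worked example in streams.md:
pos = SingleWriterPosition(current=1)
two, three = pos.reserve(), pos.reserve()
pos.complete(three)
assert pos.current == 1  # unchanged, still waiting for 2 to complete
pos.complete(two)
assert pos.current == 3  # current ID jumps from 1 -> 3
```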
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index a43498ed4d..a9e3d4e556 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -186,9 +186,6 @@ class EventContext(UnpersistedEventContextBase): ), "app_service_id": self.app_service.id if self.app_service else None, "partial_state": self.partial_state, - # add dummy delta_ids and prev_group for backwards compatibility - "delta_ids": None, - "prev_group": None, } @staticmethod @@ -203,13 +200,6 @@ class EventContext(UnpersistedEventContextBase): Returns: The event context. """ - # workaround for backwards/forwards compatibility: if the input doesn't have a value - # for "state_group_deltas" just assign an empty dict - state_group_deltas = input.get("state_group_deltas", None) - if state_group_deltas: - state_group_deltas = _decode_state_group_delta(state_group_deltas) - else: - state_group_deltas = {} context = EventContext( # We use the state_group and prev_state_id stuff to pull the @@ -217,7 +207,7 @@ class EventContext(UnpersistedEventContextBase): storage=storage, state_group=input["state_group"], state_group_before_event=input["state_group_before_event"], - state_group_deltas=state_group_deltas, + state_group_deltas=_decode_state_group_delta(input["state_group_deltas"]), state_delta_due_to_event=_decode_state_dict( input["state_delta_due_to_event"] ), From 8af3f33d84b0f63cb9baab6c8616983222d75307 Mon Sep 17 00:00:00 2001 From: Shay Date: Mon, 7 Aug 2023 10:52:15 -0700 Subject: [PATCH 302/562] Fix endpoint improperly declaring support for MSC3814 (#16068) --- changelog.d/16068.misc | 1 + synapse/rest/client/devices.py | 18 ++++++++---------- 2 files changed, 9 insertions(+), 10 deletions(-) create mode 100644 changelog.d/16068.misc diff --git a/changelog.d/16068.misc b/changelog.d/16068.misc new file mode 100644 index 0000000000..341426a746 --- /dev/null +++ b/changelog.d/16068.misc @@ -0,0 +1 @@ +Fix endpoint improperly declaring support for MSC3814. diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index dd3f7fd666..51f17f80da 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -232,7 +232,7 @@ class DehydratedDeviceDataModel(RequestBodyModel): class DehydratedDeviceServlet(RestServlet): """Retrieve or store a dehydrated device. - Implements either MSC2697 or MSC3814. + Implements MSC2697. 
GET /org.matrix.msc2697.v2/dehydrated_device @@ -266,7 +266,12 @@ class DehydratedDeviceServlet(RestServlet): """ - def __init__(self, hs: "HomeServer", msc2697: bool = True): + PATTERNS = client_patterns( + "/org.matrix.msc2697.v2/dehydrated_device$", + releases=(), + ) + + def __init__(self, hs: "HomeServer"): super().__init__() self.hs = hs self.auth = hs.get_auth() @@ -274,13 +279,6 @@ class DehydratedDeviceServlet(RestServlet): assert isinstance(handler, DeviceHandler) self.device_handler = handler - self.PATTERNS = client_patterns( - "/org.matrix.msc2697.v2/dehydrated_device$" - if msc2697 - else "/org.matrix.msc3814.v1/dehydrated_device$", - releases=(), - ) - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) dehydrated_device = await self.device_handler.get_dehydrated_device( @@ -579,7 +577,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: if hs.config.worker.worker_app is None: DeviceRestServlet(hs).register(http_server) if hs.config.experimental.msc2697_enabled: - DehydratedDeviceServlet(hs, msc2697=True).register(http_server) + DehydratedDeviceServlet(hs).register(http_server) ClaimDehydratedDeviceServlet(hs).register(http_server) if hs.config.experimental.msc3814_enabled: DehydratedDeviceV2Servlet(hs).register(http_server) From f3dc6dc19f902b638c164097342136010f0769d1 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 8 Aug 2023 10:10:07 +0000 Subject: [PATCH 303/562] Remove old rows from the `cache_invalidation_stream_by_instance` table automatically. (This table is not used when Synapse is configured to use SQLite.) (#15868) * Add a cache invalidation clean-up task * Run the cache invalidation stream clean-up on the background worker * Tune down * call_later is in millis! * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) * fixup! Add a cache invalidation clean-up task * Update synapse/storage/databases/main/cache.py Co-authored-by: Eric Eastwood * Update synapse/storage/databases/main/cache.py Co-authored-by: Eric Eastwood * MILLISEC -> MS * Expand on comment * Move and tweak comment about Postgres * Use `wrap_as_background_process` --------- Signed-off-by: Olivier Wilkinson (reivilibre) Co-authored-by: Eric Eastwood --- changelog.d/15868.feature | 1 + synapse/storage/databases/main/cache.py | 130 ++++++++++++++++++++++++ 2 files changed, 131 insertions(+) create mode 100644 changelog.d/15868.feature diff --git a/changelog.d/15868.feature b/changelog.d/15868.feature new file mode 100644 index 0000000000..a866bf5774 --- /dev/null +++ b/changelog.d/15868.feature @@ -0,0 +1 @@ +Remove old rows from the `cache_invalidation_stream_by_instance` table automatically (this table is unused in SQLite). 
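[Editor's note: the clean-up task added in the diff below reschedules itself on a short interval while there is still backlog to delete and on a long interval once it has caught up. A minimal sketch of that scheduling pattern follows; the names and the use of `threading.Timer` are stand-ins chosen for a self-contained example, whereas the real code below uses Synapse's `Clock.call_later` on the Twisted reactor and wraps the work as a background process.]

```python
import threading

CATCH_UP_INTERVAL_S = 60  # retry quickly while working through backlog
REGULAR_INTERVAL_S = 60 * 60  # otherwise run hourly


def schedule_cleanup(delete_one_batch) -> None:
    """delete_one_batch() deletes up to one batch of old rows and returns
    True if more deletable rows remain (i.e. we are still in backlog)."""

    def _run() -> None:
        in_backlog = delete_one_batch()
        # Pick the next interval based on whether we have caught up yet.
        interval = CATCH_UP_INTERVAL_S if in_backlog else REGULAR_INTERVAL_S
        threading.Timer(interval, _run).start()

    # Start on the short interval, as the diff below does, in case there
    # is an initial backlog to work through.
    threading.Timer(CATCH_UP_INTERVAL_S, _run).start()
```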
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index c940f864d1..2fbd389c71 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -18,6 +18,8 @@ import logging from typing import TYPE_CHECKING, Any, Collection, Iterable, List, Optional, Tuple from synapse.api.constants import EventTypes +from synapse.config._base import Config +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.replication.tcp.streams import BackfillStream, CachesStream from synapse.replication.tcp.streams.events import ( EventsStream, @@ -52,6 +54,21 @@ PURGE_HISTORY_CACHE_NAME = "ph_cache_fake" # As above, but for invalidating room caches on room deletion DELETE_ROOM_CACHE_NAME = "dr_cache_fake" +# How long between cache invalidation table cleanups, once we have caught up +# with the backlog. +REGULAR_CLEANUP_INTERVAL_MS = Config.parse_duration("1h") + +# How long between cache invalidation table cleanups, before we have caught +# up with the backlog. +CATCH_UP_CLEANUP_INTERVAL_MS = Config.parse_duration("1m") + +# Maximum number of cache invalidation rows to delete at once. +CLEAN_UP_MAX_BATCH_SIZE = 20_000 + +# Keep cache invalidations for 7 days +# (This is likely to be quite excessive.) +RETENTION_PERIOD_OF_CACHE_INVALIDATIONS_MS = Config.parse_duration("7d") + class CacheInvalidationWorkerStore(SQLBaseStore): def __init__( @@ -98,6 +115,18 @@ class CacheInvalidationWorkerStore(SQLBaseStore): else: self._cache_id_gen = None + # Occasionally clean up the cache invalidations stream table by deleting + # old rows. + # This is only applicable when Postgres is in use; this table is unused + # and not populated at all when SQLite is the active database engine. + if hs.config.worker.run_background_tasks and isinstance( + self.database_engine, PostgresEngine + ): + self.hs.get_clock().call_later( + CATCH_UP_CLEANUP_INTERVAL_MS / 1000, + self._clean_up_cache_invalidation_wrapper, + ) + async def get_all_updated_caches( self, instance_name: str, last_id: int, current_id: int, limit: int ) -> Tuple[List[Tuple[int, tuple]], int, bool]: @@ -554,3 +583,104 @@ class CacheInvalidationWorkerStore(SQLBaseStore): return self._cache_id_gen.get_current_token_for_writer(instance_name) else: return 0 + + @wrap_as_background_process("clean_up_old_cache_invalidations") + async def _clean_up_cache_invalidation_wrapper(self) -> None: + """ + Clean up cache invalidation stream table entries occasionally. + If we are behind (i.e. there are entries old enough to + be deleted but too many of them to be deleted in one go), + then we run slightly more frequently. + """ + delete_up_to: int = ( + self.hs.get_clock().time_msec() - RETENTION_PERIOD_OF_CACHE_INVALIDATIONS_MS + ) + + in_backlog = await self._clean_up_batch_of_old_cache_invalidations(delete_up_to) + + # Vary how long we wait before calling again depending on whether we + # are still sifting through backlog or we have caught up. + if in_backlog: + next_interval = CATCH_UP_CLEANUP_INTERVAL_MS + else: + next_interval = REGULAR_CLEANUP_INTERVAL_MS + + self.hs.get_clock().call_later( + next_interval / 1000, self._clean_up_cache_invalidation_wrapper + ) + + async def _clean_up_batch_of_old_cache_invalidations( + self, delete_up_to_millisec: int + ) -> bool: + """ + Remove old rows from the `cache_invalidation_stream_by_instance` table automatically (this table is unused in SQLite). + + Up to `CLEAN_UP_BATCH_SIZE` rows will be deleted at once. 
+ + Returns true if and only if we were limited by batch size (i.e. we are in backlog: + there are more things to clean up). + """ + + def _clean_up_batch_of_old_cache_invalidations_txn( + txn: LoggingTransaction, + ) -> bool: + # First get the earliest stream ID + txn.execute( + """ + SELECT stream_id FROM cache_invalidation_stream_by_instance + ORDER BY stream_id ASC + LIMIT 1 + """ + ) + row = txn.fetchone() + if row is None: + return False + earliest_stream_id: int = row[0] + + # Then find the last stream ID of the range we will delete + txn.execute( + """ + SELECT stream_id FROM cache_invalidation_stream_by_instance + WHERE stream_id <= ? AND invalidation_ts <= ? + ORDER BY stream_id DESC + LIMIT 1 + """, + (earliest_stream_id + CLEAN_UP_MAX_BATCH_SIZE, delete_up_to_millisec), + ) + row = txn.fetchone() + if row is None: + return False + cutoff_stream_id: int = row[0] + + # Determine whether we are caught up or still catching up + txn.execute( + """ + SELECT invalidation_ts FROM cache_invalidation_stream_by_instance + WHERE stream_id > ? + ORDER BY stream_id ASC + LIMIT 1 + """, + (cutoff_stream_id,), + ) + row = txn.fetchone() + if row is None: + in_backlog = False + else: + # We are in backlog if the next row could have been deleted + # if we didn't have such a small batch size + in_backlog = row[0] <= delete_up_to_millisec + + txn.execute( + """ + DELETE FROM cache_invalidation_stream_by_instance + WHERE ? <= stream_id AND stream_id <= ? + """, + (earliest_stream_id, cutoff_stream_id), + ) + + return in_backlog + + return await self.db_pool.runInteraction( + "clean_up_old_cache_invalidations", + _clean_up_batch_of_old_cache_invalidations_txn, + ) From a476d5048b96d6f9422f3d31d3c14a5247855715 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 8 Aug 2023 10:53:49 +0000 Subject: [PATCH 304/562] Allow modules to schedule delayed background calls. (#15993) * Add a module API function to provide `call_later` * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) * Add comments * Update version number --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/15993.misc | 1 + synapse/module_api/__init__.py | 41 ++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 changelog.d/15993.misc diff --git a/changelog.d/15993.misc b/changelog.d/15993.misc new file mode 100644 index 0000000000..35ead05157 --- /dev/null +++ b/changelog.d/15993.misc @@ -0,0 +1 @@ +Allow modules to schedule delayed background calls. \ No newline at end of file diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index ba1a925003..acee1dafd3 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -34,6 +34,7 @@ import jinja2 from typing_extensions import ParamSpec from twisted.internet import defer +from twisted.internet.interfaces import IDelayedCall from twisted.web.resource import Resource from synapse.api import errors @@ -1242,6 +1243,46 @@ class ModuleApi: """ return self._hs.config.worker.run_background_tasks + def delayed_background_call( + self, + msec: float, + f: Callable, + *args: object, + desc: Optional[str] = None, + **kwargs: object, + ) -> IDelayedCall: + """Wraps a function as a background process and calls it in a given number of milliseconds. + + The scheduled call is not persistent: if the current Synapse instance is + restarted before the call is made, the call will not be made. + + Added in Synapse v1.90.0. + + Args: + msec: How long to wait before calling, in milliseconds. 
+ f: The function to call once. f can be either synchronous or + asynchronous, and must follow Synapse's logcontext rules. + More info about logcontexts is available at + https://matrix-org.github.io/synapse/latest/log_contexts.html + *args: Positional arguments to pass to function. + desc: The background task's description. Default to the function's name. + **kwargs: Keyword arguments to pass to function. + + Returns: + IDelayedCall handle from twisted, which allows to cancel the delayed call if desired. + """ + + if desc is None: + desc = f.__name__ + + return self._clock.call_later( + # convert ms to seconds as needed by call_later. + msec * 0.001, + run_as_background_process, + desc, + lambda: maybe_awaitable(f(*args, **kwargs)), + ) + async def sleep(self, seconds: float) -> None: """Sleeps for the given number of seconds. From 8e09b8aecbd01adb367b7a845348e9985f7a98af Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 Aug 2023 15:29:44 +0100 Subject: [PATCH 305/562] 1.90.0rc1 --- CHANGES.md | 65 +++++++++++++++++++++++++++++++++++++++ changelog.d/15525.misc | 1 - changelog.d/15629.feature | 1 - changelog.d/15754.misc | 1 - changelog.d/15791.bugfix | 1 - changelog.d/15868.feature | 1 - changelog.d/15964.removal | 1 - changelog.d/15972.docker | 1 - changelog.d/15991.misc | 1 - changelog.d/15992.misc | 1 - changelog.d/15993.misc | 1 - changelog.d/16009.docker | 1 - changelog.d/16011.misc | 1 - changelog.d/16012.bugfix | 1 - changelog.d/16013.misc | 1 - changelog.d/16015.doc | 1 - changelog.d/16016.doc | 2 -- changelog.d/16017.removal | 1 - changelog.d/16019.misc | 1 - changelog.d/16023.misc | 1 - changelog.d/16027.doc | 1 - changelog.d/16028.misc | 1 - changelog.d/16031.bugfix | 1 - changelog.d/16043.bugfix | 1 - changelog.d/16044.misc | 1 - changelog.d/16046.bugfix | 1 - changelog.d/16068.misc | 1 - changelog.d/16069.misc | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 30 files changed, 72 insertions(+), 29 deletions(-) delete mode 100644 changelog.d/15525.misc delete mode 100644 changelog.d/15629.feature delete mode 100644 changelog.d/15754.misc delete mode 100644 changelog.d/15791.bugfix delete mode 100644 changelog.d/15868.feature delete mode 100644 changelog.d/15964.removal delete mode 100644 changelog.d/15972.docker delete mode 100644 changelog.d/15991.misc delete mode 100644 changelog.d/15992.misc delete mode 100644 changelog.d/15993.misc delete mode 100644 changelog.d/16009.docker delete mode 100644 changelog.d/16011.misc delete mode 100644 changelog.d/16012.bugfix delete mode 100644 changelog.d/16013.misc delete mode 100644 changelog.d/16015.doc delete mode 100644 changelog.d/16016.doc delete mode 100644 changelog.d/16017.removal delete mode 100644 changelog.d/16019.misc delete mode 100644 changelog.d/16023.misc delete mode 100644 changelog.d/16027.doc delete mode 100644 changelog.d/16028.misc delete mode 100644 changelog.d/16031.bugfix delete mode 100644 changelog.d/16043.bugfix delete mode 100644 changelog.d/16044.misc delete mode 100644 changelog.d/16046.bugfix delete mode 100644 changelog.d/16068.misc delete mode 100644 changelog.d/16069.misc diff --git a/CHANGES.md b/CHANGES.md index 74125613f2..01488f1e93 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,68 @@ +# Synapse 1.90.0rc1 (2023-08-08) + +### Features + +- Scope transaction IDs to devices (implement [MSC3970](https://github.com/matrix-org/matrix-spec-proposals/pull/3970)). 
([\#15629](https://github.com/matrix-org/synapse/issues/15629)) +- Remove old rows from the `cache_invalidation_stream_by_instance` table automatically (this table is unused in SQLite). ([\#15868](https://github.com/matrix-org/synapse/issues/15868)) + +### Bugfixes + +- Fix bug where purging history and paginating simultaneously could lead to database corruption when using workers. ([\#15791](https://github.com/matrix-org/synapse/issues/15791)) +- Fix 404 not found code returned on profile endpoint when the display name is empty but not the avatar URL. ([\#16012](https://github.com/matrix-org/synapse/issues/16012)) +- Remove leading and trailing spaces when setting a display name. ([\#16031](https://github.com/matrix-org/synapse/issues/16031)) +- Fix a long-standing bug where the `synapse_port_db` failed to configure sequences for application services and partial stated rooms. ([\#16043](https://github.com/matrix-org/synapse/issues/16043)) +- Fix deletion in dehydrated devices v2. ([\#16046](https://github.com/matrix-org/synapse/issues/16046)) + +### Updates to the Docker image + +- Add `org.opencontainers.image.version` labels to Docker containers [published by Matrix.org](https://hub.docker.com/r/matrixdotorg/synapse). Contributed by Mo Balaa. ([\#15972](https://github.com/matrix-org/synapse/issues/15972), [\#16009](https://github.com/matrix-org/synapse/issues/16009)) + +### Improved Documentation + +- Add a internal documentation page describing the ["streams" used within Synapse](https://matrix-org.github.io/synapse/v1.90/development/synapse_architecture/streams.html). ([\#16015](https://github.com/matrix-org/synapse/issues/16015)) +- Clarify comment on the keys/upload over replication enpoint. ([\#16016](https://github.com/matrix-org/synapse/issues/16016)) +- Do not expose Admin API in caddy reverse proxy example. Contributed by @NilsIrl. ([\#16027](https://github.com/matrix-org/synapse/issues/16027)) + +### Deprecations and Removals + +- Remove support for legacy application service paths. ([\#15964](https://github.com/matrix-org/synapse/issues/15964)) +- Move support for application service query parameter authorization behind a configuration option. ([\#16017](https://github.com/matrix-org/synapse/issues/16017)) + +### Internal Changes + +- Update SQL queries to inline boolean parameters as supported in SQLite 3.27. ([\#15525](https://github.com/matrix-org/synapse/issues/15525)) +- Allow for the configuration of the backoff algorithm for federation destinations. ([\#15754](https://github.com/matrix-org/synapse/issues/15754)) +- Allow modules to check whether the current worker is configured to run background tasks. ([\#15991](https://github.com/matrix-org/synapse/issues/15991)) +- Update support for [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958) to match the latest revision of the MSC. ([\#15992](https://github.com/matrix-org/synapse/issues/15992)) +- Allow modules to schedule delayed background calls. ([\#15993](https://github.com/matrix-org/synapse/issues/15993)) +- Update PyYAML to 6.0.1. ([\#16011](https://github.com/matrix-org/synapse/issues/16011)) +- Properly overwrite the `redacts` content-property for forwards-compatibility with room versions 1 through 10. ([\#16013](https://github.com/matrix-org/synapse/issues/16013)) +- Fix building the nix development environment on MacOS systems. ([\#16019](https://github.com/matrix-org/synapse/issues/16019)) +- Combine duplicated code. 
([\#16023](https://github.com/matrix-org/synapse/issues/16023)) +- Collect additional metrics from `ResponseCache` for eviction. ([\#16028](https://github.com/matrix-org/synapse/issues/16028)) +- Update certifi to 2023.7.22 and pygments to 2.15.1. ([\#16044](https://github.com/matrix-org/synapse/issues/16044)) +- Fix endpoint improperly declaring support for MSC3814. ([\#16068](https://github.com/matrix-org/synapse/issues/16068)) +- Drop backwards compat hack for event serialization. ([\#16069](https://github.com/matrix-org/synapse/issues/16069)) + +### Updates to locked dependencies + +* Bump cryptography from 41.0.2 to 41.0.3. ([\#16048](https://github.com/matrix-org/synapse/issues/16048)) +* Bump furo from 2023.5.20 to 2023.7.26. ([\#16077](https://github.com/matrix-org/synapse/issues/16077)) +* Bump immutabledict from 2.2.4 to 3.0.0. ([\#16034](https://github.com/matrix-org/synapse/issues/16034)) +* Bump jsonschema from 4.18.3 to 4.19.0. ([\#16081](https://github.com/matrix-org/synapse/issues/16081)) +* Bump phonenumbers from 8.13.14 to 8.13.18. ([\#16076](https://github.com/matrix-org/synapse/issues/16076)) +* Bump regex from 1.9.1 to 1.9.3. ([\#16073](https://github.com/matrix-org/synapse/issues/16073)) +* Bump serde from 1.0.171 to 1.0.175. ([\#15982](https://github.com/matrix-org/synapse/issues/15982)) +* Bump serde from 1.0.175 to 1.0.179. ([\#16033](https://github.com/matrix-org/synapse/issues/16033)) +* Bump serde from 1.0.179 to 1.0.183. ([\#16074](https://github.com/matrix-org/synapse/issues/16074)) +* Bump serde_json from 1.0.103 to 1.0.104. ([\#16032](https://github.com/matrix-org/synapse/issues/16032)) +* Bump service-identity from 21.1.0 to 23.1.0. ([\#16038](https://github.com/matrix-org/synapse/issues/16038)) +* Bump types-commonmark from 0.9.2.3 to 0.9.2.4. ([\#16037](https://github.com/matrix-org/synapse/issues/16037)) +* Bump types-jsonschema from 4.17.0.8 to 4.17.0.10. ([\#16036](https://github.com/matrix-org/synapse/issues/16036)) +* Bump types-netaddr from 0.8.0.8 to 0.8.0.9. ([\#16035](https://github.com/matrix-org/synapse/issues/16035)) +* Bump types-opentracing from 2.4.10.5 to 2.4.10.6. ([\#16078](https://github.com/matrix-org/synapse/issues/16078)) +* Bump types-setuptools from 68.0.0.0 to 68.0.0.3. ([\#16079](https://github.com/matrix-org/synapse/issues/16079)) + # Synapse 1.89.0 (2023-08-01) No significant changes since 1.89.0rc1. diff --git a/changelog.d/15525.misc b/changelog.d/15525.misc deleted file mode 100644 index 67ab0cf62f..0000000000 --- a/changelog.d/15525.misc +++ /dev/null @@ -1 +0,0 @@ -Update SQL queries to inline boolean parameters as supported in SQLite 3.27. diff --git a/changelog.d/15629.feature b/changelog.d/15629.feature deleted file mode 100644 index 16264effca..0000000000 --- a/changelog.d/15629.feature +++ /dev/null @@ -1 +0,0 @@ -Scope transaction IDs to devices (implement [MSC3970](https://github.com/matrix-org/matrix-spec-proposals/pull/3970)). diff --git a/changelog.d/15754.misc b/changelog.d/15754.misc deleted file mode 100644 index 4314d415a3..0000000000 --- a/changelog.d/15754.misc +++ /dev/null @@ -1 +0,0 @@ -Allow for the configuration of the backoff algorithm for federation destinations. diff --git a/changelog.d/15791.bugfix b/changelog.d/15791.bugfix deleted file mode 100644 index 182634b62f..0000000000 --- a/changelog.d/15791.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where purging history and paginating simultaneously could lead to database corruption when using workers. 
diff --git a/changelog.d/15868.feature b/changelog.d/15868.feature deleted file mode 100644 index a866bf5774..0000000000 --- a/changelog.d/15868.feature +++ /dev/null @@ -1 +0,0 @@ -Remove old rows from the `cache_invalidation_stream_by_instance` table automatically (this table is unused in SQLite). diff --git a/changelog.d/15964.removal b/changelog.d/15964.removal deleted file mode 100644 index 7613afe505..0000000000 --- a/changelog.d/15964.removal +++ /dev/null @@ -1 +0,0 @@ -Remove support for legacy application service paths. diff --git a/changelog.d/15972.docker b/changelog.d/15972.docker deleted file mode 100644 index 7fd9707deb..0000000000 --- a/changelog.d/15972.docker +++ /dev/null @@ -1 +0,0 @@ -Add `org.opencontainers.image.version` labels to Docker containers [published by Matrix.org](https://hub.docker.com/r/matrixdotorg/synapse). Contributed by Mo Balaa. diff --git a/changelog.d/15991.misc b/changelog.d/15991.misc deleted file mode 100644 index 18f388cff8..0000000000 --- a/changelog.d/15991.misc +++ /dev/null @@ -1 +0,0 @@ -Allow modules to check whether the current worker is configured to run background tasks. \ No newline at end of file diff --git a/changelog.d/15992.misc b/changelog.d/15992.misc deleted file mode 100644 index 539f55b475..0000000000 --- a/changelog.d/15992.misc +++ /dev/null @@ -1 +0,0 @@ -Update support for [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958) to match the latest revision of the MSC. diff --git a/changelog.d/15993.misc b/changelog.d/15993.misc deleted file mode 100644 index 35ead05157..0000000000 --- a/changelog.d/15993.misc +++ /dev/null @@ -1 +0,0 @@ -Allow modules to schedule delayed background calls. \ No newline at end of file diff --git a/changelog.d/16009.docker b/changelog.d/16009.docker deleted file mode 100644 index 7fd9707deb..0000000000 --- a/changelog.d/16009.docker +++ /dev/null @@ -1 +0,0 @@ -Add `org.opencontainers.image.version` labels to Docker containers [published by Matrix.org](https://hub.docker.com/r/matrixdotorg/synapse). Contributed by Mo Balaa. diff --git a/changelog.d/16011.misc b/changelog.d/16011.misc deleted file mode 100644 index 8a8d9822c6..0000000000 --- a/changelog.d/16011.misc +++ /dev/null @@ -1 +0,0 @@ -Update PyYAML to 6.0.1. diff --git a/changelog.d/16012.bugfix b/changelog.d/16012.bugfix deleted file mode 100644 index 44ca9377ff..0000000000 --- a/changelog.d/16012.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix 404 not found code returned on profile endpoint when the display name is empty but not the avatar URL. diff --git a/changelog.d/16013.misc b/changelog.d/16013.misc deleted file mode 100644 index bd161e13ed..0000000000 --- a/changelog.d/16013.misc +++ /dev/null @@ -1 +0,0 @@ -Properly overwrite the `redacts` content-property for forwards-compatibility with room versions 1 through 10. diff --git a/changelog.d/16015.doc b/changelog.d/16015.doc deleted file mode 100644 index 1113d00dc6..0000000000 --- a/changelog.d/16015.doc +++ /dev/null @@ -1 +0,0 @@ -Add a internal documentation page describing the ["streams" used within Synapse](https://matrix-org.github.io/synapse/v1.90/development/synapse_architecture/streams.html). diff --git a/changelog.d/16016.doc b/changelog.d/16016.doc deleted file mode 100644 index e677058c2d..0000000000 --- a/changelog.d/16016.doc +++ /dev/null @@ -1,2 +0,0 @@ -Clarify comment on the keys/upload over replication enpoint. 
- diff --git a/changelog.d/16017.removal b/changelog.d/16017.removal deleted file mode 100644 index 6b72442892..0000000000 --- a/changelog.d/16017.removal +++ /dev/null @@ -1 +0,0 @@ -Move support for application service query parameter authorization behind a configuration option. diff --git a/changelog.d/16019.misc b/changelog.d/16019.misc deleted file mode 100644 index 0e583302ee..0000000000 --- a/changelog.d/16019.misc +++ /dev/null @@ -1 +0,0 @@ -Fix building the nix development environment on MacOS systems. \ No newline at end of file diff --git a/changelog.d/16023.misc b/changelog.d/16023.misc deleted file mode 100644 index ee732318e4..0000000000 --- a/changelog.d/16023.misc +++ /dev/null @@ -1 +0,0 @@ -Combine duplicated code. diff --git a/changelog.d/16027.doc b/changelog.d/16027.doc deleted file mode 100644 index 201e88d6b6..0000000000 --- a/changelog.d/16027.doc +++ /dev/null @@ -1 +0,0 @@ -Do not expose Admin API in caddy reverse proxy example. Contributed by @NilsIrl. diff --git a/changelog.d/16028.misc b/changelog.d/16028.misc deleted file mode 100644 index 3a1e9fef09..0000000000 --- a/changelog.d/16028.misc +++ /dev/null @@ -1 +0,0 @@ -Collect additional metrics from `ResponseCache` for eviction. diff --git a/changelog.d/16031.bugfix b/changelog.d/16031.bugfix deleted file mode 100644 index e48bf3975c..0000000000 --- a/changelog.d/16031.bugfix +++ /dev/null @@ -1 +0,0 @@ -Remove leading and trailing spaces when setting a display name. diff --git a/changelog.d/16043.bugfix b/changelog.d/16043.bugfix deleted file mode 100644 index 78c0f3455a..0000000000 --- a/changelog.d/16043.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where the `synapse_port_db` failed to configure sequences for application services and partial stated rooms. diff --git a/changelog.d/16044.misc b/changelog.d/16044.misc deleted file mode 100644 index 2e7137ccc2..0000000000 --- a/changelog.d/16044.misc +++ /dev/null @@ -1 +0,0 @@ -Update certifi to 2023.7.22 and pygments to 2.15.1. diff --git a/changelog.d/16046.bugfix b/changelog.d/16046.bugfix deleted file mode 100644 index ce5a9ae4b5..0000000000 --- a/changelog.d/16046.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix deletion in dehydrated devices v2. diff --git a/changelog.d/16068.misc b/changelog.d/16068.misc deleted file mode 100644 index 341426a746..0000000000 --- a/changelog.d/16068.misc +++ /dev/null @@ -1 +0,0 @@ -Fix endpoint improperly declaring support for MSC3814. diff --git a/changelog.d/16069.misc b/changelog.d/16069.misc deleted file mode 100644 index f59ead8638..0000000000 --- a/changelog.d/16069.misc +++ /dev/null @@ -1 +0,0 @@ -Drop backwards compat hack for event serialization. diff --git a/debian/changelog b/debian/changelog index 90240b8082..ed35abc9ee 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.90.0~rc1) stable; urgency=medium + + * New Synapse release 1.90.0rc1. + + -- Synapse Packaging team Tue, 08 Aug 2023 15:29:34 +0100 + matrix-synapse-py3 (1.89.0) stable; urgency=medium * New Synapse release 1.89.0. 
diff --git a/pyproject.toml b/pyproject.toml index 8304d25221..ca532e2c7c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.89.0" +version = "1.90.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 3dfe5c0270dd35ef79d31136dde15e1cd0d52e5b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 Aug 2023 15:33:38 +0100 Subject: [PATCH 306/562] Fixup changelog --- CHANGES.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 01488f1e93..75bf2d9365 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,7 +9,6 @@ - Fix bug where purging history and paginating simultaneously could lead to database corruption when using workers. ([\#15791](https://github.com/matrix-org/synapse/issues/15791)) - Fix 404 not found code returned on profile endpoint when the display name is empty but not the avatar URL. ([\#16012](https://github.com/matrix-org/synapse/issues/16012)) -- Remove leading and trailing spaces when setting a display name. ([\#16031](https://github.com/matrix-org/synapse/issues/16031)) - Fix a long-standing bug where the `synapse_port_db` failed to configure sequences for application services and partial stated rooms. ([\#16043](https://github.com/matrix-org/synapse/issues/16043)) - Fix deletion in dehydrated devices v2. ([\#16046](https://github.com/matrix-org/synapse/issues/16046)) @@ -35,20 +34,21 @@ - Allow modules to check whether the current worker is configured to run background tasks. ([\#15991](https://github.com/matrix-org/synapse/issues/15991)) - Update support for [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958) to match the latest revision of the MSC. ([\#15992](https://github.com/matrix-org/synapse/issues/15992)) - Allow modules to schedule delayed background calls. ([\#15993](https://github.com/matrix-org/synapse/issues/15993)) -- Update PyYAML to 6.0.1. ([\#16011](https://github.com/matrix-org/synapse/issues/16011)) - Properly overwrite the `redacts` content-property for forwards-compatibility with room versions 1 through 10. ([\#16013](https://github.com/matrix-org/synapse/issues/16013)) - Fix building the nix development environment on MacOS systems. ([\#16019](https://github.com/matrix-org/synapse/issues/16019)) +- Remove leading and trailing spaces when setting a display name. ([\#16031](https://github.com/matrix-org/synapse/issues/16031)) - Combine duplicated code. ([\#16023](https://github.com/matrix-org/synapse/issues/16023)) - Collect additional metrics from `ResponseCache` for eviction. ([\#16028](https://github.com/matrix-org/synapse/issues/16028)) -- Update certifi to 2023.7.22 and pygments to 2.15.1. ([\#16044](https://github.com/matrix-org/synapse/issues/16044)) - Fix endpoint improperly declaring support for MSC3814. ([\#16068](https://github.com/matrix-org/synapse/issues/16068)) - Drop backwards compat hack for event serialization. ([\#16069](https://github.com/matrix-org/synapse/issues/16069)) ### Updates to locked dependencies +* Update PyYAML to 6.0.1. ([\#16011](https://github.com/matrix-org/synapse/issues/16011)) * Bump cryptography from 41.0.2 to 41.0.3. ([\#16048](https://github.com/matrix-org/synapse/issues/16048)) * Bump furo from 2023.5.20 to 2023.7.26. ([\#16077](https://github.com/matrix-org/synapse/issues/16077)) * Bump immutabledict from 2.2.4 to 3.0.0. 
([\#16034](https://github.com/matrix-org/synapse/issues/16034)) +* Update certifi to 2023.7.22 and pygments to 2.15.1. ([\#16044](https://github.com/matrix-org/synapse/issues/16044)) * Bump jsonschema from 4.18.3 to 4.19.0. ([\#16081](https://github.com/matrix-org/synapse/issues/16081)) * Bump phonenumbers from 8.13.14 to 8.13.18. ([\#16076](https://github.com/matrix-org/synapse/issues/16076)) * Bump regex from 1.9.1 to 1.9.3. ([\#16073](https://github.com/matrix-org/synapse/issues/16073)) From 4581809846165840b68aae4c7b8bf4c6efff6d66 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 Aug 2023 15:38:45 +0100 Subject: [PATCH 307/562] Fixup changelog --- CHANGES.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 75bf2d9365..95d8227ee0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,10 +7,10 @@ ### Bugfixes -- Fix bug where purging history and paginating simultaneously could lead to database corruption when using workers. ([\#15791](https://github.com/matrix-org/synapse/issues/15791)) -- Fix 404 not found code returned on profile endpoint when the display name is empty but not the avatar URL. ([\#16012](https://github.com/matrix-org/synapse/issues/16012)) +- Fix a long-standing bug where purging history and paginating simultaneously could lead to database corruption when using workers. ([\#15791](https://github.com/matrix-org/synapse/issues/15791)) +- Fix a long-standing bug where profile endpoint returned a 404 when the user's display name was empty. ([\#16012](https://github.com/matrix-org/synapse/issues/16012)) - Fix a long-standing bug where the `synapse_port_db` failed to configure sequences for application services and partial stated rooms. ([\#16043](https://github.com/matrix-org/synapse/issues/16043)) -- Fix deletion in dehydrated devices v2. ([\#16046](https://github.com/matrix-org/synapse/issues/16046)) +- Fix long-standing bug with deletion in dehydrated devices v2. ([\#16046](https://github.com/matrix-org/synapse/issues/16046)) ### Updates to the Docker image From 0328b56468fe12c4d86ef636b60964527a510160 Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 8 Aug 2023 12:04:46 -0700 Subject: [PATCH 308/562] Support MSC3814: Dehydrated Devices Part 2 (#16010) --- changelog.d/16010.misc | 1 + synapse/handlers/device.py | 14 +- synapse/handlers/devicemessage.py | 13 -- synapse/rest/client/devices.py | 16 +- synapse/storage/databases/main/devices.py | 51 ++++- .../storage/databases/main/end_to_end_keys.py | 178 ++++++++++++------ tests/handlers/test_device.py | 9 +- tests/rest/client/test_devices.py | 77 +++++++- 8 files changed, 258 insertions(+), 101 deletions(-) create mode 100644 changelog.d/16010.misc diff --git a/changelog.d/16010.misc b/changelog.d/16010.misc new file mode 100644 index 0000000000..1e1a148069 --- /dev/null +++ b/changelog.d/16010.misc @@ -0,0 +1 @@ +Update dehydrated devices implementation. 
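The handler and storage diffs that follow fold key upload into the same database path that records the dehydrated device, instead of a separate `/keys/upload` round-trip. For orientation, here is a rough sketch of the client-side request against the v2 (MSC3814) endpoint; the unstable path prefix and the key shapes mirror the tests further down, while the homeserver URL, tokens, key material, and the `device_data` payload are placeholder assumptions, not values defined by this patch.

```python
# A minimal sketch, not the reference client: bundle a dehydrated device
# and its end-to-end keys in a single PUT, as the MSC3814 v2 endpoint
# exercised by the tests below allows. Every concrete value here (the
# homeserver URL, access token, key material, and the device_data payload)
# is a placeholder assumption.
import requests

HS = "https://matrix.example.org"  # assumption: homeserver base URL
TOKEN = "syt_alice_dummy"          # assumption: a valid access token

body = {
    "device_id": "device1",
    "initial_device_display_name": "dehydrated device",
    # Opaque dehydrated-device blob; its real schema is defined by MSC3814.
    "device_data": {"algorithm": "dummy.algorithm", "account": "dummy"},
    "device_keys": {
        "user_id": "@alice:matrix.example.org",
        "device_id": "device1",
        "algorithms": ["dummy.algorithm"],
        "keys": {"ed25519:device1": "publickey"},
        "signatures": {"@alice:matrix.example.org": {"ed25519:device1": "sig"}},
    },
    "one_time_keys": {"alg1:k1": "0net1m3k3y"},
    "fallback_keys": {"alg1:device1": "f4llb4ckk3y"},
}

resp = requests.put(
    f"{HS}/_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device",
    headers={"Authorization": f"Bearer {TOKEN}"},
    json=body,
)
resp.raise_for_status()
print(resp.json()["device_id"])  # the server echoes the stored device ID
```

With the keys carried in the same submission, `_store_dehydrated_device_txn` can write the device row, the one-time keys, and the fallback keys together, which is what removes the separate key-upload call (and its non-atomicity TODO) from the servlet below.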
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index b7bf70a72d..5ae427d52c 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -385,6 +385,7 @@ class DeviceHandler(DeviceWorkerHandler): self.federation_sender = hs.get_federation_sender() self._account_data_handler = hs.get_account_data_handler() self._storage_controllers = hs.get_storage_controllers() + self.db_pool = hs.get_datastores().main.db_pool self.device_list_updater = DeviceListUpdater(hs, self) @@ -656,15 +657,17 @@ class DeviceHandler(DeviceWorkerHandler): device_id: Optional[str], device_data: JsonDict, initial_device_display_name: Optional[str] = None, + keys_for_device: Optional[JsonDict] = None, ) -> str: - """Store a dehydrated device for a user. If the user had a previous - dehydrated device, it is removed. + """Store a dehydrated device for a user, optionally storing the keys associated with + it as well. If the user had a previous dehydrated device, it is removed. Args: user_id: the user that we are storing the device for device_id: device id supplied by client device_data: the dehydrated device information initial_device_display_name: The display name to use for the device + keys_for_device: keys for the dehydrated device Returns: device id of the dehydrated device """ @@ -673,11 +676,16 @@ class DeviceHandler(DeviceWorkerHandler): device_id, initial_device_display_name, ) + + time_now = self.clock.time_msec() + old_device_id = await self.store.store_dehydrated_device( - user_id, device_id, device_data + user_id, device_id, device_data, time_now, keys_for_device ) + if old_device_id is not None: await self.delete_devices(user_id, [old_device_id]) + return device_id async def rehydrate_device( diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index 15e94a03cb..17ff8821d9 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -367,19 +367,6 @@ class DeviceMessageHandler: errcode=Codes.INVALID_PARAM, ) - # if we have a since token, delete any to-device messages before that token - # (since we now know that the device has received them) - deleted = await self.store.delete_messages_for_device( - user_id, device_id, since_stream_id - ) - logger.debug( - "Deleted %d to-device messages up to %d for user_id %s device_id %s", - deleted, - since_stream_id, - user_id, - device_id, - ) - to_token = self.event_sources.get_current_token().to_device_key messages, stream_id = await self.store.get_messages_for_device( diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index 51f17f80da..925f037743 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -29,7 +29,6 @@ from synapse.http.servlet import ( parse_integer, ) from synapse.http.site import SynapseRequest -from synapse.replication.http.devices import ReplicationUploadKeysForUserRestServlet from synapse.rest.client._base import client_patterns, interactive_auth_handler from synapse.rest.client.models import AuthenticationData from synapse.rest.models import RequestBodyModel @@ -480,13 +479,6 @@ class DehydratedDeviceV2Servlet(RestServlet): self.e2e_keys_handler = hs.get_e2e_keys_handler() self.device_handler = handler - if hs.config.worker.worker_app is None: - # if main process - self.key_uploader = self.e2e_keys_handler.upload_keys_for_user - else: - # then a worker - self.key_uploader = ReplicationUploadKeysForUserRestServlet.make_client(hs) - async def on_GET(self, request: SynapseRequest) -> Tuple[int, 
JsonDict]: requester = await self.auth.get_user_by_req(request) @@ -549,18 +541,12 @@ class DehydratedDeviceV2Servlet(RestServlet): "Device key(s) not found, these must be provided.", ) - # TODO: Those two operations, creating a device and storing the - # device's keys should be atomic. device_id = await self.device_handler.store_dehydrated_device( requester.user.to_string(), submission.device_id, submission.device_data.dict(), submission.initial_device_display_name, - ) - - # TODO: Do we need to do something with the result here? - await self.key_uploader( - user_id=user_id, device_id=submission.device_id, keys=submission.dict() + device_info, ) return 200, {"device_id": device_id} diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index d9df437e51..e4162f846b 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -28,6 +28,7 @@ from typing import ( cast, ) +from canonicaljson import encode_canonical_json from typing_extensions import Literal from synapse.api.constants import EduTypes @@ -1188,8 +1189,42 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ) def _store_dehydrated_device_txn( - self, txn: LoggingTransaction, user_id: str, device_id: str, device_data: str + self, + txn: LoggingTransaction, + user_id: str, + device_id: str, + device_data: str, + time: int, + keys: Optional[JsonDict] = None, ) -> Optional[str]: + # TODO: make keys non-optional once support for msc2697 is dropped + if keys: + device_keys = keys.get("device_keys", None) + if device_keys: + # Type ignore - this function is defined on EndToEndKeyStore which we do + # have access to due to hs.get_datastore() "magic" + self._set_e2e_device_keys_txn( # type: ignore[attr-defined] + txn, user_id, device_id, time, device_keys + ) + + one_time_keys = keys.get("one_time_keys", None) + if one_time_keys: + key_list = [] + for key_id, key_obj in one_time_keys.items(): + algorithm, key_id = key_id.split(":") + key_list.append( + ( + algorithm, + key_id, + encode_canonical_json(key_obj).decode("ascii"), + ) + ) + self._add_e2e_one_time_keys_txn(txn, user_id, device_id, time, key_list) + + fallback_keys = keys.get("fallback_keys", None) + if fallback_keys: + self._set_e2e_fallback_keys_txn(txn, user_id, device_id, fallback_keys) + old_device_id = self.db_pool.simple_select_one_onecol_txn( txn, table="dehydrated_devices", @@ -1203,10 +1238,16 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): keyvalues={"user_id": user_id}, values={"device_id": device_id, "device_data": device_data}, ) + return old_device_id async def store_dehydrated_device( - self, user_id: str, device_id: str, device_data: JsonDict + self, + user_id: str, + device_id: str, + device_data: JsonDict, + time_now: int, + keys: Optional[dict] = None, ) -> Optional[str]: """Store a dehydrated device for a user. 
@@ -1214,15 +1255,21 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): user_id: the user that we are storing the device for device_id: the ID of the dehydrated device device_data: the dehydrated device information + time_now: current time at the request in milliseconds + keys: keys for the dehydrated device + Returns: device id of the user's previous dehydrated device, if any """ + return await self.db_pool.runInteraction( "store_dehydrated_device_txn", self._store_dehydrated_device_txn, user_id, device_id, json_encoder.encode(device_data), + time_now, + keys, ) async def remove_dehydrated_device(self, user_id: str, device_id: str) -> bool: diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 91ae9c457d..b49dea577c 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -522,36 +522,57 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker new_keys: keys to add - each a tuple of (algorithm, key_id, key json) """ - def _add_e2e_one_time_keys(txn: LoggingTransaction) -> None: - set_tag("user_id", user_id) - set_tag("device_id", device_id) - set_tag("new_keys", str(new_keys)) - # We are protected from race between lookup and insertion due to - # a unique constraint. If there is a race of two calls to - # `add_e2e_one_time_keys` then they'll conflict and we will only - # insert one set. - self.db_pool.simple_insert_many_txn( - txn, - table="e2e_one_time_keys_json", - keys=( - "user_id", - "device_id", - "algorithm", - "key_id", - "ts_added_ms", - "key_json", - ), - values=[ - (user_id, device_id, algorithm, key_id, time_now, json_bytes) - for algorithm, key_id, json_bytes in new_keys - ], - ) - self._invalidate_cache_and_stream( - txn, self.count_e2e_one_time_keys, (user_id, device_id) - ) - await self.db_pool.runInteraction( - "add_e2e_one_time_keys_insert", _add_e2e_one_time_keys + "add_e2e_one_time_keys_insert", + self._add_e2e_one_time_keys_txn, + user_id, + device_id, + time_now, + new_keys, + ) + + def _add_e2e_one_time_keys_txn( + self, + txn: LoggingTransaction, + user_id: str, + device_id: str, + time_now: int, + new_keys: Iterable[Tuple[str, str, str]], + ) -> None: + """Insert some new one time keys for a device. Errors if any of the keys already exist. + + Args: + user_id: id of user to get keys for + device_id: id of device to get keys for + time_now: insertion time to record (ms since epoch) + new_keys: keys to add - each a tuple of (algorithm, key_id, key json) - note + that the key JSON must be in canonical JSON form + """ + set_tag("user_id", user_id) + set_tag("device_id", device_id) + set_tag("new_keys", str(new_keys)) + # We are protected from race between lookup and insertion due to + # a unique constraint. If there is a race of two calls to + # `add_e2e_one_time_keys` then they'll conflict and we will only + # insert one set. 
+ self.db_pool.simple_insert_many_txn( + txn, + table="e2e_one_time_keys_json", + keys=( + "user_id", + "device_id", + "algorithm", + "key_id", + "ts_added_ms", + "key_json", + ), + values=[ + (user_id, device_id, algorithm, key_id, time_now, json_bytes) + for algorithm, key_id, json_bytes in new_keys + ], + ) + self._invalidate_cache_and_stream( + txn, self.count_e2e_one_time_keys, (user_id, device_id) ) @cached(max_entries=10000) @@ -723,6 +744,14 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker device_id: str, fallback_keys: JsonDict, ) -> None: + """Set the user's e2e fallback keys. + + Args: + user_id: the user whose keys are being set + device_id: the device whose keys are being set + fallback_keys: the keys to set. This is a map from key ID (which is + of the form "algorithm:id") to key data. + """ # fallback_keys will usually only have one item in it, so using a for # loop (as opposed to calling simple_upsert_many_txn) won't be too bad # FIXME: make sure that only one key per algorithm is uploaded @@ -1304,43 +1333,70 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): ) -> bool: """Stores device keys for a device. Returns whether there was a change or the keys were already in the database. + + Args: + user_id: user_id of the user to store keys for + device_id: device_id of the device to store keys for + time_now: time at the request to store the keys + device_keys: the keys to store """ - def _set_e2e_device_keys_txn(txn: LoggingTransaction) -> bool: - set_tag("user_id", user_id) - set_tag("device_id", device_id) - set_tag("time_now", time_now) - set_tag("device_keys", str(device_keys)) - - old_key_json = self.db_pool.simple_select_one_onecol_txn( - txn, - table="e2e_device_keys_json", - keyvalues={"user_id": user_id, "device_id": device_id}, - retcol="key_json", - allow_none=True, - ) - - # In py3 we need old_key_json to match new_key_json type. The DB - # returns unicode while encode_canonical_json returns bytes. - new_key_json = encode_canonical_json(device_keys).decode("utf-8") - - if old_key_json == new_key_json: - log_kv({"Message": "Device key already stored."}) - return False - - self.db_pool.simple_upsert_txn( - txn, - table="e2e_device_keys_json", - keyvalues={"user_id": user_id, "device_id": device_id}, - values={"ts_added_ms": time_now, "key_json": new_key_json}, - ) - log_kv({"message": "Device keys stored."}) - return True - return await self.db_pool.runInteraction( - "set_e2e_device_keys", _set_e2e_device_keys_txn + "set_e2e_device_keys", + self._set_e2e_device_keys_txn, + user_id, + device_id, + time_now, + device_keys, ) + def _set_e2e_device_keys_txn( + self, + txn: LoggingTransaction, + user_id: str, + device_id: str, + time_now: int, + device_keys: JsonDict, + ) -> bool: + """Stores device keys for a device. Returns whether there was a change + or the keys were already in the database. + + Args: + user_id: user_id of the user to store keys for + device_id: device_id of the device to store keys for + time_now: time at the request to store the keys + device_keys: the keys to store + """ + set_tag("user_id", user_id) + set_tag("device_id", device_id) + set_tag("time_now", time_now) + set_tag("device_keys", str(device_keys)) + + old_key_json = self.db_pool.simple_select_one_onecol_txn( + txn, + table="e2e_device_keys_json", + keyvalues={"user_id": user_id, "device_id": device_id}, + retcol="key_json", + allow_none=True, + ) + + # In py3 we need old_key_json to match new_key_json type. 
The DB + # returns unicode while encode_canonical_json returns bytes. + new_key_json = encode_canonical_json(device_keys).decode("utf-8") + + if old_key_json == new_key_json: + log_kv({"Message": "Device key already stored."}) + return False + + self.db_pool.simple_upsert_txn( + txn, + table="e2e_device_keys_json", + keyvalues={"user_id": user_id, "device_id": device_id}, + values={"ts_added_ms": time_now, "key_json": new_key_json}, + ) + log_kv({"message": "Device keys stored."}) + return True + async def delete_e2e_keys_by_device(self, user_id: str, device_id: str) -> None: def delete_e2e_keys_by_device_txn(txn: LoggingTransaction) -> None: log_kv( diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 647ee09279..e1e58fa6e6 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -566,15 +566,16 @@ class DehydrationTestCase(unittest.HomeserverTestCase): self.assertEqual(len(res["events"]), 1) self.assertEqual(res["events"][0]["content"]["body"], "foo") - # Fetch the message of the dehydrated device again, which should return nothing - # and delete the old messages + # Fetch the message of the dehydrated device again, which should return + # the same message as it has not been deleted res = self.get_success( self.message_handler.get_events_for_dehydrated_device( requester=requester, device_id=stored_dehydrated_device_id, - since_token=res["next_batch"], + since_token=None, limit=10, ) ) self.assertTrue(len(res["next_batch"]) > 1) - self.assertEqual(len(res["events"]), 0) + self.assertEqual(len(res["events"]), 1) + self.assertEqual(res["events"][0]["content"]["body"], "foo") diff --git a/tests/rest/client/test_devices.py b/tests/rest/client/test_devices.py index 3cf29c10ea..60099f8c59 100644 --- a/tests/rest/client/test_devices.py +++ b/tests/rest/client/test_devices.py @@ -20,7 +20,7 @@ from synapse.api.errors import NotFoundError from synapse.rest import admin, devices, room, sync from synapse.rest.client import account, keys, login, register from synapse.server import HomeServer -from synapse.types import JsonDict, create_requester +from synapse.types import JsonDict, UserID, create_requester from synapse.util import Clock from tests import unittest @@ -282,6 +282,17 @@ class DehydratedDeviceTestCase(unittest.HomeserverTestCase): "": {":": ""} }, }, + "fallback_keys": { + "alg1:device1": "f4llb4ckk3y", + "signed_:": { + "fallback": "true", + "key": "f4llb4ckk3y", + "signatures": { + "": {":": ""} + }, + }, + }, + "one_time_keys": {"alg1:k1": "0net1m3k3y"}, } channel = self.make_request( "PUT", @@ -312,6 +323,55 @@ class DehydratedDeviceTestCase(unittest.HomeserverTestCase): } self.assertEqual(device_data, expected_device_data) + # test that the keys are correctly uploaded + channel = self.make_request( + "POST", + "/_matrix/client/r0/keys/query", + { + "device_keys": { + user: ["device1"], + }, + }, + token, + ) + self.assertEqual(channel.code, 200) + self.assertEqual( + channel.json_body["device_keys"][user][device_id]["keys"], + content["device_keys"]["keys"], + ) + # first claim should return the onetime key we uploaded + res = self.get_success( + self.hs.get_e2e_keys_handler().claim_one_time_keys( + {user: {device_id: {"alg1": 1}}}, + UserID.from_string(user), + timeout=None, + always_include_fallback_keys=False, + ) + ) + self.assertEqual( + res, + { + "failures": {}, + "one_time_keys": {user: {device_id: {"alg1:k1": "0net1m3k3y"}}}, + }, + ) + # second claim should return fallback key + res2 = self.get_success( + 
self.hs.get_e2e_keys_handler().claim_one_time_keys( + {user: {device_id: {"alg1": 1}}}, + UserID.from_string(user), + timeout=None, + always_include_fallback_keys=False, + ) + ) + self.assertEqual( + res2, + { + "failures": {}, + "one_time_keys": {user: {device_id: {"alg1:device1": "f4llb4ckk3y"}}}, + }, + ) + # create another device for the user ( new_device_id, @@ -348,10 +408,21 @@ class DehydratedDeviceTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200) expected_content = {"body": "test_message"} self.assertEqual(channel.json_body["events"][0]["content"], expected_content) + + # fetch messages again and make sure that the message was not deleted + channel = self.make_request( + "POST", + f"_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device/{device_id}/events", + content={}, + access_token=token, + shorthand=False, + ) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["events"][0]["content"], expected_content) next_batch_token = channel.json_body.get("next_batch") - # fetch messages again and make sure that the message was deleted and we are returned an - # empty array + # make sure fetching messages with next batch token works - there are no unfetched + # messages so we should receive an empty array content = {"next_batch": next_batch_token} channel = self.make_request( "POST", From dac97642e41f3f4bc0deff0c80b6a3f7acb4dbc0 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Thu, 10 Aug 2023 11:10:55 +0200 Subject: [PATCH 309/562] Implements admin API to lock an user (MSC3939) (#15870) --- changelog.d/15870.feature | 1 + docs/admin_api/user_admin_api.md | 1 + .../configuration/config_documentation.md | 2 + synapse/_scripts/synapse_port_db.py | 2 +- synapse/api/auth/__init__.py | 1 + synapse/api/auth/internal.py | 15 ++- synapse/api/auth/msc3861_delegated.py | 13 ++ synapse/api/errors.py | 2 + synapse/config/user_directory.py | 1 + synapse/handlers/admin.py | 1 + synapse/handlers/user_directory.py | 5 +- synapse/rest/admin/users.py | 17 +++ synapse/rest/client/logout.py | 8 +- .../storage/databases/main/registration.py | 62 +++++++++- .../storage/databases/main/user_directory.py | 11 +- .../main/delta/80/01_users_alter_locked.sql | 16 +++ tests/api/test_auth.py | 3 + tests/rest/admin/test_user.py | 111 +++++++++++++++++- tests/storage/test_registration.py | 1 + 19 files changed, 262 insertions(+), 11 deletions(-) create mode 100644 changelog.d/15870.feature create mode 100644 synapse/storage/schema/main/delta/80/01_users_alter_locked.sql diff --git a/changelog.d/15870.feature b/changelog.d/15870.feature new file mode 100644 index 0000000000..527220d637 --- /dev/null +++ b/changelog.d/15870.feature @@ -0,0 +1 @@ +Implements an admin API to lock an user without deactivating them. Based on [MSC3939](https://github.com/matrix-org/matrix-spec-proposals/pull/3939). diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index ac4f635099..c269ce6af0 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -146,6 +146,7 @@ Body parameters: - `admin` - **bool**, optional, defaults to `false`. Whether the user is a homeserver administrator, granting them access to the Admin API, among other things. - `deactivated` - **bool**, optional. If unspecified, deactivation state will be left unchanged. +- `locked` - **bool**, optional. If unspecified, locked state will be left unchanged. 
Note: the `password` field must also be set if both of the following are true: - `deactivated` is set to `false` and the user was previously deactivated (you are reactivating this user) diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 2987c9332d..a17a8c2900 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3631,6 +3631,7 @@ This option has the following sub-options: * `prefer_local_users`: Defines whether to prefer local users in search query results. If set to true, local users are more likely to appear above remote users when searching the user directory. Defaults to false. +* `show_locked_users`: Defines whether to show locked users in search query results. Defaults to false. Example configuration: ```yaml @@ -3638,6 +3639,7 @@ user_directory: enabled: false search_all_users: true prefer_local_users: true + show_locked_users: true ``` --- ### `user_consent` diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 22c84fbd5b..1300aaf63c 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -123,7 +123,7 @@ BOOLEAN_COLUMNS = { "redactions": ["have_censored"], "room_stats_state": ["is_federatable"], "rooms": ["is_public", "has_auth_chain_index"], - "users": ["shadow_banned", "approved"], + "users": ["shadow_banned", "approved", "locked"], "un_partial_stated_event_stream": ["rejection_status_changed"], "users_who_share_rooms": ["share_private"], "per_user_experimental_features": ["enabled"], diff --git a/synapse/api/auth/__init__.py b/synapse/api/auth/__init__.py index 90cfe39d76..bb3f50f2dd 100644 --- a/synapse/api/auth/__init__.py +++ b/synapse/api/auth/__init__.py @@ -60,6 +60,7 @@ class Auth(Protocol): request: SynapseRequest, allow_guest: bool = False, allow_expired: bool = False, + allow_locked: bool = False, ) -> Requester: """Get a registered user's ID. diff --git a/synapse/api/auth/internal.py b/synapse/api/auth/internal.py index e2ae198b19..6a5fd44ec0 100644 --- a/synapse/api/auth/internal.py +++ b/synapse/api/auth/internal.py @@ -58,6 +58,7 @@ class InternalAuth(BaseAuth): request: SynapseRequest, allow_guest: bool = False, allow_expired: bool = False, + allow_locked: bool = False, ) -> Requester: """Get a registered user's ID. @@ -79,7 +80,7 @@ class InternalAuth(BaseAuth): parent_span = active_span() with start_active_span("get_user_by_req"): requester = await self._wrapped_get_user_by_req( - request, allow_guest, allow_expired + request, allow_guest, allow_expired, allow_locked ) if parent_span: @@ -107,6 +108,7 @@ class InternalAuth(BaseAuth): request: SynapseRequest, allow_guest: bool, allow_expired: bool, + allow_locked: bool, ) -> Requester: """Helper for get_user_by_req @@ -126,6 +128,17 @@ class InternalAuth(BaseAuth): access_token, allow_expired=allow_expired ) + # Deny the request if the user account is locked. + if not allow_locked and await self.store.get_user_locked_status( + requester.user.to_string() + ): + raise AuthError( + 401, + "User account has been locked", + errcode=Codes.USER_LOCKED, + additional_fields={"soft_logout": True}, + ) + # Deny the request if the user account has expired. # This check is only done for regular users, not appservice ones. 
if not allow_expired: diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index bd4fc9c0ee..9524102a30 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -27,6 +27,7 @@ from twisted.web.http_headers import Headers from synapse.api.auth.base import BaseAuth from synapse.api.errors import ( AuthError, + Codes, HttpResponseException, InvalidClientTokenError, OAuthInsufficientScopeError, @@ -196,6 +197,7 @@ class MSC3861DelegatedAuth(BaseAuth): request: SynapseRequest, allow_guest: bool = False, allow_expired: bool = False, + allow_locked: bool = False, ) -> Requester: access_token = self.get_access_token_from_request(request) @@ -205,6 +207,17 @@ class MSC3861DelegatedAuth(BaseAuth): # so that we don't provision the user if they don't have enough permission: requester = await self.get_user_by_access_token(access_token, allow_expired) + # Deny the request if the user account is locked. + if not allow_locked and await self.store.get_user_locked_status( + requester.user.to_string() + ): + raise AuthError( + 401, + "User account has been locked", + errcode=Codes.USER_LOCKED, + additional_fields={"soft_logout": True}, + ) + if not allow_guest and requester.is_guest: raise OAuthInsufficientScopeError([SCOPE_MATRIX_API]) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 3546aaf7c3..7ffd72c42c 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -80,6 +80,8 @@ class Codes(str, Enum): WEAK_PASSWORD = "M_WEAK_PASSWORD" INVALID_SIGNATURE = "M_INVALID_SIGNATURE" USER_DEACTIVATED = "M_USER_DEACTIVATED" + # USER_LOCKED = "M_USER_LOCKED" + USER_LOCKED = "ORG_MATRIX_MSC3939_USER_LOCKED" # Part of MSC3848 # https://github.com/matrix-org/matrix-spec-proposals/pull/3848 diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py index c9e18b91e9..f60ec2ea66 100644 --- a/synapse/config/user_directory.py +++ b/synapse/config/user_directory.py @@ -35,3 +35,4 @@ class UserDirectoryConfig(Config): self.user_directory_search_prefer_local_users = user_directory_config.get( "prefer_local_users", False ) + self.show_locked_users = user_directory_config.get("show_locked_users", False) diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 119c7f8384..0e812a6d8b 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -67,6 +67,7 @@ class AdminHandler: "name", "admin", "deactivated", + "locked", "shadow_banned", "creation_ts", "appservice_id", diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 05197edc95..a0f5568000 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -94,6 +94,7 @@ class UserDirectoryHandler(StateDeltasHandler): self.is_mine_id = hs.is_mine_id self.update_user_directory = hs.config.worker.should_update_user_directory self.search_all_users = hs.config.userdirectory.user_directory_search_all_users + self.show_locked_users = hs.config.userdirectory.show_locked_users self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self._hs = hs @@ -144,7 +145,9 @@ class UserDirectoryHandler(StateDeltasHandler): ] } """ - results = await self.store.search_user_dir(user_id, search_term, limit) + results = await self.store.search_user_dir( + user_id, search_term, limit, self.show_locked_users + ) # Remove any spammy users from the results. 
non_spammy_users = [] diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index e0257daa75..04d9ef25b7 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -280,6 +280,17 @@ class UserRestServletV2(RestServlet): HTTPStatus.BAD_REQUEST, "'deactivated' parameter is not of type boolean" ) + lock = body.get("locked", False) + if not isinstance(lock, bool): + raise SynapseError( + HTTPStatus.BAD_REQUEST, "'locked' parameter is not of type boolean" + ) + + if deactivate and lock: + raise SynapseError( + HTTPStatus.BAD_REQUEST, "An user can't be deactivated and locked" + ) + approved: Optional[bool] = None if "approved" in body and self._msc3866_enabled: approved = body["approved"] @@ -397,6 +408,12 @@ class UserRestServletV2(RestServlet): target_user.to_string() ) + if "locked" in body: + if lock and not user["locked"]: + await self.store.set_user_locked_status(user_id, True) + elif not lock and user["locked"]: + await self.store.set_user_locked_status(user_id, False) + if "user_type" in body: await self.store.set_user_type(target_user, user_type) diff --git a/synapse/rest/client/logout.py b/synapse/rest/client/logout.py index 94ad90942f..2e104d4888 100644 --- a/synapse/rest/client/logout.py +++ b/synapse/rest/client/logout.py @@ -40,7 +40,9 @@ class LogoutRestServlet(RestServlet): self._device_handler = handler async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request, allow_expired=True) + requester = await self.auth.get_user_by_req( + request, allow_expired=True, allow_locked=True + ) if requester.device_id is None: # The access token wasn't associated with a device. @@ -67,7 +69,9 @@ class LogoutAllRestServlet(RestServlet): self._device_handler = handler async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request, allow_expired=True) + requester = await self.auth.get_user_by_req( + request, allow_expired=True, allow_locked=True + ) user_id = requester.user.to_string() # first delete all of the user's devices diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index c582cf0573..d3a01d526f 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -205,7 +205,8 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): name, password_hash, is_guest, admin, consent_version, consent_ts, consent_server_notice_sent, appservice_id, creation_ts, user_type, deactivated, COALESCE(shadow_banned, FALSE) AS shadow_banned, - COALESCE(approved, TRUE) AS approved + COALESCE(approved, TRUE) AS approved, + COALESCE(locked, FALSE) AS locked FROM users WHERE name = ? """, @@ -230,10 +231,15 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): # want to make sure we're returning the right type of data. # Note: when adding a column name to this list, be wary of NULLable columns, # since NULL values will be turned into False. - boolean_columns = ["admin", "deactivated", "shadow_banned", "approved"] + boolean_columns = [ + "admin", + "deactivated", + "shadow_banned", + "approved", + "locked", + ] for column in boolean_columns: - if not isinstance(row[column], bool): - row[column] = bool(row[column]) + row[column] = bool(row[column]) return row @@ -1116,6 +1122,27 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): # Convert the integer into a boolean. 
return res == 1 + @cached() + async def get_user_locked_status(self, user_id: str) -> bool: + """Retrieve the value for the `locked` property for the provided user. + + Args: + user_id: The ID of the user to retrieve the status for. + + Returns: + True if the user was locked, false if the user is still active. + """ + + res = await self.db_pool.simple_select_one_onecol( + table="users", + keyvalues={"name": user_id}, + retcol="locked", + desc="get_user_locked_status", + ) + + # Convert the potential integer into a boolean. + return bool(res) + async def get_threepid_validation_session( self, medium: Optional[str], @@ -2111,6 +2138,33 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) txn.call_after(self.is_guest.invalidate, (user_id,)) + async def set_user_locked_status(self, user_id: str, locked: bool) -> None: + """Set the `locked` property for the provided user to the provided value. + + Args: + user_id: The ID of the user to set the status for. + locked: The value to set for `locked`. + """ + + await self.db_pool.runInteraction( + "set_user_locked_status", + self.set_user_locked_status_txn, + user_id, + locked, + ) + + def set_user_locked_status_txn( + self, txn: LoggingTransaction, user_id: str, locked: bool + ) -> None: + self.db_pool.simple_update_one_txn( + txn=txn, + table="users", + keyvalues={"name": user_id}, + updatevalues={"locked": locked}, + ) + self._invalidate_cache_and_stream(txn, self.get_user_locked_status, (user_id,)) + self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) + def update_user_approval_status_txn( self, txn: LoggingTransaction, user_id: str, approved: bool ) -> None: diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 2a136f2ff6..f0dc31fee6 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -995,7 +995,11 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): ) async def search_user_dir( - self, user_id: str, search_term: str, limit: int + self, + user_id: str, + search_term: str, + limit: int, + show_locked_users: bool = False, ) -> SearchResult: """Searches for users in directory @@ -1029,6 +1033,9 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): ) """ + if not show_locked_users: + where_clause += " AND (u.locked IS NULL OR u.locked = FALSE)" + # We allow manipulating the ranking algorithm by injecting statements # based on config options. additional_ordering_statements = [] @@ -1060,6 +1067,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): SELECT d.user_id AS user_id, display_name, avatar_url FROM matching_users as t INNER JOIN user_directory AS d USING (user_id) + LEFT JOIN users AS u ON t.user_id = u.name WHERE %(where_clause)s ORDER BY @@ -1115,6 +1123,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): SELECT d.user_id AS user_id, display_name, avatar_url FROM user_directory_search as t INNER JOIN user_directory AS d USING (user_id) + LEFT JOIN users AS u ON t.user_id = u.name WHERE %(where_clause)s AND value MATCH ? 
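The schema delta and tests for locking follow below. As a rough end-to-end illustration, an admin locks an account with a single boolean, after which the locked user's ordinary requests fail with the MSC3939 error code and a soft logout, while `/logout` itself keeps working because it passes `allow_locked=True`. The v2 user admin endpoint path is assumed from Synapse's user admin API, and the homeserver URL, tokens, and user ID are placeholders.

```python
# Sketch: lock a user via the admin API, then observe what that user's
# client sees. HS/token values are dummies; @user:example.org is made up.
import requests

HS = "https://matrix.example.org"   # assumption: homeserver base URL
ADMIN_TOKEN = "syt_admin_dummy"     # assumption: a server admin's token
USER_TOKEN = "syt_user_dummy"       # assumption: the target user's token

# Lock the account (sending {"locked": false} unlocks it again).
r = requests.put(
    f"{HS}/_synapse/admin/v2/users/@user:example.org",  # assumed admin path
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    json={"locked": True},
)
r.raise_for_status()

# Ordinary requests from the locked user are now rejected with a 401,
# the MSC3939 errcode, and soft_logout=true (see test_locked_user below).
r = requests.get(
    f"{HS}/_matrix/client/v3/sync",
    headers={"Authorization": f"Bearer {USER_TOKEN}"},
)
assert r.status_code == 401
assert r.json()["errcode"] == "ORG_MATRIX_MSC3939_USER_LOCKED"
assert r.json()["soft_logout"] is True
```

Because the account is locked rather than deactivated, its data and devices stay intact; unlocking restores access without re-registration.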
diff --git a/synapse/storage/schema/main/delta/80/01_users_alter_locked.sql b/synapse/storage/schema/main/delta/80/01_users_alter_locked.sql new file mode 100644 index 0000000000..21c7971441 --- /dev/null +++ b/synapse/storage/schema/main/delta/80/01_users_alter_locked.sql @@ -0,0 +1,16 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE users ADD locked BOOLEAN DEFAULT FALSE NOT NULL; diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index cdb0048122..ce96574915 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -69,6 +69,7 @@ class AuthTestCase(unittest.HomeserverTestCase): ) self.store.get_user_by_access_token = simple_async_mock(user_info) self.store.mark_access_token_as_used = simple_async_mock(None) + self.store.get_user_locked_status = simple_async_mock(False) request = Mock(args={}) request.args[b"access_token"] = [self.test_token] @@ -293,6 +294,7 @@ class AuthTestCase(unittest.HomeserverTestCase): ) self.store.insert_client_ip = simple_async_mock(None) self.store.mark_access_token_as_used = simple_async_mock(None) + self.store.get_user_locked_status = simple_async_mock(False) request = Mock(args={}) request.getClientAddress.return_value.host = "127.0.0.1" request.args[b"access_token"] = [self.test_token] @@ -311,6 +313,7 @@ class AuthTestCase(unittest.HomeserverTestCase): token_used=True, ) ) + self.store.get_user_locked_status = simple_async_mock(False) self.store.insert_client_ip = simple_async_mock(None) self.store.mark_access_token_as_used = simple_async_mock(None) request = Mock(args={}) diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 9af9db6e3e..41a959b4d6 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -29,7 +29,16 @@ from synapse.api.constants import ApprovalNoticeMedium, LoginType, UserTypes from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError from synapse.api.room_versions import RoomVersions from synapse.media.filepath import MediaFilePaths -from synapse.rest.client import devices, login, logout, profile, register, room, sync +from synapse.rest.client import ( + devices, + login, + logout, + profile, + register, + room, + sync, + user_directory, +) from synapse.server import HomeServer from synapse.types import JsonDict, UserID, create_requester from synapse.util import Clock @@ -1477,6 +1486,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): login.register_servlets, sync.register_servlets, register.register_servlets, + user_directory.register_servlets, ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: @@ -2464,6 +2474,105 @@ class UserRestTestCase(unittest.HomeserverTestCase): # This key was removed intentionally. Ensure it is not accidentally re-included. 
self.assertNotIn("password_hash", channel.json_body) + def test_locked_user(self) -> None: + # User can sync + channel = self.make_request( + "GET", + "/_matrix/client/v3/sync", + access_token=self.other_user_token, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + + # Lock user + channel = self.make_request( + "PUT", + self.url_other_user, + access_token=self.admin_user_tok, + content={"locked": True}, + ) + + # User is not authorized to sync anymore + channel = self.make_request( + "GET", + "/_matrix/client/v3/sync", + access_token=self.other_user_token, + ) + self.assertEqual(401, channel.code, msg=channel.json_body) + self.assertEqual(Codes.USER_LOCKED, channel.json_body["errcode"]) + self.assertTrue(channel.json_body["soft_logout"]) + + @override_config({"user_directory": {"enabled": True, "search_all_users": True}}) + def test_locked_user_not_in_user_dir(self) -> None: + # User is available in the user dir + channel = self.make_request( + "POST", + "/_matrix/client/v3/user_directory/search", + {"search_term": self.other_user}, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertIn("results", channel.json_body) + self.assertEqual(1, len(channel.json_body["results"])) + + # Lock user + channel = self.make_request( + "PUT", + self.url_other_user, + access_token=self.admin_user_tok, + content={"locked": True}, + ) + + # User is not available anymore in the user dir + channel = self.make_request( + "POST", + "/_matrix/client/v3/user_directory/search", + {"search_term": self.other_user}, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertIn("results", channel.json_body) + self.assertEqual(0, len(channel.json_body["results"])) + + @override_config( + { + "user_directory": { + "enabled": True, + "search_all_users": True, + "show_locked_users": True, + } + } + ) + def test_locked_user_in_user_dir_with_show_locked_users_option(self) -> None: + # User is available in the user dir + channel = self.make_request( + "POST", + "/_matrix/client/v3/user_directory/search", + {"search_term": self.other_user}, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertIn("results", channel.json_body) + self.assertEqual(1, len(channel.json_body["results"])) + + # Lock user + channel = self.make_request( + "PUT", + self.url_other_user, + access_token=self.admin_user_tok, + content={"locked": True}, + ) + + # User is still available in the user dir + channel = self.make_request( + "POST", + "/_matrix/client/v3/user_directory/search", + {"search_term": self.other_user}, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertIn("results", channel.json_body) + self.assertEqual(1, len(channel.json_body["results"])) + @override_config({"user_directory": {"enabled": True, "search_all_users": True}}) def test_change_name_deactivate_user_user_directory(self) -> None: """ diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index 05ea802008..ba41459d08 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -48,6 +48,7 @@ class RegistrationStoreTestCase(HomeserverTestCase): "creation_ts": 0, "user_type": None, "deactivated": 0, + "locked": 0, "shadow_banned": 0, "approved": 1, }, From efd4d06d7694e269f1d85e697104e742a984da18 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 10 Aug 2023 
07:39:46 -0400 Subject: [PATCH 310/562] Clean-up presence code (#16092) Misc. clean-ups to: * Use keyword arguments. * Return early (reducing indentation) in some functions. * Remove duplicated / unused code. * Use wrap_as_background_process. --- changelog.d/16092.misc | 1 + synapse/handlers/presence.py | 169 ++++++++++++++++------------------- 2 files changed, 76 insertions(+), 94 deletions(-) create mode 100644 changelog.d/16092.misc diff --git a/changelog.d/16092.misc b/changelog.d/16092.misc new file mode 100644 index 0000000000..b520807771 --- /dev/null +++ b/changelog.d/16092.misc @@ -0,0 +1 @@ +Clean-up the presence code. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index cd7df0525f..11dff724e6 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -30,7 +30,6 @@ from types import TracebackType from typing import ( TYPE_CHECKING, Any, - Awaitable, Callable, Collection, Dict, @@ -54,7 +53,10 @@ from synapse.appservice import ApplicationService from synapse.events.presence_router import PresenceRouter from synapse.logging.context import run_in_background from synapse.metrics import LaterGauge -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import ( + run_as_background_process, + wrap_as_background_process, +) from synapse.replication.http.presence import ( ReplicationBumpPresenceActiveTime, ReplicationPresenceSetState, @@ -141,6 +143,8 @@ class BasePresenceHandler(abc.ABC): self.state = hs.get_state_handler() self.is_mine_id = hs.is_mine_id + self._presence_enabled = hs.config.server.use_presence + self._federation = None if hs.should_send_federation(): self._federation = hs.get_federation_sender() @@ -149,6 +153,15 @@ class BasePresenceHandler(abc.ABC): self._busy_presence_enabled = hs.config.experimental.msc3026_enabled + self.VALID_PRESENCE: Tuple[str, ...] = ( + PresenceState.ONLINE, + PresenceState.UNAVAILABLE, + PresenceState.OFFLINE, + ) + + if self._busy_presence_enabled: + self.VALID_PRESENCE += (PresenceState.BUSY,) + active_presence = self.store.take_presence_startup_info() self.user_to_current_state = {state.user_id: state for state in active_presence} @@ -395,8 +408,6 @@ class WorkerPresenceHandler(BasePresenceHandler): self._presence_writer_instance = hs.config.worker.writers.presence[0] - self._presence_enabled = hs.config.server.use_presence - # Route presence EDUs to the right worker hs.get_federation_registry().register_instances_for_edu( EduTypes.PRESENCE, @@ -421,8 +432,6 @@ class WorkerPresenceHandler(BasePresenceHandler): self.send_stop_syncing, UPDATE_SYNCING_USERS_MS ) - self._busy_presence_enabled = hs.config.experimental.msc3026_enabled - hs.get_reactor().addSystemEventTrigger( "before", "shutdown", @@ -490,7 +499,9 @@ class WorkerPresenceHandler(BasePresenceHandler): # what the spec wants: see comment in the BasePresenceHandler version # of this function.
await self.set_state( - UserID.from_string(user_id), {"presence": presence_state}, True + UserID.from_string(user_id), + {"presence": presence_state}, + ignore_status_msg=True, ) curr_sync = self._user_to_num_current_syncs.get(user_id, 0) @@ -601,22 +612,13 @@ class WorkerPresenceHandler(BasePresenceHandler): """ presence = state["presence"] - valid_presence = ( - PresenceState.ONLINE, - PresenceState.UNAVAILABLE, - PresenceState.OFFLINE, - PresenceState.BUSY, - ) - - if presence not in valid_presence or ( - presence == PresenceState.BUSY and not self._busy_presence_enabled - ): + if presence not in self.VALID_PRESENCE: raise SynapseError(400, "Invalid presence state") user_id = target_user.to_string() # If presence is disabled, no-op - if not self.hs.config.server.use_presence: + if not self._presence_enabled: return # Proxy request to instance that writes presence @@ -633,7 +635,7 @@ class WorkerPresenceHandler(BasePresenceHandler): with the app. """ # If presence is disabled, no-op - if not self.hs.config.server.use_presence: + if not self._presence_enabled: return # Proxy request to instance that writes presence @@ -649,7 +651,6 @@ class PresenceHandler(BasePresenceHandler): self.hs = hs self.wheel_timer: WheelTimer[str] = WheelTimer() self.notifier = hs.get_notifier() - self._presence_enabled = hs.config.server.use_presence federation_registry = hs.get_federation_registry() @@ -700,8 +701,6 @@ class PresenceHandler(BasePresenceHandler): self._on_shutdown, ) - self._next_serial = 1 - # Keeps track of the number of *ongoing* syncs on this process. While # this is non zero a user will never go offline. self.user_to_num_current_syncs: Dict[str, int] = {} @@ -723,21 +722,16 @@ class PresenceHandler(BasePresenceHandler): # Start a LoopingCall in 30s that fires every 5s. # The initial delay is to allow disconnected clients a chance to # reconnect before we treat them as offline. - def run_timeout_handler() -> Awaitable[None]: - return run_as_background_process( - "handle_presence_timeouts", self._handle_timeouts - ) - self.clock.call_later( - 30, self.clock.looping_call, run_timeout_handler, 5000 + 30, self.clock.looping_call, self._handle_timeouts, 5000 ) - def run_persister() -> Awaitable[None]: - return run_as_background_process( - "persist_presence_changes", self._persist_unpersisted_changes - ) - - self.clock.call_later(60, self.clock.looping_call, run_persister, 60 * 1000) + self.clock.call_later( + 60, + self.clock.looping_call, + self._persist_unpersisted_changes, + 60 * 1000, + ) LaterGauge( "synapse_handlers_presence_wheel_timer_size", @@ -783,6 +777,7 @@ class PresenceHandler(BasePresenceHandler): ) logger.info("Finished _on_shutdown") + @wrap_as_background_process("persist_presence_changes") async def _persist_unpersisted_changes(self) -> None: """We periodically persist the unpersisted changes, as otherwise they may stack up and slow down shutdown times. @@ -898,6 +893,7 @@ class PresenceHandler(BasePresenceHandler): states, [destination] ) + @wrap_as_background_process("handle_presence_timeouts") async def _handle_timeouts(self) -> None: """Checks the presence of users that have timed out and updates as appropriate. @@ -955,7 +951,7 @@ class PresenceHandler(BasePresenceHandler): with the app. """ # If presence is disabled, no-op - if not self.hs.config.server.use_presence: + if not self._presence_enabled: return user_id = user.to_string() @@ -990,56 +986,51 @@ class PresenceHandler(BasePresenceHandler): client that is being used by a user. 
presence_state: The presence state indicated in the sync request """ - # Override if it should affect the user's presence, if presence is - # disabled. - if not self.hs.config.server.use_presence: - affect_presence = False + if not affect_presence or not self._presence_enabled: + return _NullContextManager() - if affect_presence: - curr_sync = self.user_to_num_current_syncs.get(user_id, 0) - self.user_to_num_current_syncs[user_id] = curr_sync + 1 + curr_sync = self.user_to_num_current_syncs.get(user_id, 0) + self.user_to_num_current_syncs[user_id] = curr_sync + 1 + prev_state = await self.current_state_for_user(user_id) + + # If they're busy then they don't stop being busy just by syncing, + # so just update the last sync time. + if prev_state.state != PresenceState.BUSY: + # XXX: We set_state separately here and just update the last_active_ts above + # This keeps the logic as similar as possible between the worker and single + # process modes. Using set_state will actually cause last_active_ts to be + # updated always, which is not what the spec calls for, but synapse has done + # this for... forever, I think. + await self.set_state( + UserID.from_string(user_id), + {"presence": presence_state}, + ignore_status_msg=True, + ) + # Retrieve the new state for the logic below. This should come from the + # in-memory cache. prev_state = await self.current_state_for_user(user_id) - # If they're busy then they don't stop being busy just by syncing, - # so just update the last sync time. - if prev_state.state != PresenceState.BUSY: - # XXX: We set_state separately here and just update the last_active_ts above - # This keeps the logic as similar as possible between the worker and single - # process modes. Using set_state will actually cause last_active_ts to be - # updated always, which is not what the spec calls for, but synapse has done - # this for... forever, I think. - await self.set_state( - UserID.from_string(user_id), {"presence": presence_state}, True - ) - # Retrieve the new state for the logic below. This should come from the - # in-memory cache. - prev_state = await self.current_state_for_user(user_id) - - # To keep the single process behaviour consistent with worker mode, run the - # same logic as `update_external_syncs_row`, even though it looks weird. - if prev_state.state == PresenceState.OFFLINE: - await self._update_states( - [ - prev_state.copy_and_replace( - state=PresenceState.ONLINE, - last_active_ts=self.clock.time_msec(), - last_user_sync_ts=self.clock.time_msec(), - ) - ] - ) - # otherwise, set the new presence state & update the last sync time, - # but don't update last_active_ts as this isn't an indication that - # they've been active (even though it's probably been updated by - # set_state above) - else: - await self._update_states( - [ - prev_state.copy_and_replace( - last_user_sync_ts=self.clock.time_msec() - ) - ] - ) + # To keep the single process behaviour consistent with worker mode, run the + # same logic as `update_external_syncs_row`, even though it looks weird. 
+ if prev_state.state == PresenceState.OFFLINE: + await self._update_states( + [ + prev_state.copy_and_replace( + state=PresenceState.ONLINE, + last_active_ts=self.clock.time_msec(), + last_user_sync_ts=self.clock.time_msec(), + ) + ] + ) + # otherwise, set the new presence state & update the last sync time, + # but don't update last_active_ts as this isn't an indication that + # they've been active (even though it's probably been updated by + # set_state above) + else: + await self._update_states( + [prev_state.copy_and_replace(last_user_sync_ts=self.clock.time_msec())] + ) async def _end() -> None: try: @@ -1061,8 +1052,7 @@ class PresenceHandler(BasePresenceHandler): try: yield finally: - if affect_presence: - run_in_background(_end) + run_in_background(_end) return _user_syncing() @@ -1229,20 +1219,11 @@ class PresenceHandler(BasePresenceHandler): status_msg = state.get("status_msg", None) presence = state["presence"] - valid_presence = ( - PresenceState.ONLINE, - PresenceState.UNAVAILABLE, - PresenceState.OFFLINE, - PresenceState.BUSY, - ) - - if presence not in valid_presence or ( - presence == PresenceState.BUSY and not self._busy_presence_enabled - ): + if presence not in self.VALID_PRESENCE: raise SynapseError(400, "Invalid presence state") # If presence is disabled, no-op - if not self.hs.config.server.use_presence: + if not self._presence_enabled: return user_id = target_user.to_string() From 7f4b41369049c143919d229670087df69edb9602 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 10 Aug 2023 17:28:31 +0000 Subject: [PATCH 311/562] Fix the type annotation on `run_db_interaction` in the Module API. (#16089) * Fix the method signature of `run_db_interaction` on the module API * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/16089.misc | 1 + synapse/module_api/__init__.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16089.misc diff --git a/changelog.d/16089.misc b/changelog.d/16089.misc new file mode 100644 index 0000000000..8c302e6884 --- /dev/null +++ b/changelog.d/16089.misc @@ -0,0 +1 @@ +Fix the type annotation on `run_db_interaction` in the Module API. 
\ No newline at end of file diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index acee1dafd3..9ad8e038ae 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -31,7 +31,7 @@ from typing import ( import attr import jinja2 -from typing_extensions import ParamSpec +from typing_extensions import Concatenate, ParamSpec from twisted.internet import defer from twisted.internet.interfaces import IDelayedCall @@ -885,7 +885,7 @@ class ModuleApi: def run_db_interaction( self, desc: str, - func: Callable[P, T], + func: Callable[Concatenate[LoggingTransaction, P], T], *args: P.args, **kwargs: P.kwargs, ) -> "defer.Deferred[T]": From 614efc488b1a25dfa32256930c5acc896c88d92f Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Fri, 11 Aug 2023 12:37:09 +0100 Subject: [PATCH 312/562] Add linearizer on user ID to push rule PUT/DELETE requests (#16052) See: #16053 Signed off by Nick @ Beeper (@Fizzadar) --- changelog.d/16052.bugfix | 1 + synapse/rest/client/push_rule.py | 28 ++++++++++++++++++++++------ 2 files changed, 23 insertions(+), 6 deletions(-) create mode 100644 changelog.d/16052.bugfix diff --git a/changelog.d/16052.bugfix b/changelog.d/16052.bugfix new file mode 100644 index 0000000000..3c7a60f226 --- /dev/null +++ b/changelog.d/16052.bugfix @@ -0,0 +1 @@ +Fix long-standing bug where concurrent requests to change a user's push rules could cause a deadlock. Contributed by Nick @ Beeper (@fizzadar). diff --git a/synapse/rest/client/push_rule.py b/synapse/rest/client/push_rule.py index 5c9fece3ba..5ed3b83a03 100644 --- a/synapse/rest/client/push_rule.py +++ b/synapse/rest/client/push_rule.py @@ -32,6 +32,7 @@ from synapse.push.rulekinds import PRIORITY_CLASS_MAP from synapse.rest.client._base import client_patterns from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException from synapse.types import JsonDict +from synapse.util.async_helpers import Linearizer if TYPE_CHECKING: from synapse.server import HomeServer @@ -53,26 +54,32 @@ class PushRuleRestServlet(RestServlet): self.notifier = hs.get_notifier() self._is_worker = hs.config.worker.worker_app is not None self._push_rules_handler = hs.get_push_rules_handler() + self._push_rule_linearizer = Linearizer(name="push_rules") async def on_PUT(self, request: SynapseRequest, path: str) -> Tuple[int, JsonDict]: if self._is_worker: raise Exception("Cannot handle PUT /push_rules on worker") + requester = await self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + + async with self._push_rule_linearizer.queue(user_id): + return await self.handle_put(request, path, user_id) + + async def handle_put( + self, request: SynapseRequest, path: str, user_id: str + ) -> Tuple[int, JsonDict]: spec = _rule_spec_from_path(path.split("/")) try: priority_class = _priority_class_from_spec(spec) except InvalidRuleException as e: raise SynapseError(400, str(e)) - requester = await self.auth.get_user_by_req(request) - if "/" in spec.rule_id or "\\" in spec.rule_id: raise SynapseError(400, "rule_id may not contain slashes") content = parse_json_value_from_request(request) - user_id = requester.user.to_string() - if spec.attr: try: await self._push_rules_handler.set_rule_attr(user_id, spec, content) @@ -126,11 +133,20 @@ class PushRuleRestServlet(RestServlet): if self._is_worker: raise Exception("Cannot handle DELETE /push_rules on worker") - spec = _rule_spec_from_path(path.split("/")) - requester = await self.auth.get_user_by_req(request) user_id = 
requester.user.to_string() + async with self._push_rule_linearizer.queue(user_id): + return await self.handle_delete(request, path, user_id) + + async def handle_delete( + self, + request: SynapseRequest, + path: str, + user_id: str, + ) -> Tuple[int, JsonDict]: + spec = _rule_spec_from_path(path.split("/")) + namespaced_rule_id = f"global/{spec.template}/{spec.rule_id}" try: From 9ff84bccbb152460913d63d4b8e9dffc220adfea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gabriel=20Rodr=C3=ADguez?= Date: Fri, 11 Aug 2023 16:15:17 -0400 Subject: [PATCH 313/562] Allow customizing IdP name and icon for SAML and CAS (#16094) --- changelog.d/16094.feature | 1 + .../configuration/config_documentation.md | 22 ++++++++++++++++++- synapse/config/cas.py | 4 ++++ synapse/config/saml2.py | 6 +++++ synapse/handlers/cas.py | 11 +++++----- synapse/handlers/saml.py | 11 +++++----- 6 files changed, 44 insertions(+), 11 deletions(-) create mode 100644 changelog.d/16094.feature diff --git a/changelog.d/16094.feature b/changelog.d/16094.feature new file mode 100644 index 0000000000..3be71badb9 --- /dev/null +++ b/changelog.d/16094.feature @@ -0,0 +1 @@ +Allow customising the IdP display name, icon, and brand for SAML and CAS providers (in addition to OIDC provider). diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index a17a8c2900..6601bba9f2 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3025,6 +3025,16 @@ enable SAML login. You can either put your entire pysaml2 config inline using the option, or you can specify a path to a pysaml2 config file with the sub-option `config_path`. This setting has the following sub-options: +* `idp_name`: A user-facing name for this identity provider, which is used to + offer the user a choice of login mechanisms. +* `idp_icon`: An optional icon for this identity provider, which is presented + by clients and Synapse's own IdP picker page. If given, must be an + MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to + obtain such an MXC URI is to upload an image to an (unencrypted) room + and then copy the "url" from the source of the event.) +* `idp_brand`: An optional brand for this identity provider, allowing clients + to style the login flow according to the identity provider in question. + See the [spec](https://spec.matrix.org/latest/) for possible options here. * `sp_config`: the configuration for the pysaml2 Service Provider. See pysaml2 docs for format of config. Default values will be used for the `entityid` and `service` settings, so it is not normally necessary to specify them unless you need to @@ -3176,7 +3186,7 @@ Options for each entry include: * `idp_icon`: An optional icon for this identity provider, which is presented by clients and Synapse's own IdP picker page. If given, must be an - MXC URI of the format mxc://<server-name>/<media-id>. (An easy way to + MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to obtain such an MXC URI is to upload an image to an (unencrypted) room and then copy the "url" from the source of the event.) @@ -3391,6 +3401,16 @@ Enable Central Authentication Service (CAS) for registration and login. Has the following sub-options: * `enabled`: Set this to true to enable authorization against a CAS server. Defaults to false. +* `idp_name`: A user-facing name for this identity provider, which is used to + offer the user a choice of login mechanisms.
+* `idp_icon`: An optional icon for this identity provider, which is presented + by clients and Synapse's own IdP picker page. If given, must be an + MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to + obtain such an MXC URI is to upload an image to an (unencrypted) room + and then copy the "url" from the source of the event.) +* `idp_brand`: An optional brand for this identity provider, allowing clients + to style the login flow according to the identity provider in question. + See the [spec](https://spec.matrix.org/latest/) for possible options here. * `server_url`: The URL of the CAS authorization endpoint. * `displayname_attribute`: The attribute of the CAS response to use as the display name. If no name is given here, no displayname will be set. diff --git a/synapse/config/cas.py b/synapse/config/cas.py index 9152c06bd6..c4e63e7411 100644 --- a/synapse/config/cas.py +++ b/synapse/config/cas.py @@ -47,6 +47,10 @@ class CasConfig(Config): required_attributes ) + self.idp_name = cas_config.get("idp_name", "CAS") + self.idp_icon = cas_config.get("idp_icon") + self.idp_brand = cas_config.get("idp_brand") + else: self.cas_server_url = None self.cas_service_url = None diff --git a/synapse/config/saml2.py b/synapse/config/saml2.py index 49ca663dde..c69e24cf26 100644 --- a/synapse/config/saml2.py +++ b/synapse/config/saml2.py @@ -89,8 +89,14 @@ class SAML2Config(Config): "grandfathered_mxid_source_attribute", "uid" ) + # refers to a SAML IdP entity ID self.saml2_idp_entityid = saml2_config.get("idp_entityid", None) + # IdP properties for Matrix clients + self.idp_name = saml2_config.get("idp_name", "SAML") + self.idp_icon = saml2_config.get("idp_icon") + self.idp_brand = saml2_config.get("idp_brand") + # user_mapping_provider may be None if the key is present but has no value ump_dict = saml2_config.get("user_mapping_provider") or {} diff --git a/synapse/handlers/cas.py b/synapse/handlers/cas.py index fc467bc7c1..5c71637038 100644 --- a/synapse/handlers/cas.py +++ b/synapse/handlers/cas.py @@ -76,12 +76,13 @@ class CasHandler: self.idp_id = "cas" # user-facing name of this auth provider - self.idp_name = "CAS" + self.idp_name = hs.config.cas.idp_name - # we do not currently support brands/icons for CAS auth, but this is required by - # the SsoIdentityProvider protocol type. - self.idp_icon = None - self.idp_brand = None + # MXC URI for icon for this auth provider + self.idp_icon = hs.config.cas.idp_icon + + # optional brand identifier for this auth provider + self.idp_brand = hs.config.cas.idp_brand self._sso_handler = hs.get_sso_handler() diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py index 6083c9f4b5..d00035c332 100644 --- a/synapse/handlers/saml.py +++ b/synapse/handlers/saml.py @@ -74,12 +74,13 @@ class SamlHandler: self.idp_id = "saml" # user-facing name of this auth provider - self.idp_name = "SAML" + self.idp_name = hs.config.saml2.idp_name - # we do not currently support icons/brands for SAML auth, but this is required by - # the SsoIdentityProvider protocol type.
- self.idp_icon = None - self.idp_brand = None + # MXC URI for icon for this auth provider + self.idp_icon = hs.config.saml2.idp_icon + + # optional brand identifier for this auth provider + self.idp_brand = hs.config.saml2.idp_brand # a map from saml session id to Saml2SessionData object self._outstanding_requests_dict: Dict[str, Saml2SessionData] = {} From d834a80a12b395203bc7c3b3640778c04704476a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Aug 2023 07:30:02 -0400 Subject: [PATCH 314/562] Bump isort from 5.11.5 to 5.12.0 (#16108) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index 71b47a5805..90b52ead38 100644 --- a/poetry.lock +++ b/poetry.lock @@ -887,17 +887,17 @@ scripts = ["click (>=6.0)", "twisted (>=16.4.0)"] [[package]] name = "isort" -version = "5.11.5" +version = "5.12.0" description = "A Python utility / library to sort Python imports." optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.8.0" files = [ - {file = "isort-5.11.5-py3-none-any.whl", hash = "sha256:ba1d72fb2595a01c7895a5128f9585a5cc4b6d395f1c8d514989b9a7eb2a8746"}, - {file = "isort-5.11.5.tar.gz", hash = "sha256:6be1f76a507cb2ecf16c7cf14a37e41609ca082330be4e3436a18ef74add55db"}, + {file = "isort-5.12.0-py3-none-any.whl", hash = "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6"}, + {file = "isort-5.12.0.tar.gz", hash = "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504"}, ] [package.extras] -colors = ["colorama (>=0.4.3,<0.5.0)"] +colors = ["colorama (>=0.4.3)"] pipfile-deprecated-finder = ["pip-shims (>=0.5.2)", "pipreqs", "requirementslib"] plugins = ["setuptools"] requirements-deprecated-finder = ["pip-api", "pipreqs"] From b80ff1602efe31af081573b32953fe731395a524 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Aug 2023 08:03:18 -0400 Subject: [PATCH 315/562] Bump types-pillow from 10.0.0.1 to 10.0.0.2 (#16105) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 90b52ead38..8d874768e3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2991,13 +2991,13 @@ files = [ [[package]] name = "types-pillow" -version = "10.0.0.1" +version = "10.0.0.2" description = "Typing stubs for Pillow" optional = false python-versions = "*" files = [ - {file = "types-Pillow-10.0.0.1.tar.gz", hash = "sha256:834a07a04504f8bf37936679bc6a5802945e7644d0727460c0c4d4307967e2a3"}, - {file = "types_Pillow-10.0.0.1-py3-none-any.whl", hash = "sha256:be576b67418f1cb3b93794cf7946581be1009a33a10085b3c132eb0875a819b4"}, + {file = "types-Pillow-10.0.0.2.tar.gz", hash = "sha256:fe09380ab22d412ced989a067e9ee4af719fa3a47ba1b53b232b46514a871042"}, + {file = "types_Pillow-10.0.0.2-py3-none-any.whl", hash = "sha256:29d51a3ce6ef51fabf728a504d33b4836187ff14256b2e86996d55c91ab214b1"}, ] [[package]] From e21ff0f048d404944d627898ca37114e738a4114 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Aug 2023 08:04:24 -0400 Subject: [PATCH 316/562] Bump types-bleach from 6.0.0.3 to 6.0.0.4 (#16106) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 
file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 8d874768e3..b2f5005b87 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2936,13 +2936,13 @@ twisted = "*" [[package]] name = "types-bleach" -version = "6.0.0.3" +version = "6.0.0.4" description = "Typing stubs for bleach" optional = false python-versions = "*" files = [ - {file = "types-bleach-6.0.0.3.tar.gz", hash = "sha256:8ce7896d4f658c562768674ffcf07492c7730e128018f03edd163ff912bfadee"}, - {file = "types_bleach-6.0.0.3-py3-none-any.whl", hash = "sha256:d43eaf30a643ca824e16e2dcdb0c87ef9226237e2fa3ac4732a50cb3f32e145f"}, + {file = "types-bleach-6.0.0.4.tar.gz", hash = "sha256:357b0226f65c4f20ab3b13ca8d78a6b91c78aad256d8ec168d4e90fc3303ebd4"}, + {file = "types_bleach-6.0.0.4-py3-none-any.whl", hash = "sha256:2b8767eb407c286b7f02803678732e522e04db8d56cbc9f1270bee49627eae92"}, ] [[package]] From 6fc411c7bf54c138a3c5040f99b9f78a37aba5e3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Aug 2023 08:43:08 -0400 Subject: [PATCH 317/562] Bump gitpython from 3.1.31 to 3.1.32 (#16103) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index b2f5005b87..7b81d262d7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -589,13 +589,13 @@ smmap = ">=3.0.1,<6" [[package]] name = "gitpython" -version = "3.1.31" +version = "3.1.32" description = "GitPython is a Python library used to interact with Git repositories" optional = false python-versions = ">=3.7" files = [ - {file = "GitPython-3.1.31-py3-none-any.whl", hash = "sha256:f04893614f6aa713a60cbbe1e6a97403ef633103cdd0ef5eb6efe0deb98dbe8d"}, - {file = "GitPython-3.1.31.tar.gz", hash = "sha256:8ce3bcf69adfdf7c7d503e78fd3b1c492af782d58893b650adb2ac8912ddd573"}, + {file = "GitPython-3.1.32-py3-none-any.whl", hash = "sha256:e3d59b1c2c6ebb9dfa7a184daf3b6dd4914237e7488a1730a6d8f6f5d0b4187f"}, + {file = "GitPython-3.1.32.tar.gz", hash = "sha256:8d9b8cb1e80b9735e8717c9362079d3ce4c6e5ddeebedd0361b228c3a67a62f6"}, ] [package.dependencies] From 4ce32ade5a66bdd65274479288be45e1e8b406c3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Aug 2023 11:57:39 -0400 Subject: [PATCH 318/562] Bump txredisapi from 1.4.9 to 1.4.10 (#16107) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 7b81d262d7..db1332a04b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2921,13 +2921,13 @@ files = [ [[package]] name = "txredisapi" -version = "1.4.9" +version = "1.4.10" description = "non-blocking redis client for python" optional = true python-versions = "*" files = [ - {file = "txredisapi-1.4.9-py3-none-any.whl", hash = "sha256:72e6ad09cc5fffe3bec2e55e5bfb74407bd357565fc212e6003f7e26ef7d8f78"}, - {file = "txredisapi-1.4.9.tar.gz", hash = "sha256:c9607062d05e4d0b8ef84719eb76a3fe7d5ccd606a2acf024429da51d6e84559"}, + {file = "txredisapi-1.4.10-py3-none-any.whl", hash = "sha256:0a6ea77f27f8cf092f907654f08302a97b48fa35f24e0ad99dfb74115f018161"}, + {file = "txredisapi-1.4.10.tar.gz", hash = "sha256:7609a6af6ff4619a3189c0adfb86aeda789afba69eb59fc1e19ac0199e725395"}, ] [package.dependencies] From 837f28ce748fdf14c3739787155455aee043dd3e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Aug 2023 10:58:32 +0100 Subject: [PATCH 319/562] Bump log from 0.4.19 to 0.4.20 (#16109) Bumps [log](https://github.com/rust-lang/log) from 0.4.19 to 0.4.20. - [Release notes](https://github.com/rust-lang/log/releases) - [Changelog](https://github.com/rust-lang/log/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/log/compare/0.4.19...0.4.20) --- updated-dependencies: - dependency-name: log dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 45e0f116e6..79d9cefcf6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -132,9 +132,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "memchr" From 29638220ab31aea16cd3da073025e9cb734628d5 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 15 Aug 2023 11:17:54 +0100 Subject: [PATCH 320/562] 1.90.0 --- CHANGES.md | 5 +++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 95d8227ee0..666cd31ba0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,8 @@ +# Synapse 1.90.0 (2023-08-15) + +No significant changes since 1.90.0rc1. + + # Synapse 1.90.0rc1 (2023-08-08) ### Features diff --git a/debian/changelog b/debian/changelog index ed35abc9ee..ad9a4b3c8c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.90.0) stable; urgency=medium + + * New Synapse release 1.90.0. + + -- Synapse Packaging team <packages@matrix.org>  Tue, 15 Aug 2023 11:17:34 +0100 + matrix-synapse-py3 (1.90.0~rc1) stable; urgency=medium * New Synapse release 1.90.0rc1. diff --git a/pyproject.toml b/pyproject.toml index ca532e2c7c..86680cb8e5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.90.0rc1" +version = "1.90.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors <packages@matrix.org>"] license = "Apache-2.0" From ad3f43be9a597dd4fdf59e0a95e4630e7b9502fe Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 15 Aug 2023 08:11:20 -0400 Subject: [PATCH 321/562] Run pyupgrade for python 3.7 & 3.8.
(#16110) --- changelog.d/16110.misc | 1 + contrib/cmdclient/console.py | 2 +- docker/configure_workers_and_start.py | 2 +- docker/start.py | 2 +- scripts-dev/build_debian_packages.py | 2 +- scripts-dev/check_schema_delta.py | 2 +- scripts-dev/federation_client.py | 2 +- scripts-dev/release.py | 1 - scripts-dev/sign_json.py | 2 +- synapse/__init__.py | 8 +++- synapse/_scripts/synapse_port_db.py | 6 +-- synapse/_scripts/update_synapse_database.py | 2 +- synapse/api/constants.py | 3 +- synapse/handlers/presence.py | 2 +- synapse/handlers/sso.py | 5 +- synapse/handlers/stats.py | 12 +++-- synapse/handlers/sync.py | 8 ++-- synapse/logging/_remote.py | 3 +- .../callbacks/spamchecker_callbacks.py | 48 +++++-------------- synapse/replication/tcp/handler.py | 2 +- synapse/storage/databases/main/filtering.py | 3 +- synapse/storage/databases/main/keys.py | 2 +- synapse/storage/databases/main/stats.py | 3 +- synapse/storage/engines/_base.py | 2 +- synapse/storage/prepare_database.py | 12 ++++- synapse/types/__init__.py | 3 +- synapse/util/async_helpers.py | 3 +- synapse/util/macaroons.py | 2 +- synapse/util/ratelimitutils.py | 2 +- synapse/visibility.py | 2 +- tests/app/test_phone_stats_home.py | 2 +- tests/crypto/test_keyring.py | 2 +- .../test_matrix_federation_agent.py | 2 +- tests/module_api/test_api.py | 2 +- tests/replication/test_multi_media_repo.py | 2 +- tests/rest/client/test_redactions.py | 10 ++-- tests/rest/client/test_relations.py | 38 +++++++-------- tests/rest/client/test_rooms.py | 6 +-- tests/server.py | 3 +- tests/storage/test_appservice.py | 6 +-- tests/storage/test_main.py | 2 +- tests/storage/test_room_search.py | 8 ++-- tests/test_visibility.py | 2 +- 43 files changed, 113 insertions(+), 121 deletions(-) create mode 100644 changelog.d/16110.misc diff --git a/changelog.d/16110.misc b/changelog.d/16110.misc new file mode 100644 index 0000000000..68efe86ddc --- /dev/null +++ b/changelog.d/16110.misc @@ -0,0 +1 @@ +Run `pyupgrade` for Python 3.8+. 
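The remainder of this patch is mechanical: it applies the rewrites that `pyupgrade` performs once the minimum supported Python is 3.8. As an illustrative sketch only (none of the names below are Synapse code, and the tool invocation assumed is `pyupgrade --py38-plus`), the main idioms it touches are:

```python
# Illustrative before/after pairs mirroring the rewrites visible in the
# diff below; the classes and variables here are made up for the example.

# 1. super() no longer needs explicit arguments:
class Legacy(dict):
    def __init__(self) -> None:
        super(Legacy, self).__init__()  # before

class Modern(dict):
    def __init__(self) -> None:
        super().__init__()  # after

# 2. str.format() becomes an f-string:
module, name = "synapse.app", "homeserver"
assert "{}.{}".format(module, name) == f"{module}.{name}"

# 3. Redundant read modes are dropped:  open(path, "rt")  ->  open(path)

# 4. Names available from typing since 3.8 are imported from typing rather
#    than typing_extensions:
from typing import Deque, Final  # before: from typing_extensions import ...

# 5. Unneeded parentheses around generator expressions are removed:
values = [1, 2, 3]
assert any(v > 2 for v in values)  # before: any((v > 2 for v in values))
```

The same sweep also replaces the deprecated unittest alias `assertEquals` with `assertEqual` throughout the test suite.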
diff --git a/contrib/cmdclient/console.py b/contrib/cmdclient/console.py index 895b2a7af1..710fe25699 100755 --- a/contrib/cmdclient/console.py +++ b/contrib/cmdclient/console.py @@ -769,7 +769,7 @@ def main(server_url, identity_server_url, username, token, config_path): global CONFIG_JSON CONFIG_JSON = config_path # bit cheeky, but just overwrite the global try: - with open(config_path, "r") as config: + with open(config_path) as config: syn_cmd.config = json.load(config) try: http_client.verbose = "on" == syn_cmd.config["verbose"] diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index dc824038b5..400a7515aa 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -861,7 +861,7 @@ def generate_worker_files( # Then a worker config file convert( "/conf/worker.yaml.j2", - "/conf/workers/{name}.yaml".format(name=worker_name), + f"/conf/workers/{worker_name}.yaml", **worker_config, worker_log_config_filepath=log_config_filepath, using_unix_sockets=using_unix_sockets, diff --git a/docker/start.py b/docker/start.py index ebcc599f04..aebc7e4aaa 100755 --- a/docker/start.py +++ b/docker/start.py @@ -82,7 +82,7 @@ def generate_config_from_template( with open(filename) as handle: value = handle.read() else: - log("Generating a random secret for {}".format(secret)) + log(f"Generating a random secret for {secret}") value = codecs.encode(os.urandom(32), "hex").decode() with open(filename, "w") as handle: handle.write(value) diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py index bb89ba581c..c03e3418c0 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py @@ -47,7 +47,7 @@ can be passed on the commandline for debugging. projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) -class Builder(object): +class Builder: def __init__( self, redirect_stdout: bool = False, diff --git a/scripts-dev/check_schema_delta.py b/scripts-dev/check_schema_delta.py index fee4a8bd3d..467be96fdf 100755 --- a/scripts-dev/check_schema_delta.py +++ b/scripts-dev/check_schema_delta.py @@ -43,7 +43,7 @@ def main(force_colors: bool) -> None: diffs: List[git.Diff] = repo.remote().refs.develop.commit.diff(None) # Get the schema version of the local file to check against current schema on develop - with open("synapse/storage/schema/__init__.py", "r") as file: + with open("synapse/storage/schema/__init__.py") as file: local_schema = file.read() new_locals: Dict[str, Any] = {} exec(local_schema, new_locals) diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index 63f0b25ddd..5ad334b4d8 100755 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -247,7 +247,7 @@ def main() -> None: def read_args_from_config(args: argparse.Namespace) -> None: - with open(args.config, "r") as fh: + with open(args.config) as fh: config = yaml.safe_load(fh) if not args.server_name: diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 89ffba8d92..4ac8eaa889 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/scripts-dev/sign_json.py b/scripts-dev/sign_json.py index bb217799fb..00cbaf68f5 100755 --- a/scripts-dev/sign_json.py +++ b/scripts-dev/sign_json.py @@ -145,7 +145,7 @@ Example usage: def read_args_from_config(args: argparse.Namespace) -> None: - with open(args.config, "r") as fh: + with open(args.config) as fh: config = yaml.safe_load(fh) if not args.server_name: args.server_name = config["server_name"] diff --git a/synapse/__init__.py b/synapse/__init__.py index 6c1801862b..2f9c22a833 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -25,7 +25,11 @@ from synapse.util.rust import check_rust_lib_up_to_date from synapse.util.stringutils import strtobool # Check that we're not running on an unsupported Python version. -if sys.version_info < (3, 8): +# +# Note that we use an (unneeded) variable here so that pyupgrade doesn't nuke the +# if-statement completely. +py_version = sys.version_info +if py_version < (3, 8): print("Synapse requires Python 3.8 or above.") sys.exit(1) @@ -78,7 +82,7 @@ try: except ImportError: pass -import synapse.util +import synapse.util # noqa: E402 __version__ = synapse.util.SYNAPSE_VERSION diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 1300aaf63c..49242800b8 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -1205,10 +1205,10 @@ class CursesProgress(Progress): self.total_processed = 0 self.total_remaining = 0 - super(CursesProgress, self).__init__() + super().__init__() def update(self, table: str, num_done: int) -> None: - super(CursesProgress, self).update(table, num_done) + super().update(table, num_done) self.total_processed = 0 self.total_remaining = 0 @@ -1304,7 +1304,7 @@ class TerminalProgress(Progress): """Just prints progress to the terminal""" def update(self, table: str, num_done: int) -> None: - super(TerminalProgress, self).update(table, num_done) + super().update(table, num_done) data = self.tables[table] diff --git a/synapse/_scripts/update_synapse_database.py b/synapse/_scripts/update_synapse_database.py index 0adf94bba6..f97aecf8d5 100644 --- a/synapse/_scripts/update_synapse_database.py +++ b/synapse/_scripts/update_synapse_database.py @@ -38,7 +38,7 @@ class MockHomeserver(HomeServer): DATASTORE_CLASS = DataStore # type: ignore [assignment] def __init__(self, config: HomeServerConfig): - super(MockHomeserver, self).__init__( + super().__init__( hostname=config.server.server_name, config=config, reactor=reactor, diff --git a/synapse/api/constants.py b/synapse/api/constants.py index dc32553d0c..bf311b636d 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -18,8 +18,7 @@ """Contains constants from the specification.""" import enum - -from typing_extensions import Final +from typing import Final # the max size of a (canonical-json-encoded) event MAX_PDU_SIZE = 65536 diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 11dff724e6..e8e9db4b91 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -32,6 +32,7 @@ from typing import ( Any, Callable, Collection, + ContextManager, Dict, Generator, Iterable, @@ -43,7 +44,6 @@ from typing import ( ) from prometheus_client import Counter -from typing_extensions import ContextManager import synapse.metrics from synapse.api.constants import EduTypes, EventTypes, Membership, PresenceState diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index 
4d29328a74..e9a544e754 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -24,13 +24,14 @@ from typing import ( Iterable, List, Mapping, + NoReturn, Optional, Set, ) from urllib.parse import urlencode import attr -from typing_extensions import NoReturn, Protocol +from typing_extensions import Protocol from twisted.web.iweb import IRequest from twisted.web.server import Request @@ -791,7 +792,7 @@ class SsoHandler: if code != 200: raise Exception( - "GET request to download sso avatar image returned {}".format(code) + f"GET request to download sso avatar image returned {code}" ) # upload name includes hash of the image file's content so that we can diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 7cabf7980a..3dde19fc81 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -14,9 +14,15 @@ # limitations under the License. import logging from collections import Counter -from typing import TYPE_CHECKING, Any, Dict, Iterable, Optional, Tuple - -from typing_extensions import Counter as CounterType +from typing import ( + TYPE_CHECKING, + Any, + Counter as CounterType, + Dict, + Iterable, + Optional, + Tuple, +) from synapse.api.constants import EventContentFields, EventTypes, Membership from synapse.metrics import event_processing_positions diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index c010405be6..8174248387 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1442,11 +1442,9 @@ class SyncHandler: # Now we have our list of joined room IDs, exclude as configured and freeze joined_room_ids = frozenset( - ( - room_id - for room_id in mutable_joined_room_ids - if room_id not in mutable_rooms_to_exclude - ) + room_id + for room_id in mutable_joined_room_ids + if room_id not in mutable_rooms_to_exclude ) logger.debug( diff --git a/synapse/logging/_remote.py b/synapse/logging/_remote.py index 5a61b21eaf..284fbac524 100644 --- a/synapse/logging/_remote.py +++ b/synapse/logging/_remote.py @@ -18,10 +18,9 @@ import traceback from collections import deque from ipaddress import IPv4Address, IPv6Address, ip_address from math import floor -from typing import Callable, Optional +from typing import Callable, Deque, Optional import attr -from typing_extensions import Deque from zope.interface import implementer from twisted.application.internet import ClientService diff --git a/synapse/module_api/callbacks/spamchecker_callbacks.py b/synapse/module_api/callbacks/spamchecker_callbacks.py index e191450323..32db7cce8d 100644 --- a/synapse/module_api/callbacks/spamchecker_callbacks.py +++ b/synapse/module_api/callbacks/spamchecker_callbacks.py @@ -426,9 +426,7 @@ class SpamCheckerModuleApiCallbacks: generally discouraged as it doesn't support internationalization. """ for callback in self._check_event_for_spam_callbacks: - with Measure( - self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) - ): + with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"): res = await delay_cancellation(callback(event)) if res is False or res == self.NOT_SPAM: # This spam-checker accepts the event. 
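The hunks in `spamchecker_callbacks.py`, above and below, all apply the same substitution: the operation label passed to `Measure`, previously built with `str.format`, becomes an f-string. The two spellings are interchangeable; a minimal standalone check of the equivalence (not Synapse code):

```python
# Tiny demonstration that the old and new spellings of the Measure label
# used in this file produce identical strings for any given callback:
def example_callback() -> None:
    """Stand-in for a registered spam-checker callback."""

old = "{}.{}".format(example_callback.__module__, example_callback.__qualname__)
new = f"{example_callback.__module__}.{example_callback.__qualname__}"
assert old == new  # e.g. "__main__.example_callback"
```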
@@ -481,9 +479,7 @@ class SpamCheckerModuleApiCallbacks: True if the event should be silently dropped """ for callback in self._should_drop_federated_event_callbacks: - with Measure( - self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) - ): + with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"): res: Union[bool, str] = await delay_cancellation(callback(event)) if res: return res @@ -505,9 +501,7 @@ class SpamCheckerModuleApiCallbacks: NOT_SPAM if the operation is permitted, [Codes, Dict] otherwise. """ for callback in self._user_may_join_room_callbacks: - with Measure( - self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) - ): + with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"): res = await delay_cancellation(callback(user_id, room_id, is_invited)) # Normalize return values to `Codes` or `"NOT_SPAM"`. if res is True or res is self.NOT_SPAM: @@ -546,9 +540,7 @@ class SpamCheckerModuleApiCallbacks: NOT_SPAM if the operation is permitted, Codes otherwise. """ for callback in self._user_may_invite_callbacks: - with Measure( - self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) - ): + with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"): res = await delay_cancellation( callback(inviter_userid, invitee_userid, room_id) ) @@ -593,9 +585,7 @@ class SpamCheckerModuleApiCallbacks: NOT_SPAM if the operation is permitted, Codes otherwise. """ for callback in self._user_may_send_3pid_invite_callbacks: - with Measure( - self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) - ): + with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"): res = await delay_cancellation( callback(inviter_userid, medium, address, room_id) ) @@ -630,9 +620,7 @@ class SpamCheckerModuleApiCallbacks: userid: The ID of the user attempting to create a room """ for callback in self._user_may_create_room_callbacks: - with Measure( - self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) - ): + with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"): res = await delay_cancellation(callback(userid)) if res is True or res is self.NOT_SPAM: continue @@ -666,9 +654,7 @@ class SpamCheckerModuleApiCallbacks: """ for callback in self._user_may_create_room_alias_callbacks: - with Measure( - self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) - ): + with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"): res = await delay_cancellation(callback(userid, room_alias)) if res is True or res is self.NOT_SPAM: continue @@ -701,9 +687,7 @@ class SpamCheckerModuleApiCallbacks: room_id: The ID of the room that would be published """ for callback in self._user_may_publish_room_callbacks: - with Measure( - self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) - ): + with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"): res = await delay_cancellation(callback(userid, room_id)) if res is True or res is self.NOT_SPAM: continue @@ -742,9 +726,7 @@ class SpamCheckerModuleApiCallbacks: True if the user is spammy. """ for callback in self._check_username_for_spam_callbacks: - with Measure( - self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) - ): + with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"): # Make a copy of the user profile object to ensure the spam checker cannot # modify it. 
res = await delay_cancellation(callback(user_profile.copy())) @@ -776,9 +758,7 @@ class SpamCheckerModuleApiCallbacks: """ for callback in self._check_registration_for_spam_callbacks: - with Measure( - self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) - ): + with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"): behaviour = await delay_cancellation( callback(email_threepid, username, request_info, auth_provider_id) ) @@ -820,9 +800,7 @@ class SpamCheckerModuleApiCallbacks: """ for callback in self._check_media_file_for_spam_callbacks: - with Measure( - self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) - ): + with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"): res = await delay_cancellation(callback(file_wrapper, file_info)) # Normalize return values to `Codes` or `"NOT_SPAM"`. if res is False or res is self.NOT_SPAM: @@ -869,9 +847,7 @@ class SpamCheckerModuleApiCallbacks: """ for callback in self._check_login_for_spam_callbacks: - with Measure( - self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) - ): + with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"): res = await delay_cancellation( callback( user_id, diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index a2cabba7b1..38adcbe1d0 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -17,6 +17,7 @@ from typing import ( TYPE_CHECKING, Any, Awaitable, + Deque, Dict, Iterable, Iterator, @@ -29,7 +30,6 @@ from typing import ( ) from prometheus_client import Counter -from typing_extensions import Deque from twisted.internet.protocol import ReconnectingClientFactory diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index fff417f9e3..047de6283a 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -13,10 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Optional, Tuple, Union, cast +from typing import TYPE_CHECKING, Optional, Tuple, Union, cast from canonicaljson import encode_canonical_json -from typing_extensions import TYPE_CHECKING from synapse.api.errors import Codes, StoreError, SynapseError from synapse.storage._base import SQLBaseStore, db_to_json diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index 1666e3c43b..cea32a034a 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -188,7 +188,7 @@ class KeyStore(SQLBaseStore): # invalidate takes a tuple corresponding to the params of # _get_server_keys_json. _get_server_keys_json only takes one # param, which is itself the 2-tuple (server_name, key_id). 
- self._get_server_keys_json.invalidate((((server_name, key_id),))) + self._get_server_keys_json.invalidate(((server_name, key_id),)) @cached() def _get_server_keys_json( diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index f34b7ce8f4..6298f0984d 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -19,6 +19,7 @@ from itertools import chain from typing import ( TYPE_CHECKING, Any, + Counter, Dict, Iterable, List, @@ -28,8 +29,6 @@ from typing import ( cast, ) -from typing_extensions import Counter - from twisted.internet.defer import DeferredLock from synapse.api.constants import Direction, EventContentFields, EventTypes, Membership diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py index 0363cdc038..0b5b3bf03e 100644 --- a/synapse/storage/engines/_base.py +++ b/synapse/storage/engines/_base.py @@ -145,5 +145,5 @@ class BaseDatabaseEngine(Generic[ConnectionType, CursorType], metaclass=abc.ABCM This is not provided by DBAPI2, and so needs engine-specific support. """ - with open(filepath, "rt") as f: + with open(filepath) as f: cls.executescript(cursor, f.read()) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 38b7abd801..31501fd573 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -16,10 +16,18 @@ import logging import os import re from collections import Counter -from typing import Collection, Generator, Iterable, List, Optional, TextIO, Tuple +from typing import ( + Collection, + Counter as CounterType, + Generator, + Iterable, + List, + Optional, + TextIO, + Tuple, +) import attr -from typing_extensions import Counter as CounterType from synapse.config.homeserver import HomeServerConfig from synapse.storage.database import LoggingDatabaseConnection, LoggingTransaction diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 39a1ae4ac3..073f682aca 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -21,6 +21,7 @@ from typing import ( Any, ClassVar, Dict, + Final, List, Mapping, Match, @@ -38,7 +39,7 @@ import attr from immutabledict import immutabledict from signedjson.key import decode_verify_key_bytes from signedjson.types import VerifyKey -from typing_extensions import Final, TypedDict +from typing_extensions import TypedDict from unpaddedbase64 import decode_base64 from zope.interface import Interface diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 4041e49e71..943ad54456 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -22,6 +22,7 @@ import logging from contextlib import asynccontextmanager from typing import ( Any, + AsyncContextManager, AsyncIterator, Awaitable, Callable, @@ -42,7 +43,7 @@ from typing import ( ) import attr -from typing_extensions import AsyncContextManager, Concatenate, Literal, ParamSpec +from typing_extensions import Concatenate, Literal, ParamSpec from twisted.internet import defer from twisted.internet.defer import CancelledError diff --git a/synapse/util/macaroons.py b/synapse/util/macaroons.py index 644c341e8c..db6c40a3e1 100644 --- a/synapse/util/macaroons.py +++ b/synapse/util/macaroons.py @@ -218,7 +218,7 @@ class MacaroonGenerator: # to avoid validating those as guest tokens, we explicitely verify if # the macaroon includes the "guest = true" caveat. 
is_guest = any( - (caveat.caveat_id == "guest = true" for caveat in macaroon.caveats) + caveat.caveat_id == "guest = true" for caveat in macaroon.caveats ) if not is_guest: diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 2ad55ac13e..cde4a0780f 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -20,6 +20,7 @@ import typing from typing import ( Any, Callable, + ContextManager, DefaultDict, Dict, Iterator, @@ -33,7 +34,6 @@ from typing import ( from weakref import WeakSet from prometheus_client.core import Counter -from typing_extensions import ContextManager from twisted.internet import defer diff --git a/synapse/visibility.py b/synapse/visibility.py index fc71dc92a4..eac10f6438 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -17,6 +17,7 @@ from enum import Enum, auto from typing import ( Collection, Dict, + Final, FrozenSet, List, Mapping, @@ -27,7 +28,6 @@ from typing import ( ) import attr -from typing_extensions import Final from synapse.api.constants import EventTypes, HistoryVisibility, Membership from synapse.events import EventBase diff --git a/tests/app/test_phone_stats_home.py b/tests/app/test_phone_stats_home.py index 9305b758d7..93af614def 100644 --- a/tests/app/test_phone_stats_home.py +++ b/tests/app/test_phone_stats_home.py @@ -26,7 +26,7 @@ class PhoneHomeR30V2TestCase(HomeserverTestCase): def make_homeserver( self, reactor: ThreadedMemoryReactorClock, clock: Clock ) -> HomeServer: - hs = super(PhoneHomeR30V2TestCase, self).make_homeserver(reactor, clock) + hs = super().make_homeserver(reactor, clock) # We don't want our tests to actually report statistics, so check # that it's not enabled diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 7c63b2ea4c..fdfd4f911d 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -312,7 +312,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): [("server9", get_key_id(key1))] ) result = self.get_success(d) - self.assertEquals(result[("server9", get_key_id(key1))].valid_until_ts, 0) + self.assertEqual(result[("server9", get_key_id(key1))].valid_until_ts, 0) def test_verify_json_dedupes_key_requests(self) -> None: """Two requests for the same key should be deduped.""" diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index aed2a4c07a..6a0b5fc0bd 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -514,7 +514,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self.assertEqual(response.code, 200) # Send the body - request.write('{ "a": 1 }'.encode("ascii")) + request.write(b'{ "a": 1 }') request.finish() self.reactor.pump((0.1,)) diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index b3310abe1b..fe631d7ecb 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -757,7 +757,7 @@ class ModuleApiTestCase(BaseModuleApiTestCase): self.assertEqual(channel.json_body["creator"], user_id) # Check room alias. - self.assertEquals(room_alias, f"#foo-bar:{self.module_api.server_name}") + self.assertEqual(room_alias, f"#foo-bar:{self.module_api.server_name}") # Let's try a room with no alias. 
room_id, room_alias = self.get_success( diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py index 1527b4a82d..6e78daa830 100644 --- a/tests/replication/test_multi_media_repo.py +++ b/tests/replication/test_multi_media_repo.py @@ -116,7 +116,7 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): self.assertEqual(request.method, b"GET") self.assertEqual( request.path, - f"/_matrix/media/r0/download/{target}/{media_id}".encode("utf-8"), + f"/_matrix/media/r0/download/{target}/{media_id}".encode(), ) self.assertEqual( request.requestHeaders.getRawHeaders(b"host"), [target.encode("utf-8")] diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py index 180b635ea6..4e0a387bd3 100644 --- a/tests/rest/client/test_redactions.py +++ b/tests/rest/client/test_redactions.py @@ -627,8 +627,8 @@ class RedactionsTestCase(HomeserverTestCase): redact_event = timeline[-1] self.assertEqual(redact_event["type"], EventTypes.Redaction) # The redacts key should be in the content and the redacts keys. - self.assertEquals(redact_event["content"]["redacts"], event_id) - self.assertEquals(redact_event["redacts"], event_id) + self.assertEqual(redact_event["content"]["redacts"], event_id) + self.assertEqual(redact_event["redacts"], event_id) # But it isn't actually part of the event. def get_event(txn: LoggingTransaction) -> JsonDict: @@ -642,10 +642,10 @@ class RedactionsTestCase(HomeserverTestCase): event_json = self.get_success( main_datastore.db_pool.runInteraction("get_event", get_event) ) - self.assertEquals(event_json["type"], EventTypes.Redaction) + self.assertEqual(event_json["type"], EventTypes.Redaction) if expect_content: self.assertNotIn("redacts", event_json) - self.assertEquals(event_json["content"]["redacts"], event_id) + self.assertEqual(event_json["content"]["redacts"], event_id) else: - self.assertEquals(event_json["redacts"], event_id) + self.assertEqual(event_json["redacts"], event_id) self.assertNotIn("redacts", event_json["content"]) diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 75439416c1..9bfe913e45 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -129,7 +129,7 @@ class BaseRelationsTestCase(unittest.HomeserverTestCase): f"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) return [ev["event_id"] for ev in channel.json_body["chunk"]] def _get_bundled_aggregations(self) -> JsonDict: @@ -142,7 +142,7 @@ class BaseRelationsTestCase(unittest.HomeserverTestCase): f"/_matrix/client/v3/rooms/{self.room}/event/{self.parent_id}", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) return channel.json_body["unsigned"].get("m.relations", {}) def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict: @@ -1602,7 +1602,7 @@ class RelationRedactionTestCase(BaseRelationsTestCase): f"/_matrix/client/v1/rooms/{self.room}/threads", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) threads = channel.json_body["chunk"] return [ ( @@ -1634,7 +1634,7 @@ class RelationRedactionTestCase(BaseRelationsTestCase): ################################################## # Check the test data is 
configured as expected. # ################################################## - self.assertEquals(self._get_related_events(), list(reversed(thread_replies))) + self.assertEqual(self._get_related_events(), list(reversed(thread_replies))) relations = self._get_bundled_aggregations() self.assertDictContainsSubset( {"count": 3, "current_user_participated": True}, @@ -1655,7 +1655,7 @@ class RelationRedactionTestCase(BaseRelationsTestCase): self._redact(thread_replies.pop()) # The thread should still exist, but the latest event should be updated. - self.assertEquals(self._get_related_events(), list(reversed(thread_replies))) + self.assertEqual(self._get_related_events(), list(reversed(thread_replies))) relations = self._get_bundled_aggregations() self.assertDictContainsSubset( {"count": 2, "current_user_participated": True}, @@ -1674,7 +1674,7 @@ class RelationRedactionTestCase(BaseRelationsTestCase): self._redact(thread_replies.pop(0)) # Nothing should have changed (except the thread count). - self.assertEquals(self._get_related_events(), thread_replies) + self.assertEqual(self._get_related_events(), thread_replies) relations = self._get_bundled_aggregations() self.assertDictContainsSubset( {"count": 1, "current_user_participated": True}, @@ -1691,11 +1691,11 @@ class RelationRedactionTestCase(BaseRelationsTestCase): # Redact the last remaining event. # #################################### self._redact(thread_replies.pop(0)) - self.assertEquals(thread_replies, []) + self.assertEqual(thread_replies, []) # The event should no longer be considered a thread. - self.assertEquals(self._get_related_events(), []) - self.assertEquals(self._get_bundled_aggregations(), {}) + self.assertEqual(self._get_related_events(), []) + self.assertEqual(self._get_bundled_aggregations(), {}) self.assertEqual(self._get_threads(), []) def test_redact_parent_edit(self) -> None: @@ -1749,8 +1749,8 @@ class RelationRedactionTestCase(BaseRelationsTestCase): # The relations are returned. event_ids = self._get_related_events() relations = self._get_bundled_aggregations() - self.assertEquals(event_ids, [related_event_id]) - self.assertEquals( + self.assertEqual(event_ids, [related_event_id]) + self.assertEqual( relations[RelationTypes.REFERENCE], {"chunk": [{"event_id": related_event_id}]}, ) @@ -1772,7 +1772,7 @@ class RelationRedactionTestCase(BaseRelationsTestCase): # The unredacted relation should still exist. event_ids = self._get_related_events() relations = self._get_bundled_aggregations() - self.assertEquals(len(event_ids), 1) + self.assertEqual(len(event_ids), 1) self.assertDictContainsSubset( { "count": 1, @@ -1816,7 +1816,7 @@ class ThreadsTestCase(BaseRelationsTestCase): f"/_matrix/client/v1/rooms/{self.room}/threads", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) threads = self._get_threads(channel.json_body) self.assertEqual(threads, [(thread_2, reply_2), (thread_1, reply_1)]) @@ -1829,7 +1829,7 @@ class ThreadsTestCase(BaseRelationsTestCase): f"/_matrix/client/v1/rooms/{self.room}/threads", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) # Tuple of (thread ID, latest event ID) for each thread. 
threads = self._get_threads(channel.json_body) self.assertEqual(threads, [(thread_1, reply_3), (thread_2, reply_2)]) @@ -1850,7 +1850,7 @@ class ThreadsTestCase(BaseRelationsTestCase): f"/_matrix/client/v1/rooms/{self.room}/threads?limit=1", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) thread_roots = [ev["event_id"] for ev in channel.json_body["chunk"]] self.assertEqual(thread_roots, [thread_2]) @@ -1864,7 +1864,7 @@ class ThreadsTestCase(BaseRelationsTestCase): f"/_matrix/client/v1/rooms/{self.room}/threads?limit=1&from={next_batch}", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) thread_roots = [ev["event_id"] for ev in channel.json_body["chunk"]] self.assertEqual(thread_roots, [thread_1], channel.json_body) @@ -1899,7 +1899,7 @@ class ThreadsTestCase(BaseRelationsTestCase): f"/_matrix/client/v1/rooms/{self.room}/threads", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) thread_roots = [ev["event_id"] for ev in channel.json_body["chunk"]] self.assertEqual( thread_roots, [thread_3, thread_2, thread_1], channel.json_body @@ -1911,7 +1911,7 @@ class ThreadsTestCase(BaseRelationsTestCase): f"/_matrix/client/v1/rooms/{self.room}/threads?include=participated", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) thread_roots = [ev["event_id"] for ev in channel.json_body["chunk"]] self.assertEqual(thread_roots, [thread_2, thread_1], channel.json_body) @@ -1943,6 +1943,6 @@ class ThreadsTestCase(BaseRelationsTestCase): f"/_matrix/client/v1/rooms/{self.room}/threads", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) thread_roots = [ev["event_id"] for ev in channel.json_body["chunk"]] self.assertEqual(thread_roots, [thread_1], channel.json_body) diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 4f6347be15..88e579dc39 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -1362,7 +1362,7 @@ class RoomAppserviceTsParamTestCase(unittest.HomeserverTestCase): # Ensure the event was persisted with the correct timestamp. res = self.get_success(self.main_store.get_event(event_id)) - self.assertEquals(ts, res.origin_server_ts) + self.assertEqual(ts, res.origin_server_ts) def test_send_state_event_ts(self) -> None: """Test sending a state event with a custom timestamp.""" @@ -1384,7 +1384,7 @@ class RoomAppserviceTsParamTestCase(unittest.HomeserverTestCase): # Ensure the event was persisted with the correct timestamp. res = self.get_success(self.main_store.get_event(event_id)) - self.assertEquals(ts, res.origin_server_ts) + self.assertEqual(ts, res.origin_server_ts) def test_send_membership_event_ts(self) -> None: """Test sending a membership event with a custom timestamp.""" @@ -1406,7 +1406,7 @@ class RoomAppserviceTsParamTestCase(unittest.HomeserverTestCase): # Ensure the event was persisted with the correct timestamp. 
res = self.get_success(self.main_store.get_event(event_id)) - self.assertEquals(ts, res.origin_server_ts) + self.assertEqual(ts, res.origin_server_ts) class RoomJoinRatelimitTestCase(RoomBase): diff --git a/tests/server.py b/tests/server.py index c84a524e8c..481fe34c5c 100644 --- a/tests/server.py +++ b/tests/server.py @@ -26,6 +26,7 @@ from typing import ( Any, Awaitable, Callable, + Deque, Dict, Iterable, List, @@ -41,7 +42,7 @@ from typing import ( from unittest.mock import Mock import attr -from typing_extensions import Deque, ParamSpec +from typing_extensions import ParamSpec from zope.interface import implementer from twisted.internet import address, threads, udp diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 5e1324a169..71302facd1 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -40,7 +40,7 @@ from tests.test_utils import make_awaitable class ApplicationServiceStoreTestCase(unittest.HomeserverTestCase): def setUp(self) -> None: - super(ApplicationServiceStoreTestCase, self).setUp() + super().setUp() self.as_yaml_files: List[str] = [] @@ -71,7 +71,7 @@ class ApplicationServiceStoreTestCase(unittest.HomeserverTestCase): except Exception: pass - super(ApplicationServiceStoreTestCase, self).tearDown() + super().tearDown() def _add_appservice( self, as_token: str, id: str, url: str, hs_token: str, sender: str @@ -110,7 +110,7 @@ class ApplicationServiceStoreTestCase(unittest.HomeserverTestCase): class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase): def setUp(self) -> None: - super(ApplicationServiceTransactionStoreTestCase, self).setUp() + super().setUp() self.as_yaml_files: List[str] = [] self.hs.config.appservice.app_service_config_files = self.as_yaml_files diff --git a/tests/storage/test_main.py b/tests/storage/test_main.py index 27f450e22d..b8823d6993 100644 --- a/tests/storage/test_main.py +++ b/tests/storage/test_main.py @@ -20,7 +20,7 @@ from tests import unittest class DataStoreTestCase(unittest.HomeserverTestCase): def setUp(self) -> None: - super(DataStoreTestCase, self).setUp() + super().setUp() self.store = self.hs.get_datastores().main diff --git a/tests/storage/test_room_search.py b/tests/storage/test_room_search.py index f183c38477..52ffa91c81 100644 --- a/tests/storage/test_room_search.py +++ b/tests/storage/test_room_search.py @@ -318,14 +318,14 @@ class MessageSearchTest(HomeserverTestCase): result = self.get_success( store.search_msgs([self.room_id], query, ["content.body"]) ) - self.assertEquals( + self.assertEqual( result["count"], 1 if expect_to_contain else 0, f"expected '{query}' to match '{self.PHRASE}'" if expect_to_contain else f"'{query}' unexpectedly matched '{self.PHRASE}'", ) - self.assertEquals( + self.assertEqual( len(result["results"]), 1 if expect_to_contain else 0, "results array length should match count", @@ -336,14 +336,14 @@ class MessageSearchTest(HomeserverTestCase): result = self.get_success( store.search_rooms([self.room_id], query, ["content.body"], 10) ) - self.assertEquals( + self.assertEqual( result["count"], 1 if expect_to_contain else 0, f"expected '{query}' to match '{self.PHRASE}'" if expect_to_contain else f"'{query}' unexpectedly matched '{self.PHRASE}'", ) - self.assertEquals( + self.assertEqual( len(result["results"]), 1 if expect_to_contain else 0, "results array length should match count", diff --git a/tests/test_visibility.py b/tests/test_visibility.py index 9ed330f554..a46c29ddf4 100644 --- a/tests/test_visibility.py +++ 
b/tests/test_visibility.py @@ -31,7 +31,7 @@ TEST_ROOM_ID = "!TEST:ROOM" class FilterEventsForServerTestCase(unittest.HomeserverTestCase): def setUp(self) -> None: - super(FilterEventsForServerTestCase, self).setUp() + super().setUp() self.event_creation_handler = self.hs.get_event_creation_handler() self.event_builder_factory = self.hs.get_event_builder_factory() self._storage_controllers = self.hs.get_storage_controllers() From 47c629bb27c0a479068ed5da184dffe7a6cb0fca Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 15 Aug 2023 17:07:13 +0100 Subject: [PATCH 322/562] Attempt to fix twisted trunk (#16115) --- .github/workflows/twisted_trunk.yml | 5 ++++- changelog.d/16115.misc | 1 + mypy.ini | 7 +++++++ synapse/util/manhole.py | 4 +++- 4 files changed, 15 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16115.misc diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index f7a4ee7c13..67ccc03f6e 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -5,6 +5,9 @@ on: - cron: 0 8 * * * workflow_dispatch: + # NB: inputs are only present when this workflow is dispatched manually. + # (The default below is the default field value in the form to trigger + # a manual dispatch). Otherwise the inputs will evaluate to null. inputs: twisted_ref: description: Commit, branch or tag to checkout from upstream Twisted. @@ -49,7 +52,7 @@ jobs: extras: "all" - run: | poetry remove twisted - poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref }} + poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref || 'trunk' }} poetry install --no-interaction --extras "all test" - name: Remove warn_unused_ignores from mypy config run: sed '/warn_unused_ignores = True/d' -i mypy.ini diff --git a/changelog.d/16115.misc b/changelog.d/16115.misc new file mode 100644 index 0000000000..f325d2a31d --- /dev/null +++ b/changelog.d/16115.misc @@ -0,0 +1 @@ +Attempt to fix the twisted trunk job. diff --git a/mypy.ini b/mypy.ini index 1038b7d8c7..311a951aa8 100644 --- a/mypy.ini +++ b/mypy.ini @@ -45,6 +45,13 @@ warn_unused_ignores = False disallow_untyped_defs = False disallow_incomplete_defs = False +[mypy-synapse.util.manhole] +# This module imports something from Twisted which has a bad annotation in Twisted trunk, +# but is unannotated in Twisted's latest release. We want to type-ignore the problem +# in the twisted trunk job, even though it has no effect on normal mypy runs. +warn_unused_ignores = False + + ;; Dependencies without annotations ;; Before ignoring a module, check to see if type stubs are available. ;; The `typeshed` project maintains stubs here: diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py index 48b8195ca1..8cb766860e 100644 --- a/synapse/util/manhole.py +++ b/synapse/util/manhole.py @@ -98,7 +98,9 @@ def manhole(settings: ManholeConfig, globals: Dict[str, Any]) -> ServerFactory: SynapseManhole, dict(globals, __name__="__console__") ) - factory = manhole_ssh.ConchFactory(portal.Portal(rlm, [checker])) + # type-ignore: This is an error in Twisted's annotations. See + # https://github.com/twisted/twisted/issues/11812 and /11813 . + factory = manhole_ssh.ConchFactory(portal.Portal(rlm, [checker])) # type: ignore[arg-type] # conch has the wrong type on these dicts (says bytes to bytes, # should be bytes to Keys judging by how it's used). 
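As an aside on the `${{ inputs.twisted_ref || 'trunk' }}` fallback above: scheduled (cron) runs have no dispatch form, so `inputs.twisted_ref` evaluates to null and the checkout must fall back to `trunk` itself. A rough Python equivalent of the expression's behaviour, with `inputs` as a made-up dict standing in for the workflow context:

    # Sketch only: `inputs` is a hypothetical stand-in for the GitHub
    # Actions context; scheduled runs supply no inputs at all.
    def resolve_twisted_ref(inputs: dict) -> str:
        # Mirrors `${{ inputs.twisted_ref || 'trunk' }}`: missing or empty
        # values are falsy, so we fall back to 'trunk'.
        return inputs.get("twisted_ref") or "trunk"

    assert resolve_twisted_ref({}) == "trunk"  # cron-triggered run
    assert resolve_twisted_ref({"twisted_ref": "trunk"}) == "trunk"  # manual dispatch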
From 4513b36a75746da61b5b85a99a1527d4c7f12401 Mon Sep 17 00:00:00 2001
From: axel simon
Date: Wed, 16 Aug 2023 14:08:35 +0100
Subject: [PATCH 323/562] Add link explaining ELK stack to
 structured_logging.md (#16091)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 changelog.d/16091.doc      | 1 +
 docs/structured_logging.md | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/16091.doc

diff --git a/changelog.d/16091.doc b/changelog.d/16091.doc
new file mode 100644
index 0000000000..a043df4efd
--- /dev/null
+++ b/changelog.d/16091.doc
@@ -0,0 +1 @@
+Structured logging docs: add a link to explain the ELK stack
diff --git a/docs/structured_logging.md b/docs/structured_logging.md
index d43dc9eb6e..002565b223 100644
--- a/docs/structured_logging.md
+++ b/docs/structured_logging.md
@@ -3,7 +3,7 @@
 A structured logging system can be useful when your logs are destined for a
 machine to parse and process. By maintaining its machine-readable characteristics,
 it enables more efficient searching and aggregations when consumed by software
-such as the "ELK stack".
+such as the [ELK stack](https://opensource.com/article/18/9/open-source-log-aggregation-tools).

 Synapse's structured logging system is configured via the file that Synapse's
 `log_config` config option points to. The file should include a formatter which
From 8c3bcea2da4939e21a99f72d6c3995186bc4b80d Mon Sep 17 00:00:00 2001
From: Mathieu Velten
Date: Wed, 16 Aug 2023 16:19:54 +0200
Subject: [PATCH 324/562] Rename pagination&purge locks and add comments
 explaining them (#16112)

---
 changelog.d/16112.misc                        |  1 +
 synapse/federation/federation_server.py       |  4 ++--
 synapse/handlers/message.py                   |  6 +++---
 synapse/handlers/pagination.py                | 19 ++++++++++++-------
 synapse/handlers/room_member.py               |  4 ++--
 synapse/handlers/worker_lock.py               |  6 +++++-
 .../rest/client/room_upgrade_rest_servlet.py  |  4 ++--
 synapse/storage/controllers/persist_events.py |  4 ++--
 8 files changed, 29 insertions(+), 19 deletions(-)
 create mode 100644 changelog.d/16112.misc

diff --git a/changelog.d/16112.misc b/changelog.d/16112.misc
new file mode 100644
index 0000000000..05a58c1348
--- /dev/null
+++ b/changelog.d/16112.misc
@@ -0,0 +1 @@
+Rename pagination and purge locks and add comments to explain why they exist and how they work.
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index a90d99c4d6..f9915e5a3f 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -63,7 +63,7 @@ from synapse.federation.federation_base import (
 )
 from synapse.federation.persistence import TransactionActions
 from synapse.federation.units import Edu, Transaction
-from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME
+from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
 from synapse.http.servlet import assert_params_in_dict
 from synapse.logging.context import (
     make_deferred_yieldable,
@@ -1245,7 +1245,7 @@ class FederationServer(FederationBase):
             # while holding the `_INBOUND_EVENT_HANDLING_LOCK_NAME`
             # lock.
            async with self._worker_lock_handler.acquire_read_write_lock(
-                DELETE_ROOM_LOCK_NAME, room_id, write=False
+                NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id, write=False
             ):
                 await self._federation_event_handler.on_receive_pdu(
                     origin, event
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index d485f21e49..a74db1dccf 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -53,7 +53,7 @@ from synapse.events.snapshot import EventContext, UnpersistedEventContextBase
 from synapse.events.utils import SerializeEventConfig, maybe_upsert_event_field
 from synapse.events.validator import EventValidator
 from synapse.handlers.directory import DirectoryHandler
-from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME
+from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
 from synapse.logging import opentracing
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.metrics.background_process_metrics import run_as_background_process
@@ -1034,7 +1034,7 @@ class EventCreationHandler:
             )

         async with self._worker_lock_handler.acquire_read_write_lock(
-            DELETE_ROOM_LOCK_NAME, room_id, write=False
+            NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id, write=False
         ):
             return await self._create_and_send_nonmember_event_locked(
                 requester=requester,
@@ -1978,7 +1978,7 @@ class EventCreationHandler:

         for room_id in room_ids:
             async with self._worker_lock_handler.acquire_read_write_lock(
-                DELETE_ROOM_LOCK_NAME, room_id, write=False
+                NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id, write=False
             ):
                 dummy_event_sent = await self._send_dummy_event_for_room(room_id)

diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index da34658470..1be6ebc6d9 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -24,6 +24,7 @@ from synapse.api.errors import SynapseError
 from synapse.api.filtering import Filter
 from synapse.events.utils import SerializeEventConfig
 from synapse.handlers.room import ShutdownRoomResponse
+from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
 from synapse.logging.opentracing import trace
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.rest.admin._base import assert_user_is_admin
@@ -46,9 +47,10 @@ logger = logging.getLogger(__name__)

 BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD = 3

-PURGE_HISTORY_LOCK_NAME = "purge_history_lock"
-
-DELETE_ROOM_LOCK_NAME = "delete_room_lock"
+# This is used to avoid purging a room several times at the same moment,
+# and also paginating during a purge. Pagination can trigger backfill,
+# which would create old events locally, and would potentially clash with the room delete.
+PURGE_PAGINATION_LOCK_NAME = "purge_pagination_lock"


 @attr.s(slots=True, auto_attribs=True)
@@ -363,7 +365,7 @@ class PaginationHandler:
         self._purges_in_progress_by_room.add(room_id)
         try:
             async with self._worker_locks.acquire_read_write_lock(
-                PURGE_HISTORY_LOCK_NAME, room_id, write=True
+                PURGE_PAGINATION_LOCK_NAME, room_id, write=True
             ):
                 await self._storage_controllers.purge_events.purge_history(
                     room_id, token, delete_local_events
@@ -421,7 +423,10 @@ class PaginationHandler:
             force: set true to skip checking for joined users.
""" async with self._worker_locks.acquire_multi_read_write_lock( - [(PURGE_HISTORY_LOCK_NAME, room_id), (DELETE_ROOM_LOCK_NAME, room_id)], + [ + (PURGE_PAGINATION_LOCK_NAME, room_id), + (NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id), + ], write=True, ): # first check that we have no users in this room @@ -483,7 +488,7 @@ class PaginationHandler: room_token = from_token.room_key async with self._worker_locks.acquire_read_write_lock( - PURGE_HISTORY_LOCK_NAME, room_id, write=False + PURGE_PAGINATION_LOCK_NAME, room_id, write=False ): (membership, member_event_id) = (None, None) if not use_admin_priviledge: @@ -761,7 +766,7 @@ class PaginationHandler: self._purges_in_progress_by_room.add(room_id) try: async with self._worker_locks.acquire_read_write_lock( - PURGE_HISTORY_LOCK_NAME, room_id, write=True + PURGE_PAGINATION_LOCK_NAME, room_id, write=True ): self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN self._delete_by_id[ diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index e3cdf2bc61..1d8d4a72e7 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -39,7 +39,7 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler -from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME +from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME from synapse.logging import opentracing from synapse.metrics import event_processing_positions from synapse.metrics.background_process_metrics import run_as_background_process @@ -621,7 +621,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): async with self.member_as_limiter.queue(as_id): async with self.member_linearizer.queue(key): async with self._worker_lock_handler.acquire_read_write_lock( - DELETE_ROOM_LOCK_NAME, room_id, write=False + NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id, write=False ): with opentracing.start_active_span("update_membership_locked"): result = await self.update_membership_locked( diff --git a/synapse/handlers/worker_lock.py b/synapse/handlers/worker_lock.py index 72df773a86..58efe7116b 100644 --- a/synapse/handlers/worker_lock.py +++ b/synapse/handlers/worker_lock.py @@ -42,7 +42,11 @@ if TYPE_CHECKING: from synapse.server import HomeServer -DELETE_ROOM_LOCK_NAME = "delete_room_lock" +# This lock is used to avoid creating an event while we are purging the room. +# We take a read lock when creating an event, and a write one when purging a room. +# This is because it is fine to create several events concurrently, since referenced events +# will not disappear under our feet as long as we don't delete the room. 
+NEW_EVENT_DURING_PURGE_LOCK_NAME = "new_event_during_purge_lock"


 class WorkerLocksHandler:
diff --git a/synapse/rest/client/room_upgrade_rest_servlet.py b/synapse/rest/client/room_upgrade_rest_servlet.py
index 4a5d9e13e7..b1f6b5d1b7 100644
--- a/synapse/rest/client/room_upgrade_rest_servlet.py
+++ b/synapse/rest/client/room_upgrade_rest_servlet.py
@@ -17,7 +17,7 @@ from typing import TYPE_CHECKING, Tuple

 from synapse.api.errors import Codes, ShadowBanError, SynapseError
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
-from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME
+from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
 from synapse.http.server import HttpServer
 from synapse.http.servlet import (
     RestServlet,
@@ -81,7 +81,7 @@ class RoomUpgradeRestServlet(RestServlet):

         try:
             async with self._worker_lock_handler.acquire_read_write_lock(
-                DELETE_ROOM_LOCK_NAME, room_id, write=False
+                NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id, write=False
             ):
                 new_room_id = await self._room_creation_handler.upgrade_room(
                     requester, room_id, new_version
diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py
index 35cd1089d6..abd1d149db 100644
--- a/synapse/storage/controllers/persist_events.py
+++ b/synapse/storage/controllers/persist_events.py
@@ -45,7 +45,7 @@ from twisted.internet import defer
 from synapse.api.constants import EventTypes, Membership
 from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
-from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME
+from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
 from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
 from synapse.logging.opentracing import (
     SynapseTags,
@@ -357,7 +357,7 @@ class EventsPersistenceStorageController:
         # it. We might already have taken out the lock, but since this is just a
         # "read" lock it's inherently reentrant.
         async with self.hs.get_worker_locks_handler().acquire_read_write_lock(
-            DELETE_ROOM_LOCK_NAME, room_id, write=False
+            NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id, write=False
         ):
             if isinstance(task, _PersistEventsTask):
                 return await self._persist_event_batch(room_id, task)
From 8a4fb7a6baf32a35c24056474d98fed525522b78 Mon Sep 17 00:00:00 2001
From: Matthew Ma
Date: Thu, 17 Aug 2023 02:22:50 -0700
Subject: [PATCH 325/562] Disable caching in /sync corner case (#16080)

Fixes #15502

---
 changelog.d/16080.bugfix |  1 +
 synapse/handlers/sync.py | 20 ++++++++++----------
 2 files changed, 11 insertions(+), 10 deletions(-)
 create mode 100644 changelog.d/16080.bugfix

diff --git a/changelog.d/16080.bugfix b/changelog.d/16080.bugfix
new file mode 100644
index 0000000000..1ad6fb3c52
--- /dev/null
+++ b/changelog.d/16080.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug in `/sync` where timeout=0 does not skip caching, resulting in slow calls in cases where there are no new changes. Contributed by @PlasmaIntec.
\ No newline at end of file
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 8174248387..60a9f341b5 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -387,16 +387,16 @@ class SyncHandler:
                 from_token=since_token,
             )

-            # if nothing has happened in any of the users' rooms since /sync was called,
-            # the resultant next_batch will be the same as since_token (since the result
-            # is generated when wait_for_events is first called, and not regenerated
-            # when wait_for_events times out).
-            #
-            # If that happens, we mustn't cache it, so that when the client comes back
-            # with the same cache token, we don't immediately return the same empty
-            # result, causing a tightloop. (#8518)
-            if result.next_batch == since_token:
-                cache_context.should_cache = False
+        # if nothing has happened in any of the users' rooms since /sync was called,
+        # the resultant next_batch will be the same as since_token (since the result
+        # is generated when wait_for_events is first called, and not regenerated
+        # when wait_for_events times out).
+        #
+        # If that happens, we mustn't cache it, so that when the client comes back
+        # with the same cache token, we don't immediately return the same empty
+        # result, causing a tightloop. (#8518)
+        if result.next_batch == since_token:
+            cache_context.should_cache = False

         if result:
             if sync_config.filter_collection.lazy_load_members():
From 0377cb4fab27c717bc75ea27225c950b0215c152 Mon Sep 17 00:00:00 2001
From: Shay
Date: Thu, 17 Aug 2023 02:30:02 -0700
Subject: [PATCH 326/562] Override global statement timeout when creating
 indexes in Postgres (#16085)

---
 changelog.d/16085.misc                |  1 +
 synapse/storage/background_updates.py | 12 ++++++++++++
 2 files changed, 13 insertions(+)
 create mode 100644 changelog.d/16085.misc

diff --git a/changelog.d/16085.misc b/changelog.d/16085.misc
new file mode 100644
index 0000000000..7b7a95edd4
--- /dev/null
+++ b/changelog.d/16085.misc
@@ -0,0 +1 @@
+Override global statement timeout when creating indexes in Postgres.
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 2d5ddc3e7b..ddca0af1da 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -238,6 +238,7 @@ class BackgroundUpdater:
     def __init__(self, hs: "HomeServer", database: "DatabasePool"):
         self._clock = hs.get_clock()
         self.db_pool = database
+        self.hs = hs

         self._database_name = database.name()

@@ -758,6 +759,11 @@ class BackgroundUpdater:
                 logger.debug("[SQL] %s", sql)
                 c.execute(sql)

+                # override the global statement timeout to avoid accidentally squashing
+                # a long-running index creation process
+                timeout_sql = "SET SESSION statement_timeout = 0"
+                c.execute(timeout_sql)
+
                 sql = (
                     "CREATE %(unique)s INDEX CONCURRENTLY %(name)s"
                     " ON %(table)s"
@@ -778,6 +784,12 @@ class BackgroundUpdater:
                 logger.debug("[SQL] %s", sql)
                 c.execute(sql)
             finally:
+                # mypy ignore - `statement_timeout` is defined on PostgresEngine
+                # reset the global timeout to the default
+                default_timeout = self.db_pool.engine.statement_timeout  # type: ignore[attr-defined]
+                undo_timeout_sql = f"SET statement_timeout = {default_timeout}"
+                conn.cursor().execute(undo_timeout_sql)
+
                 conn.set_session(autocommit=False)  # type: ignore

     def create_index_sqlite(conn: Connection) -> None:
From eb0dbab15b119eab7721bc03ac1cfc7f6b638bb3 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 17 Aug 2023 14:07:57 +0100
Subject: [PATCH 327/562] Fix database performance of read/write worker locks
 (#16061)

We were seeing serialization errors when taking out multiple read locks. The
transactions were retried, so this wasn't causing any failures.

Introduced in #15782.
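In outline, the fix below drops the combined delete-then-insert (which made concurrent read-lock acquisitions touch overlapping rows) in favour of a bare insert plus a periodic background reaper. A minimal sketch of that shape, with simplified signatures and an assumed timeout value (the real implementation is in synapse/storage/databases/main/lock.py in the diff that follows):

    # Illustrative only; constants and signatures are simplified.
    _LOCK_TIMEOUT_MS = 2 * 60 * 1000  # assumed value for the sketch

    def try_acquire_txn(txn, now_ms, lock_name, lock_key, write, instance, token):
        # Acquisition is now a plain INSERT run with autocommit, so two
        # workers taking read locks no longer delete each other's stale-row
        # candidates and trip Postgres serialization failures.
        txn.execute(
            "INSERT INTO worker_read_write_locks"
            " (lock_name, lock_key, write_lock, instance_name, token, last_renewed_ts)"
            " VALUES (?, ?, ?, ?, ?, ?)",
            (lock_name, lock_key, write, instance, token, now_ms),
        )

    def reap_stale_locks_txn(txn, now_ms):
        # Stale rows are now cleaned up on a timer rather than inline on
        # every acquisition.
        txn.execute(
            "DELETE FROM worker_read_write_locks WHERE last_renewed_ts < ?",
            (now_ms - _LOCK_TIMEOUT_MS,),
        )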
--- changelog.d/16061.misc | 1 + synapse/storage/databases/main/lock.py | 87 +++++++++-------------- tests/storage/databases/main/test_lock.py | 7 +- 3 files changed, 40 insertions(+), 55 deletions(-) create mode 100644 changelog.d/16061.misc diff --git a/changelog.d/16061.misc b/changelog.d/16061.misc new file mode 100644 index 0000000000..37928b670f --- /dev/null +++ b/changelog.d/16061.misc @@ -0,0 +1 @@ +Fix database performance of read/write worker locks. diff --git a/synapse/storage/databases/main/lock.py b/synapse/storage/databases/main/lock.py index 1680bf6168..54d40e7a3a 100644 --- a/synapse/storage/databases/main/lock.py +++ b/synapse/storage/databases/main/lock.py @@ -26,7 +26,6 @@ from synapse.storage.database import ( LoggingDatabaseConnection, LoggingTransaction, ) -from synapse.storage.engines import PostgresEngine from synapse.util import Clock from synapse.util.stringutils import random_string @@ -96,6 +95,10 @@ class LockStore(SQLBaseStore): self._acquiring_locks: Set[Tuple[str, str]] = set() + self._clock.looping_call( + self._reap_stale_read_write_locks, _LOCK_TIMEOUT_MS / 10.0 + ) + @wrap_as_background_process("LockStore._on_shutdown") async def _on_shutdown(self) -> None: """Called when the server is shutting down""" @@ -216,6 +219,7 @@ class LockStore(SQLBaseStore): lock_name, lock_key, write, + db_autocommit=True, ) except self.database_engine.module.IntegrityError: return None @@ -233,61 +237,22 @@ class LockStore(SQLBaseStore): # `worker_read_write_locks` and seeing if that fails any # constraints. If it doesn't then we have acquired the lock, # otherwise we haven't. - # - # Before that though we clear the table of any stale locks. now = self._clock.time_msec() token = random_string(6) - delete_sql = """ - DELETE FROM worker_read_write_locks - WHERE last_renewed_ts < ? AND lock_name = ? AND lock_key = ?; - """ - - insert_sql = """ - INSERT INTO worker_read_write_locks (lock_name, lock_key, write_lock, instance_name, token, last_renewed_ts) - VALUES (?, ?, ?, ?, ?, ?) - """ - - if isinstance(self.database_engine, PostgresEngine): - # For Postgres we can send these queries at the same time. - txn.execute( - delete_sql + ";" + insert_sql, - ( - # DELETE args - now - _LOCK_TIMEOUT_MS, - lock_name, - lock_key, - # UPSERT args - lock_name, - lock_key, - write, - self._instance_name, - token, - now, - ), - ) - else: - # For SQLite these need to be two queries. - txn.execute( - delete_sql, - ( - now - _LOCK_TIMEOUT_MS, - lock_name, - lock_key, - ), - ) - txn.execute( - insert_sql, - ( - lock_name, - lock_key, - write, - self._instance_name, - token, - now, - ), - ) + self.db_pool.simple_insert_txn( + txn, + table="worker_read_write_locks", + values={ + "lock_name": lock_name, + "lock_key": lock_key, + "write_lock": write, + "instance_name": self._instance_name, + "token": token, + "last_renewed_ts": now, + }, + ) lock = Lock( self._reactor, @@ -351,6 +316,24 @@ class LockStore(SQLBaseStore): return locks + @wrap_as_background_process("_reap_stale_read_write_locks") + async def _reap_stale_read_write_locks(self) -> None: + delete_sql = """ + DELETE FROM worker_read_write_locks + WHERE last_renewed_ts < ? 
+ """ + + def reap_stale_read_write_locks_txn(txn: LoggingTransaction) -> None: + txn.execute(delete_sql, (self._clock.time_msec() - _LOCK_TIMEOUT_MS,)) + if txn.rowcount: + logger.info("Reaped %d stale locks", txn.rowcount) + + await self.db_pool.runInteraction( + "_reap_stale_read_write_locks", + reap_stale_read_write_locks_txn, + db_autocommit=True, + ) + class Lock: """An async context manager that manages an acquired lock, ensuring it is diff --git a/tests/storage/databases/main/test_lock.py b/tests/storage/databases/main/test_lock.py index 383da83dfb..f541f1d6be 100644 --- a/tests/storage/databases/main/test_lock.py +++ b/tests/storage/databases/main/test_lock.py @@ -12,13 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. + from twisted.internet import defer, reactor from twisted.internet.base import ReactorBase from twisted.internet.defer import Deferred from twisted.test.proto_helpers import MemoryReactor from synapse.server import HomeServer -from synapse.storage.databases.main.lock import _LOCK_TIMEOUT_MS +from synapse.storage.databases.main.lock import _LOCK_TIMEOUT_MS, _RENEWAL_INTERVAL_MS from synapse.util import Clock from tests import unittest @@ -380,8 +381,8 @@ class ReadWriteLockTestCase(unittest.HomeserverTestCase): self.get_success(lock.__aenter__()) # Wait for ages with the lock, we should not be able to get the lock. - self.reactor.advance(5 * _LOCK_TIMEOUT_MS / 1000) - self.pump() + for _ in range(0, 10): + self.reactor.advance((_RENEWAL_INTERVAL_MS / 1000)) lock2 = self.get_success( self.store.try_acquire_read_write_lock("name", "key", write=True) From 54a51ff6c1a1d9c7174e239acdd2dee7bed744f7 Mon Sep 17 00:00:00 2001 From: Shay Date: Thu, 17 Aug 2023 10:53:10 -0700 Subject: [PATCH 328/562] Cache token introspection response from OIDC provider (#16117) --- changelog.d/16117.misc | 1 + synapse/api/auth/msc3861_delegated.py | 40 +++++++++++++++- tests/handlers/test_oauth_delegation.py | 62 +++++++++++++++++++++++++ 3 files changed, 101 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16117.misc diff --git a/changelog.d/16117.misc b/changelog.d/16117.misc new file mode 100644 index 0000000000..f33fa6dc17 --- /dev/null +++ b/changelog.d/16117.misc @@ -0,0 +1 @@ +Cache token introspection response from OIDC provider. 
diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 9524102a30..3a516093f5 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -39,6 +39,7 @@ from synapse.logging.context import make_deferred_yieldable from synapse.types import Requester, UserID, create_requester from synapse.util import json_decoder from synapse.util.caches.cached_call import RetryOnExceptionCachedCall +from synapse.util.caches.expiringcache import ExpiringCache if TYPE_CHECKING: from synapse.server import HomeServer @@ -106,6 +107,14 @@ class MSC3861DelegatedAuth(BaseAuth): self._issuer_metadata = RetryOnExceptionCachedCall(self._load_metadata) + self._clock = hs.get_clock() + self._token_cache: ExpiringCache[str, IntrospectionToken] = ExpiringCache( + cache_name="introspection_token_cache", + clock=self._clock, + max_len=10000, + expiry_ms=5 * 60 * 1000, + ) + if isinstance(auth_method, PrivateKeyJWTWithKid): # Use the JWK as the client secret when using the private_key_jwt method assert self._config.jwk, "No JWK provided" @@ -144,6 +153,20 @@ class MSC3861DelegatedAuth(BaseAuth): Returns: The introspection response """ + # check the cache before doing a request + introspection_token = self._token_cache.get(token, None) + + if introspection_token: + # check the expiration field of the token (if it exists) + exp = introspection_token.get("exp", None) + if exp: + time_now = self._clock.time() + expired = time_now > exp + if not expired: + return introspection_token + else: + return introspection_token + metadata = await self._issuer_metadata.get() introspection_endpoint = metadata.get("introspection_endpoint") raw_headers: Dict[str, str] = { @@ -157,7 +180,10 @@ class MSC3861DelegatedAuth(BaseAuth): # Fill the body/headers with credentials uri, raw_headers, body = self._client_auth.prepare( - method="POST", uri=introspection_endpoint, headers=raw_headers, body=body + method="POST", + uri=introspection_endpoint, + headers=raw_headers, + body=body, ) headers = Headers({k: [v] for (k, v) in raw_headers.items()}) @@ -187,7 +213,17 @@ class MSC3861DelegatedAuth(BaseAuth): "The introspection endpoint returned an invalid JSON response." 
            )
-        return IntrospectionToken(**resp)
+        expiration = resp.get("exp", None)
+        if expiration:
+            if self._clock.time() > expiration:
+                raise InvalidClientTokenError("Token is expired.")
+
+        introspection_token = IntrospectionToken(**resp)
+
+        # add token to cache
+        self._token_cache[token] = introspection_token
+
+        return introspection_token

     async def is_server_admin(self, requester: Requester) -> bool:
         return "urn:synapse:admin:*" in requester.scope
diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py
index 6309d7b36e..82c26e303f 100644
--- a/tests/handlers/test_oauth_delegation.py
+++ b/tests/handlers/test_oauth_delegation.py
@@ -491,6 +491,68 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
         error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)
         self.assertEqual(error.value.code, 503)

+    def test_introspection_token_cache(self) -> None:
+        access_token = "open_sesame"
+        self.http_client.request = simple_async_mock(
+            return_value=FakeResponse.json(
+                code=200,
+                payload={"active": "true", "scope": "guest", "jti": access_token},
+            )
+        )
+
+        # first call should cache response
+        # Mypy ignores below are due to mypy not understanding the dynamic substitution of msc3861 auth code
+        # for regular auth code via the config
+        self.get_success(
+            self.auth._introspect_token(access_token)  # type: ignore[attr-defined]
+        )
+        introspection_token = self.auth._token_cache.get(access_token)  # type: ignore[attr-defined]
+        self.assertEqual(introspection_token["jti"], access_token)
+        # there's been one http request
+        self.http_client.request.assert_called_once()
+
+        # second call should pull from cache, there should still be only one http request
+        token = self.get_success(self.auth._introspect_token(access_token))  # type: ignore[attr-defined]
+        self.http_client.request.assert_called_once()
+        self.assertEqual(token["jti"], access_token)
+
+        # advance past five minutes and check that cache expired - there should be more than one http call now
+        self.reactor.advance(360)
+        token_2 = self.get_success(self.auth._introspect_token(access_token))  # type: ignore[attr-defined]
+        self.assertEqual(self.http_client.request.call_count, 2)
+        self.assertEqual(token_2["jti"], access_token)
+
+        # test that if a cached token is expired, a fresh token will be pulled from the authorizing server - first add a
+        # token with a soon-to-expire `exp` field to the cache
+        self.http_client.request = simple_async_mock(
+            return_value=FakeResponse.json(
+                code=200,
+                payload={
+                    "active": "true",
+                    "scope": "guest",
+                    "jti": "stale",
+                    "exp": self.clock.time() + 100,
+                },
+            )
+        )
+        self.get_success(
+            self.auth._introspect_token("stale")  # type: ignore[attr-defined]
+        )
+        introspection_token = self.auth._token_cache.get("stale")  # type: ignore[attr-defined]
+        self.assertEqual(introspection_token["jti"], "stale")
+        self.assertEqual(self.http_client.request.call_count, 1)
+
+        # advance the reactor past the token expiry but less than the cache expiry
+        self.reactor.advance(120)
+        self.assertEqual(self.auth._token_cache.get("stale"), introspection_token)  # type: ignore[attr-defined]
+
+        # check that the next call causes another http request (which will fail because the token is technically expired
+        # but the important thing is we discard the token from the cache and try the network)
+        self.get_failure(
+            self.auth._introspect_token("stale"), InvalidClientTokenError  # type: ignore[attr-defined]
+        )
+        self.assertEqual(self.http_client.request.call_count, 2)

+    def
make_device_keys(self, user_id: str, device_id: str) -> JsonDict: # We only generate a master key to simplify the test. master_signing_key = generate_signing_key(device_id) From 0aba4a4eaac778ad75509fe20733b27bfc86fd9d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 18 Aug 2023 11:05:01 +0100 Subject: [PATCH 329/562] Add cache to `get_server_keys_json_for_remote` (#16123) --- changelog.d/16123.misc | 1 + synapse/rest/key/v2/remote_key_resource.py | 44 ++++--- synapse/storage/databases/main/keys.py | 136 ++++++++++++++------- synapse/storage/keys.py | 7 ++ tests/crypto/test_keyring.py | 57 ++++----- 5 files changed, 144 insertions(+), 101 deletions(-) create mode 100644 changelog.d/16123.misc diff --git a/changelog.d/16123.misc b/changelog.d/16123.misc new file mode 100644 index 0000000000..b7c6b7c2f2 --- /dev/null +++ b/changelog.d/16123.misc @@ -0,0 +1 @@ +Add cache to `get_server_keys_json_for_remote`. diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 8f3865d412..981fd1f58a 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -14,7 +14,7 @@ import logging import re -from typing import TYPE_CHECKING, Dict, Optional, Set, Tuple +from typing import TYPE_CHECKING, Dict, Mapping, Optional, Set, Tuple from signedjson.sign import sign_json @@ -27,6 +27,7 @@ from synapse.http.servlet import ( parse_integer, parse_json_object_from_request, ) +from synapse.storage.keys import FetchKeyResultForRemote from synapse.types import JsonDict from synapse.util import json_decoder from synapse.util.async_helpers import yieldable_gather_results @@ -157,14 +158,22 @@ class RemoteKey(RestServlet): ) -> JsonDict: logger.info("Handling query for keys %r", query) - store_queries = [] + server_keys: Dict[Tuple[str, str], Optional[FetchKeyResultForRemote]] = {} for server_name, key_ids in query.items(): - if not key_ids: - key_ids = (None,) - for key_id in key_ids: - store_queries.append((server_name, key_id, None)) + if key_ids: + results: Mapping[ + str, Optional[FetchKeyResultForRemote] + ] = await self.store.get_server_keys_json_for_remote( + server_name, key_ids + ) + else: + results = await self.store.get_all_server_keys_json_for_remote( + server_name + ) - cached = await self.store.get_server_keys_json_for_remote(store_queries) + server_keys.update( + ((server_name, key_id), res) for key_id, res in results.items() + ) json_results: Set[bytes] = set() @@ -173,23 +182,20 @@ class RemoteKey(RestServlet): # Map server_name->key_id->int. Note that the value of the int is unused. # XXX: why don't we just use a set? cache_misses: Dict[str, Dict[str, int]] = {} - for (server_name, key_id, _), key_results in cached.items(): - results = [(result["ts_added_ms"], result) for result in key_results] - - if key_id is None: + for (server_name, key_id), key_result in server_keys.items(): + if not query[server_name]: # all keys were requested. Just return what we have without worrying # about validity - for _, result in results: - # Cast to bytes since postgresql returns a memoryview. 
- json_results.add(bytes(result["key_json"])) + if key_result: + json_results.add(key_result.key_json) continue miss = False - if not results: + if key_result is None: miss = True else: - ts_added_ms, most_recent_result = max(results) - ts_valid_until_ms = most_recent_result["ts_valid_until_ms"] + ts_added_ms = key_result.added_ts + ts_valid_until_ms = key_result.valid_until_ts req_key = query.get(server_name, {}).get(key_id, {}) req_valid_until = req_key.get("minimum_valid_until_ts") if req_valid_until is not None: @@ -235,8 +241,8 @@ class RemoteKey(RestServlet): ts_valid_until_ms, time_now_ms, ) - # Cast to bytes since postgresql returns a memoryview. - json_results.add(bytes(most_recent_result["key_json"])) + + json_results.add(key_result.key_json) if miss and query_remote_on_cache_miss: # only bother attempting to fetch keys from servers on our whitelist diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index cea32a034a..a3b4744855 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -16,14 +16,13 @@ import itertools import json import logging -from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple +from typing import Dict, Iterable, Mapping, Optional, Tuple from signedjson.key import decode_verify_key_bytes from unpaddedbase64 import decode_base64 -from synapse.storage._base import SQLBaseStore -from synapse.storage.database import LoggingTransaction -from synapse.storage.keys import FetchKeyResult +from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore +from synapse.storage.keys import FetchKeyResult, FetchKeyResultForRemote from synapse.storage.types import Cursor from synapse.util.caches.descriptors import cached, cachedList from synapse.util.iterutils import batch_iter @@ -34,7 +33,7 @@ logger = logging.getLogger(__name__) db_binary_type = memoryview -class KeyStore(SQLBaseStore): +class KeyStore(CacheInvalidationWorkerStore): """Persistence for signature verification keys""" @cached() @@ -188,7 +187,12 @@ class KeyStore(SQLBaseStore): # invalidate takes a tuple corresponding to the params of # _get_server_keys_json. _get_server_keys_json only takes one # param, which is itself the 2-tuple (server_name, key_id). - self._get_server_keys_json.invalidate(((server_name, key_id),)) + await self.invalidate_cache_and_stream( + "_get_server_keys_json", ((server_name, key_id),) + ) + await self.invalidate_cache_and_stream( + "get_server_key_json_for_remote", (server_name, key_id) + ) @cached() def _get_server_keys_json( @@ -253,47 +257,87 @@ class KeyStore(SQLBaseStore): return await self.db_pool.runInteraction("get_server_keys_json", _txn) + @cached() + def get_server_key_json_for_remote( + self, + server_name: str, + key_id: str, + ) -> Optional[FetchKeyResultForRemote]: + raise NotImplementedError() + + @cachedList( + cached_method_name="get_server_key_json_for_remote", list_name="key_ids" + ) async def get_server_keys_json_for_remote( - self, server_keys: Iterable[Tuple[str, Optional[str], Optional[str]]] - ) -> Dict[Tuple[str, Optional[str], Optional[str]], List[Dict[str, Any]]]: - """Retrieve the key json for a list of server_keys and key ids. - If no keys are found for a given server, key_id and source then - that server, key_id, and source triplet entry will be an empty list. - The JSON is returned as a byte array so that it can be efficiently - used in an HTTP response. 
+ self, server_name: str, key_ids: Iterable[str] + ) -> Dict[str, Optional[FetchKeyResultForRemote]]: + """Fetch the cached keys for the given server/key IDs. - Args: - server_keys: List of (server_name, key_id, source) triplets. - - Returns: - A mapping from (server_name, key_id, source) triplets to a list of dicts + If we have multiple entries for a given key ID, returns the most recent. """ - - def _get_server_keys_json_txn( - txn: LoggingTransaction, - ) -> Dict[Tuple[str, Optional[str], Optional[str]], List[Dict[str, Any]]]: - results = {} - for server_name, key_id, from_server in server_keys: - keyvalues = {"server_name": server_name} - if key_id is not None: - keyvalues["key_id"] = key_id - if from_server is not None: - keyvalues["from_server"] = from_server - rows = self.db_pool.simple_select_list_txn( - txn, - "server_keys_json", - keyvalues=keyvalues, - retcols=( - "key_id", - "from_server", - "ts_added_ms", - "ts_valid_until_ms", - "key_json", - ), - ) - results[(server_name, key_id, from_server)] = rows - return results - - return await self.db_pool.runInteraction( - "get_server_keys_json", _get_server_keys_json_txn + rows = await self.db_pool.simple_select_many_batch( + table="server_keys_json", + column="key_id", + iterable=key_ids, + keyvalues={"server_name": server_name}, + retcols=( + "key_id", + "from_server", + "ts_added_ms", + "ts_valid_until_ms", + "key_json", + ), + desc="get_server_keys_json_for_remote", ) + + if not rows: + return {} + + # We sort the rows so that the most recently added entry is picked up. + rows.sort(key=lambda r: r["ts_added_ms"]) + + return { + row["key_id"]: FetchKeyResultForRemote( + # Cast to bytes since postgresql returns a memoryview. + key_json=bytes(row["key_json"]), + valid_until_ts=row["ts_valid_until_ms"], + added_ts=row["ts_added_ms"], + ) + for row in rows + } + + async def get_all_server_keys_json_for_remote( + self, + server_name: str, + ) -> Dict[str, FetchKeyResultForRemote]: + """Fetch the cached keys for the given server. + + If we have multiple entries for a given key ID, returns the most recent. + """ + rows = await self.db_pool.simple_select_list( + table="server_keys_json", + keyvalues={"server_name": server_name}, + retcols=( + "key_id", + "from_server", + "ts_added_ms", + "ts_valid_until_ms", + "key_json", + ), + desc="get_server_keys_json_for_remote", + ) + + if not rows: + return {} + + rows.sort(key=lambda r: r["ts_added_ms"]) + + return { + row["key_id"]: FetchKeyResultForRemote( + # Cast to bytes since postgresql returns a memoryview. + key_json=bytes(row["key_json"]), + valid_until_ts=row["ts_valid_until_ms"], + added_ts=row["ts_added_ms"], + ) + for row in rows + } diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index 71584f3f74..e74b2269d2 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -25,3 +25,10 @@ logger = logging.getLogger(__name__) class FetchKeyResult: verify_key: VerifyKey # the key itself valid_until_ts: int # how long we can use this key for + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class FetchKeyResultForRemote: + key_json: bytes # the full key JSON + valid_until_ts: int # how long we can use this key for, in milliseconds. + added_ts: int # When we added this key, in milliseconds. 
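The `@cached`/`@cachedList` pairing above follows a standard Synapse pattern, restated here in condensed form (class body elided to just the two methods):

    from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
    from synapse.util.caches.descriptors import cached, cachedList

    class KeyStore(CacheInvalidationWorkerStore):
        @cached()
        def get_server_key_json_for_remote(self, server_name, key_id):
            # Per-key stub: never called directly, it only defines the
            # cache that the batch method below populates.
            raise NotImplementedError()

        @cachedList(
            cached_method_name="get_server_key_json_for_remote", list_name="key_ids"
        )
        async def get_server_keys_json_for_remote(self, server_name, key_ids):
            # Only key_ids missing from the per-key cache reach the
            # database; each result is stored under its per-key entry, so
            # repeated /key/v2/query lookups become cache hits.
            ...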
diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index fdfd4f911d..2be341ac7b 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -456,24 +456,19 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): self.assertEqual(k.verify_key.version, "ver1") # check that the perspectives store is correctly updated - lookup_triplet = (SERVER_NAME, testverifykey_id, None) key_json = self.get_success( self.hs.get_datastores().main.get_server_keys_json_for_remote( - [lookup_triplet] + SERVER_NAME, [testverifykey_id] ) ) - res_keys = key_json[lookup_triplet] - self.assertEqual(len(res_keys), 1) - res = res_keys[0] - self.assertEqual(res["key_id"], testverifykey_id) - self.assertEqual(res["from_server"], SERVER_NAME) - self.assertEqual(res["ts_added_ms"], self.reactor.seconds() * 1000) - self.assertEqual(res["ts_valid_until_ms"], VALID_UNTIL_TS) + res = key_json[testverifykey_id] + self.assertIsNotNone(res) + assert res is not None + self.assertEqual(res.added_ts, self.reactor.seconds() * 1000) + self.assertEqual(res.valid_until_ts, VALID_UNTIL_TS) # we expect it to be encoded as canonical json *before* it hits the db - self.assertEqual( - bytes(res["key_json"]), canonicaljson.encode_canonical_json(response) - ) + self.assertEqual(res.key_json, canonicaljson.encode_canonical_json(response)) # change the server name: the result should be ignored response["server_name"] = "OTHER_SERVER" @@ -576,23 +571,18 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): self.assertEqual(k.verify_key.version, "ver1") # check that the perspectives store is correctly updated - lookup_triplet = (SERVER_NAME, testverifykey_id, None) key_json = self.get_success( self.hs.get_datastores().main.get_server_keys_json_for_remote( - [lookup_triplet] + SERVER_NAME, [testverifykey_id] ) ) - res_keys = key_json[lookup_triplet] - self.assertEqual(len(res_keys), 1) - res = res_keys[0] - self.assertEqual(res["key_id"], testverifykey_id) - self.assertEqual(res["from_server"], self.mock_perspective_server.server_name) - self.assertEqual(res["ts_added_ms"], self.reactor.seconds() * 1000) - self.assertEqual(res["ts_valid_until_ms"], VALID_UNTIL_TS) + res = key_json[testverifykey_id] + self.assertIsNotNone(res) + assert res is not None + self.assertEqual(res.added_ts, self.reactor.seconds() * 1000) + self.assertEqual(res.valid_until_ts, VALID_UNTIL_TS) - self.assertEqual( - bytes(res["key_json"]), canonicaljson.encode_canonical_json(response) - ) + self.assertEqual(res.key_json, canonicaljson.encode_canonical_json(response)) def test_get_multiple_keys_from_perspectives(self) -> None: """Check that we can correctly request multiple keys for the same server""" @@ -699,23 +689,18 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): self.assertEqual(k.verify_key.version, "ver1") # check that the perspectives store is correctly updated - lookup_triplet = (SERVER_NAME, testverifykey_id, None) key_json = self.get_success( self.hs.get_datastores().main.get_server_keys_json_for_remote( - [lookup_triplet] + SERVER_NAME, [testverifykey_id] ) ) - res_keys = key_json[lookup_triplet] - self.assertEqual(len(res_keys), 1) - res = res_keys[0] - self.assertEqual(res["key_id"], testverifykey_id) - self.assertEqual(res["from_server"], self.mock_perspective_server.server_name) - self.assertEqual(res["ts_added_ms"], self.reactor.seconds() * 1000) - self.assertEqual(res["ts_valid_until_ms"], VALID_UNTIL_TS) + res = key_json[testverifykey_id] + self.assertIsNotNone(res) + 
assert res is not None + self.assertEqual(res.added_ts, self.reactor.seconds() * 1000) + self.assertEqual(res.valid_until_ts, VALID_UNTIL_TS) - self.assertEqual( - bytes(res["key_json"]), canonicaljson.encode_canonical_json(response) - ) + self.assertEqual(res.key_json, canonicaljson.encode_canonical_json(response)) def test_invalid_perspectives_responses(self) -> None: """Check that invalid responses from the perspectives server are rejected""" From 6130afb862c6547e5e279353fc032c4d63fe14d2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 18 Aug 2023 12:16:00 +0100 Subject: [PATCH 330/562] Add response time metrics for introspection requests (#16131) See #16119 --- changelog.d/16131.misc | 1 + synapse/api/auth/msc3861_delegated.py | 34 +++++++++++++++++++++------ 2 files changed, 28 insertions(+), 7 deletions(-) create mode 100644 changelog.d/16131.misc diff --git a/changelog.d/16131.misc b/changelog.d/16131.misc new file mode 100644 index 0000000000..4f04699512 --- /dev/null +++ b/changelog.d/16131.misc @@ -0,0 +1 @@ +Add response time metrics for introspection requests for delegated auth. diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 3a516093f5..18875f2c81 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -20,6 +20,7 @@ from authlib.oauth2.auth import encode_client_secret_basic, encode_client_secret from authlib.oauth2.rfc7523 import ClientSecretJWT, PrivateKeyJWT, private_key_jwt_sign from authlib.oauth2.rfc7662 import IntrospectionToken from authlib.oidc.discovery import OpenIDProviderMetadata, get_well_known_url +from prometheus_client import Histogram from twisted.web.client import readBody from twisted.web.http_headers import Headers @@ -46,6 +47,13 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +introspection_response_timer = Histogram( + "synapse_api_auth_delegated_introspection_response", + "Time taken to get a response for an introspection request", + ["code"], +) + + # Scope as defined by MSC2967 # https://github.com/matrix-org/matrix-spec-proposals/pull/2967 SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*" @@ -190,14 +198,26 @@ class MSC3861DelegatedAuth(BaseAuth): # Do the actual request # We're not using the SimpleHttpClient util methods as we don't want to # check the HTTP status code, and we do the body encoding ourselves. 
- response = await self._http_client.request( - method="POST", - uri=uri, - data=body.encode("utf-8"), - headers=headers, - ) - resp_body = await make_deferred_yieldable(readBody(response)) + start_time = self._clock.time() + try: + response = await self._http_client.request( + method="POST", + uri=uri, + data=body.encode("utf-8"), + headers=headers, + ) + + resp_body = await make_deferred_yieldable(readBody(response)) + except Exception: + end_time = self._clock.time() + introspection_response_timer.labels("ERR").observe(end_time - start_time) + raise + + end_time = self._clock.time() + introspection_response_timer.labels(response.code).observe( + end_time - start_time + ) if response.code < 200 or response.code >= 300: raise HttpResponseException( From 54317d34b76adb1e8f694acd91f631b3abe38947 Mon Sep 17 00:00:00 2001 From: Alexander Fechler <141915399+afechler@users.noreply.github.com> Date: Fri, 18 Aug 2023 13:26:38 +0200 Subject: [PATCH 331/562] Allow filtering for admins in the list accounts admin API (#16114) --- changelog.d/16114.feature | 1 + docs/admin_api/user_admin_api.md | 2 ++ synapse/rest/admin/users.py | 3 ++ synapse/storage/databases/main/__init__.py | 10 ++++++ tests/rest/admin/test_user.py | 38 ++++++++++++++++++++++ 5 files changed, 54 insertions(+) create mode 100644 changelog.d/16114.feature diff --git a/changelog.d/16114.feature b/changelog.d/16114.feature new file mode 100644 index 0000000000..e937a3b029 --- /dev/null +++ b/changelog.d/16114.feature @@ -0,0 +1 @@ +Add an `admins` query parameter to the [List Accounts](https://matrix-org.github.io/synapse/v1.91/admin_api/user_admin_api.html#list-accounts) [admin API](https://matrix-org.github.io/synapse/v1.91/usage/administration/admin_api/index.html), to include only admins or to exclude admins in user queries. \ No newline at end of file diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index c269ce6af0..99abfea3a0 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -219,6 +219,8 @@ The following parameters should be set in the URL: **or** displaynames that contain this value. - `guests` - string representing a bool - Is optional and if `false` will **exclude** guest users. Defaults to `true` to include guest users. +- `admins` - Optional flag to filter admins. If `true`, only admins are queried. If `false`, admins are excluded from + the query. When the flag is absent (the default), **both** admins and non-admins are included in the search results. - `deactivated` - string representing a bool - Is optional and if `true` will **include** deactivated users. Defaults to `false` to exclude deactivated users. - `limit` - string representing a positive integer - Is optional but is used for pagination, diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 04d9ef25b7..240e6254b0 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -109,6 +109,8 @@ class UsersRestServletV2(RestServlet): ) deactivated = parse_boolean(request, "deactivated", default=False) + admins = parse_boolean(request, "admins") + # If support for MSC3866 is not enabled, apply no filtering based on the # `approved` column. 
        if self._msc3866_enabled:
@@ -146,6 +148,7 @@ class UsersRestServletV2(RestServlet):
             name,
             guests,
             deactivated,
+            admins,
             order_by,
             direction,
             approved,
diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py
index be67d1ff22..e17f25e87a 100644
--- a/synapse/storage/databases/main/__init__.py
+++ b/synapse/storage/databases/main/__init__.py
@@ -168,6 +168,7 @@ class DataStore(
         name: Optional[str] = None,
         guests: bool = True,
         deactivated: bool = False,
+        admins: Optional[bool] = None,
         order_by: str = UserSortOrder.NAME.value,
         direction: Direction = Direction.FORWARDS,
         approved: bool = True,
@@ -184,6 +185,9 @@ class DataStore(
             name: search for local part of user_id or display name
             guests: whether to include guest users
             deactivated: whether to include deactivated users
+            admins: Optional flag to filter admins. If true, only admins are queried.
+                If false, admins are excluded from the query. When it is
+                None (the default), both admins and non-admins are queried.
             order_by: the sort order of the returned list
             direction: sort ascending or descending
             approved: whether to include approved users
@@ -220,6 +224,12 @@ class DataStore(
             if not deactivated:
                 filters.append("deactivated = 0")

+            if admins is not None:
+                if admins:
+                    filters.append("admin = 1")
+                else:
+                    filters.append("admin = 0")
+
             if not approved:
                 # We ignore NULL values for the approved flag because these should only
                 # be already existing users that we consider as already approved.
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 41a959b4d6..feb81844ae 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -879,6 +879,44 @@ class UsersListTestCase(unittest.HomeserverTestCase):
         self._order_test([self.admin_user, user1, user2], "creation_ts", "f")
         self._order_test([user2, user1, self.admin_user], "creation_ts", "b")

+    def test_filter_admins(self) -> None:
+        """
+        Tests whether the various values of the query parameter `admins` lead to the
+        expected result set.
+ """ + + # Register an additional non admin user + self.register_user("user", "pass", admin=False) + + # Query all users + channel = self.make_request( + "GET", + f"{self.url}", + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code, channel.result) + self.assertEqual(2, channel.json_body["total"]) + + # Query only admin users + channel = self.make_request( + "GET", + f"{self.url}?admins=true", + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code, channel.result) + self.assertEqual(1, channel.json_body["total"]) + self.assertEqual(1, channel.json_body["users"][0]["admin"]) + + # Query only non admin users + channel = self.make_request( + "GET", + f"{self.url}?admins=false", + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code, channel.result) + self.assertEqual(1, channel.json_body["total"]) + self.assertFalse(channel.json_body["users"][0]["admin"]) + @override_config( { "experimental_features": { From 2d15e396843879bb514a148097cbddf10f50655c Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Fri, 18 Aug 2023 15:46:46 +0200 Subject: [PATCH 332/562] MSC3861: allow impersonation by an admin using a query param (#16132) --- changelog.d/16132.misc | 1 + synapse/api/auth/msc3861_delegated.py | 25 +++++++++++++++--- tests/handlers/test_oauth_delegation.py | 35 +++++++++++++++++++++++++ 3 files changed, 58 insertions(+), 3 deletions(-) create mode 100644 changelog.d/16132.misc diff --git a/changelog.d/16132.misc b/changelog.d/16132.misc new file mode 100644 index 0000000000..aca26079d8 --- /dev/null +++ b/changelog.d/16132.misc @@ -0,0 +1 @@ +MSC3861: allow impersonation by an admin user using `_oidc_admin_impersonate_user_id` query parameter. diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 18875f2c81..4bdfe31b22 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -246,7 +246,7 @@ class MSC3861DelegatedAuth(BaseAuth): return introspection_token async def is_server_admin(self, requester: Requester) -> bool: - return "urn:synapse:admin:*" in requester.scope + return SCOPE_SYNAPSE_ADMIN in requester.scope async def get_user_by_req( self, @@ -263,6 +263,25 @@ class MSC3861DelegatedAuth(BaseAuth): # so that we don't provision the user if they don't have enough permission: requester = await self.get_user_by_access_token(access_token, allow_expired) + # Allow impersonation by an admin user using `_oidc_admin_impersonate_user_id` query parameter + if request.args is not None: + user_id_params = request.args.get(b"_oidc_admin_impersonate_user_id") + if user_id_params: + if await self.is_server_admin(requester): + user_id_str = user_id_params[0].decode("ascii") + impersonated_user_id = UserID.from_string(user_id_str) + logging.info(f"Admin impersonation of user {user_id_str}") + requester = create_requester( + user_id=impersonated_user_id, + scope=[SCOPE_MATRIX_API], + authenticated_entity=requester.user.to_string(), + ) + else: + raise AuthError( + 401, + "Impersonation not possible by a non admin user", + ) + # Deny the request if the user account is locked. if not allow_locked and await self.store.get_user_locked_status( requester.user.to_string() @@ -290,14 +309,14 @@ class MSC3861DelegatedAuth(BaseAuth): # XXX: This is a temporary solution so that the admin API can be called by # the OIDC provider. This will be removed once we have OIDC client # credentials grant support in matrix-authentication-service. 
-    logging.info("Admin toked used")
+    logging.info("Admin token used")
     # XXX: that user doesn't exist and won't be provisioned.
     # This is mostly fine for admin calls, but we should also think about doing
     # requesters without a user_id.
     admin_user = UserID("__oidc_admin", self._hostname)
     return create_requester(
         user_id=admin_user,
-        scope=["urn:synapse:admin:*"],
+        scope=[SCOPE_SYNAPSE_ADMIN],
     )

     try:
diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py
index 82c26e303f..1456b675a7 100644
--- a/tests/handlers/test_oauth_delegation.py
+++ b/tests/handlers/test_oauth_delegation.py
@@ -340,6 +340,41 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
             get_awaitable_result(self.auth.is_server_admin(requester)), False
         )

+    def test_active_user_admin_impersonation(self) -> None:
+        """The handler should return a requester with normal user rights
+        and a user ID matching the one specified in the query param
+        `_oidc_admin_impersonate_user_id`."""
+
+        self.http_client.request = simple_async_mock(
+            return_value=FakeResponse.json(
+                code=200,
+                payload={
+                    "active": True,
+                    "sub": SUBJECT,
+                    "scope": " ".join([SYNAPSE_ADMIN_SCOPE, MATRIX_USER_SCOPE]),
+                    "username": USERNAME,
+                },
+            )
+        )
+        request = Mock(args={})
+        request.args[b"access_token"] = [b"mockAccessToken"]
+        impersonated_user_id = f"@{USERNAME}:{SERVER_NAME}"
+        request.args[b"_oidc_admin_impersonate_user_id"] = [
+            impersonated_user_id.encode("ascii")
+        ]
+        request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+        requester = self.get_success(self.auth.get_user_by_req(request))
+        self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+        self.http_client.request.assert_called_once_with(
+            method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY
+        )
+        self._assertParams()
+        self.assertEqual(requester.user.to_string(), impersonated_user_id)
+        self.assertEqual(requester.is_guest, False)
+        self.assertEqual(requester.device_id, None)
+        self.assertEqual(
+            get_awaitable_result(self.auth.is_server_admin(requester)), False
+        )
+
     def test_active_user_with_device(self) -> None:
         """The handler should return a requester with normal user rights and a
         device ID."""

From bd558a6dc369b6f5d06ab6fd2500faa216a45883 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 18 Aug 2023 15:32:06 +0100
Subject: [PATCH 333/562] Speed up state res in rare case we don't have all
 events (#16116)

If we don't have all the auth events in a room then not all state events
will have a chain cover index. Even so, we can still use the chain cover
index on the events that do have it, rather than bailing and using the
slower functions.

This situation should not arise for newly persisted rooms, as we check
we have the full auth chain for each event, but can happen for existing
rooms.

c.f. #15245
---
 changelog.d/16116.bugfix                      |   1 +
 .../databases/main/event_federation.py        | 184 +++++++++++--
 tests/storage/test_event_federation.py        | 241 ++++++++++++++----
 3 files changed, 355 insertions(+), 71 deletions(-)
 create mode 100644 changelog.d/16116.bugfix

diff --git a/changelog.d/16116.bugfix b/changelog.d/16116.bugfix
new file mode 100644
index 0000000000..f57a26ae39
--- /dev/null
+++ b/changelog.d/16116.bugfix
@@ -0,0 +1 @@
+Fix performance of state resolutions for large, old rooms that did not have the full auth chain persisted.
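Before reading the diff, note the set algebra the new fallback boils down to: the auth chain difference of several state sets is everything reachable from at least one of them minus everything reachable from all of them. A minimal standalone sketch of that step (illustrative names only, not Synapse's internal API):

    from typing import List, Set

    def auth_chain_difference(full_chains: List[Set[str]]) -> Set[str]:
        """Events in the auth chain of *some* state set but not of *all* of them.

        Assumes each entry already contains a state set plus its full
        recursive auth chain.
        """
        union = full_chains[0].union(*full_chains[1:])
        intersection = full_chains[0].intersection(*full_chains[1:])
        return union - intersection

    # "c" is only reachable from the first set and "d" only from the second,
    # so both are in the difference; "a" is common to all sets and is not.
    assert auth_chain_difference([{"a", "c"}, {"a", "d"}]) == {"c", "d"}

The patch applies exactly this union-minus-intersection computation to the events that lack a chain cover index (in `_fixup_auth_chain_difference_sets` below), while the indexed remainder still goes through the faster chain cover path.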
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 534dc32413..fab7008a8f 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -452,33 +452,56 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas # sets. seen_chains: Set[int] = set() - sql = """ - SELECT event_id, chain_id, sequence_number - FROM event_auth_chains - WHERE %s - """ - for batch in batch_iter(initial_events, 1000): - clause, args = make_in_list_sql_clause( - txn.database_engine, "event_id", batch - ) - txn.execute(sql % (clause,), args) + # Fetch the chain cover index for the initial set of events we're + # considering. + def fetch_chain_info(events_to_fetch: Collection[str]) -> None: + sql = """ + SELECT event_id, chain_id, sequence_number + FROM event_auth_chains + WHERE %s + """ + for batch in batch_iter(events_to_fetch, 1000): + clause, args = make_in_list_sql_clause( + txn.database_engine, "event_id", batch + ) + txn.execute(sql % (clause,), args) - for event_id, chain_id, sequence_number in txn: - chain_info[event_id] = (chain_id, sequence_number) - seen_chains.add(chain_id) - chain_to_event.setdefault(chain_id, {})[sequence_number] = event_id + for event_id, chain_id, sequence_number in txn: + chain_info[event_id] = (chain_id, sequence_number) + seen_chains.add(chain_id) + chain_to_event.setdefault(chain_id, {})[sequence_number] = event_id + + fetch_chain_info(initial_events) # Check that we actually have a chain ID for all the events. events_missing_chain_info = initial_events.difference(chain_info) + + # The result set to return, i.e. the auth chain difference. + result: Set[str] = set() + if events_missing_chain_info: - # This can happen due to e.g. downgrade/upgrade of the server. We - # raise an exception and fall back to the previous algorithm. - logger.info( - "Unexpectedly found that events don't have chain IDs in room %s: %s", + # For some reason we have events we haven't calculated the chain + # index for, so we need to handle those separately. This should only + # happen for older rooms where the server doesn't have all the auth + # events. + result = self._fixup_auth_chain_difference_sets( + txn, room_id, - events_missing_chain_info, + state_sets=state_sets, + events_missing_chain_info=events_missing_chain_info, + events_that_have_chain_index=chain_info, ) - raise _NoChainCoverIndex(room_id) + + # We now need to refetch any events that we have added to the state + # sets. + new_events_to_fetch = { + event_id + for state_set in state_sets + for event_id in state_set + if event_id not in initial_events + } + + fetch_chain_info(new_events_to_fetch) # Corresponds to `state_sets`, except as a map from chain ID to max # sequence number reachable from the state set. @@ -487,8 +510,8 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas chains: Dict[int, int] = {} set_to_chain.append(chains) - for event_id in state_set: - chain_id, seq_no = chain_info[event_id] + for state_id in state_set: + chain_id, seq_no = chain_info[state_id] chains[chain_id] = max(seq_no, chains.get(chain_id, 0)) @@ -532,7 +555,6 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas # from *any* state set and the minimum sequence number reachable from # *all* state sets. Events in that range are in the auth chain # difference. 
-        result = set()

         # Mapping from chain ID to the range of sequence numbers that should be
         # pulled from the database.
@@ -588,6 +610,122 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas

         return result

+    def _fixup_auth_chain_difference_sets(
+        self,
+        txn: LoggingTransaction,
+        room_id: str,
+        state_sets: List[Set[str]],
+        events_missing_chain_info: Set[str],
+        events_that_have_chain_index: Collection[str],
+    ) -> Set[str]:
+        """Helper for `_get_auth_chain_difference_using_cover_index_txn` to
+        handle the case where we haven't calculated the chain cover index for
+        all events.
+
+        This modifies `state_sets` so that they only include events that have a
+        chain cover index, and returns a set of event IDs that are part of the
+        auth difference.
+        """
+
+        # This works similarly to the handling of unpersisted events in
+        # `synapse.state.v2_get_auth_chain_difference`. We use the observation
+        # that if you can split the set of events into two classes X and Y,
+        # where no events in Y have events in X in their auth chain, then we can
+        # calculate the auth difference by considering X and Y separately.
+        #
+        # We do this in three steps:
+        #   1. Compute the set of events without chain cover index belonging to
+        #      the auth difference.
+        #   2. Replace the un-indexed events in the state_sets with their auth
+        #      events, recursively, until the state_sets contain only indexed
+        #      events. We can then calculate the auth difference of those state
+        #      sets using the chain cover index.
+        #   3. Add the results of 1 and 2 together.
+
+        # By construction we know that all events that we haven't persisted the
+        # chain cover index for are contained in
+        # `event_auth_chain_to_calculate`, so we pull out the events from those
+        # rather than doing recursive queries to walk the auth chain.
+        #
+        # We pull out those events with their auth events, which gives us enough
+        # information to construct the auth chain of an event up to auth events
+        # that have the chain cover index.
+        sql = """
+            SELECT tc.event_id, ea.auth_id, eac.chain_id IS NOT NULL
+            FROM event_auth_chain_to_calculate AS tc
+            LEFT JOIN event_auth AS ea USING (event_id)
+            LEFT JOIN event_auth_chains AS eac ON (ea.auth_id = eac.event_id)
+            WHERE tc.room_id = ?
+        """
+        txn.execute(sql, (room_id,))
+        event_to_auth_ids: Dict[str, Set[str]] = {}
+        events_that_have_chain_index = set(events_that_have_chain_index)
+        for event_id, auth_id, auth_id_has_chain in txn:
+            s = event_to_auth_ids.setdefault(event_id, set())
+            if auth_id is not None:
+                s.add(auth_id)
+                if auth_id_has_chain:
+                    events_that_have_chain_index.add(auth_id)
+
+        if events_missing_chain_info - event_to_auth_ids.keys():
+            # Uh oh, we somehow haven't correctly done the chain cover index,
+            # bail and fall back to the old method.
+            logger.info(
+                "Unexpectedly found that events don't have chain IDs in room %s: %s",
+                room_id,
+                events_missing_chain_info - event_to_auth_ids.keys(),
+            )
+            raise _NoChainCoverIndex(room_id)
+
+        # Create a map from event IDs we care about to their partial auth chain.
+ event_id_to_partial_auth_chain: Dict[str, Set[str]] = {} + for event_id, auth_ids in event_to_auth_ids.items(): + if not any(event_id in state_set for state_set in state_sets): + continue + + processing = set(auth_ids) + to_add = set() + while processing: + auth_id = processing.pop() + to_add.add(auth_id) + + sub_auth_ids = event_to_auth_ids.get(auth_id) + if sub_auth_ids is None: + continue + + processing.update(sub_auth_ids - to_add) + + event_id_to_partial_auth_chain[event_id] = to_add + + # Now we do two things: + # 1. Update the state sets to only include indexed events; and + # 2. Create a new list containing the auth chains of the un-indexed + # events + unindexed_state_sets: List[Set[str]] = [] + for state_set in state_sets: + unindexed_state_set = set() + for event_id, auth_chain in event_id_to_partial_auth_chain.items(): + if event_id not in state_set: + continue + + unindexed_state_set.add(event_id) + + state_set.discard(event_id) + state_set.difference_update(auth_chain) + for auth_id in auth_chain: + if auth_id in events_that_have_chain_index: + state_set.add(auth_id) + else: + unindexed_state_set.add(auth_id) + + unindexed_state_sets.append(unindexed_state_set) + + # Calculate and return the auth difference of the un-indexed events. + union = unindexed_state_sets[0].union(*unindexed_state_sets[1:]) + intersection = unindexed_state_sets[0].intersection(*unindexed_state_sets[1:]) + + return union - intersection + def _get_auth_chain_difference_txn( self, txn: LoggingTransaction, state_sets: List[Set[str]] ) -> Set[str]: diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 9c151a5e62..7a4ecab2d5 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -13,7 +13,19 @@ # limitations under the License. import datetime -from typing import Dict, List, Tuple, Union, cast +from typing import ( + Collection, + Dict, + FrozenSet, + Iterable, + List, + Mapping, + Set, + Tuple, + TypeVar, + Union, + cast, +) import attr from parameterized import parameterized @@ -38,6 +50,138 @@ from synapse.util import Clock, json_encoder import tests.unittest import tests.utils +# The silly auth graph we use to test the auth difference algorithm, +# where the top are the most recent events. +# +# A B +# \ / +# D E +# \ | +# ` F C +# | /| +# G ´ | +# | \ | +# H I +# | | +# K J + +AUTH_GRAPH: Dict[str, List[str]] = { + "a": ["e"], + "b": ["e"], + "c": ["g", "i"], + "d": ["f"], + "e": ["f"], + "f": ["g"], + "g": ["h", "i"], + "h": ["k"], + "i": ["j"], + "k": [], + "j": [], +} + +DEPTH_GRAPH = { + "a": 7, + "b": 7, + "c": 4, + "d": 6, + "e": 6, + "f": 5, + "g": 3, + "h": 2, + "i": 2, + "k": 1, + "j": 1, +} + +T = TypeVar("T") + + +def get_all_topologically_sorted_orders( + nodes: Iterable[T], + graph: Mapping[T, Collection[T]], +) -> List[List[T]]: + """Given a set of nodes and a graph, return all possible topological + orderings. + """ + + # This is implemented by Kahn's algorithm, and forking execution each time + # we have a choice over which node to consider next. 
+ + degree_map = {node: 0 for node in nodes} + reverse_graph: Dict[T, Set[T]] = {} + + for node, edges in graph.items(): + if node not in degree_map: + continue + + for edge in set(edges): + if edge in degree_map: + degree_map[node] += 1 + + reverse_graph.setdefault(edge, set()).add(node) + reverse_graph.setdefault(node, set()) + + zero_degree = [node for node, degree in degree_map.items() if degree == 0] + + return _get_all_topologically_sorted_orders_inner( + reverse_graph, zero_degree, degree_map + ) + + +def _get_all_topologically_sorted_orders_inner( + reverse_graph: Dict[T, Set[T]], + zero_degree: List[T], + degree_map: Dict[T, int], +) -> List[List[T]]: + new_paths = [] + + # Rather than only choosing *one* item from the list of nodes with zero + # degree, we "fork" execution and run the algorithm for each node in the + # zero degree. + for node in zero_degree: + new_degree_map = degree_map.copy() + new_zero_degree = zero_degree.copy() + new_zero_degree.remove(node) + + for edge in reverse_graph.get(node, []): + if edge in new_degree_map: + new_degree_map[edge] -= 1 + if new_degree_map[edge] == 0: + new_zero_degree.append(edge) + + paths = _get_all_topologically_sorted_orders_inner( + reverse_graph, new_zero_degree, new_degree_map + ) + for path in paths: + path.insert(0, node) + + new_paths.extend(paths) + + if not new_paths: + return [[]] + + return new_paths + + +def get_all_topologically_consistent_subsets( + nodes: Iterable[T], + graph: Mapping[T, Collection[T]], +) -> Set[FrozenSet[T]]: + """Get all subsets of the graph where if node N is in the subgraph, then all + nodes that can reach that node (i.e. for all X there exists a path X -> N) + are in the subgraph. + """ + all_topological_orderings = get_all_topologically_sorted_orders(nodes, graph) + + graph_subsets = set() + for ordering in all_topological_orderings: + ordering.reverse() + + for idx in range(len(ordering)): + graph_subsets.add(frozenset(ordering[:idx])) + + return graph_subsets + @attr.s(auto_attribs=True, frozen=True, slots=True) class _BackfillSetupInfo: @@ -172,49 +316,6 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): def _setup_auth_chain(self, use_chain_cover_index: bool) -> str: room_id = "@ROOM:local" - # The silly auth graph we use to test the auth difference algorithm, - # where the top are the most recent events. - # - # A B - # \ / - # D E - # \ | - # ` F C - # | /| - # G ´ | - # | \ | - # H I - # | | - # K J - - auth_graph: Dict[str, List[str]] = { - "a": ["e"], - "b": ["e"], - "c": ["g", "i"], - "d": ["f"], - "e": ["f"], - "f": ["g"], - "g": ["h", "i"], - "h": ["k"], - "i": ["j"], - "k": [], - "j": [], - } - - depth_map = { - "a": 7, - "b": 7, - "c": 4, - "d": 6, - "e": 6, - "f": 5, - "g": 3, - "h": 2, - "i": 2, - "k": 1, - "j": 1, - } - # Mark the room as maybe having a cover index. 
def store_room(txn: LoggingTransaction) -> None: @@ -238,9 +339,9 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): def insert_event(txn: LoggingTransaction) -> None: stream_ordering = 0 - for event_id in auth_graph: + for event_id in AUTH_GRAPH: stream_ordering += 1 - depth = depth_map[event_id] + depth = DEPTH_GRAPH[event_id] self.store.db_pool.simple_insert_txn( txn, @@ -260,8 +361,8 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): self.persist_events._persist_event_auth_chain_txn( txn, [ - cast(EventBase, FakeEvent(event_id, room_id, auth_graph[event_id])) - for event_id in auth_graph + cast(EventBase, FakeEvent(event_id, room_id, AUTH_GRAPH[event_id])) + for event_id in AUTH_GRAPH ], ) @@ -344,7 +445,51 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): room_id = self._setup_auth_chain(use_chain_cover_index) # Now actually test that various combinations give the right result: + self.assert_auth_diff_is_expected(room_id) + @parameterized.expand( + [ + [graph_subset] + for graph_subset in get_all_topologically_consistent_subsets( + AUTH_GRAPH, AUTH_GRAPH + ) + ] + ) + def test_auth_difference_partial(self, graph_subset: Collection[str]) -> None: + """Test that if we only have a chain cover index on a partial subset of + the room we still get the correct auth chain difference. + + We do this by removing the chain cover index for every valid subset of the + graph. + """ + room_id = self._setup_auth_chain(True) + + for event_id in graph_subset: + # Remove chain cover from that event. + self.get_success( + self.store.db_pool.simple_delete( + table="event_auth_chains", + keyvalues={"event_id": event_id}, + desc="test_auth_difference_partial_remove", + ) + ) + self.get_success( + self.store.db_pool.simple_insert( + table="event_auth_chain_to_calculate", + values={ + "event_id": event_id, + "room_id": room_id, + "type": "", + "state_key": "", + }, + desc="test_auth_difference_partial_remove", + ) + ) + + self.assert_auth_diff_is_expected(room_id) + + def assert_auth_diff_is_expected(self, room_id: str) -> None: + """Assert the auth chain difference returns the correct answers.""" difference = self.get_success( self.store.get_auth_chain_difference(room_id, [{"a"}, {"b"}]) ) From 406ff3eb62d4540aa3408bf067c78daab02f3af7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Aug 2023 10:39:07 +0100 Subject: [PATCH 334/562] Bump sentry-sdk from 1.28.1 to 1.29.2 (#16142) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index db1332a04b..9ef084e3d7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2385,13 +2385,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "1.28.1" +version = "1.29.2" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = "*" files = [ - {file = "sentry-sdk-1.28.1.tar.gz", hash = "sha256:dcd88c68aa64dae715311b5ede6502fd684f70d00a7cd4858118f0ba3153a3ae"}, - {file = "sentry_sdk-1.28.1-py2.py3-none-any.whl", hash = "sha256:6bdb25bd9092478d3a817cb0d01fa99e296aea34d404eac3ca0037faa5c2aa0a"}, + {file = "sentry-sdk-1.29.2.tar.gz", hash = "sha256:a99ee105384788c3f228726a88baf515fe7b5f1d2d0f215a03d194369f158df7"}, + {file = "sentry_sdk-1.29.2-py2.py3-none-any.whl", hash = 
"sha256:3e17215d8006612e2df02b0e73115eb8376c37e3f586d8436fa41644e605074d"}, ] [package.dependencies] From 07c0875aa57e0df4664083d5fc1adf47ff512f6c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Aug 2023 10:41:00 +0100 Subject: [PATCH 335/562] Bump types-pyopenssl from 23.2.0.1 to 23.2.0.2 (#16146) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 9ef084e3d7..b8cf6f2850 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3013,13 +3013,13 @@ files = [ [[package]] name = "types-pyopenssl" -version = "23.2.0.1" +version = "23.2.0.2" description = "Typing stubs for pyOpenSSL" optional = false python-versions = "*" files = [ - {file = "types-pyOpenSSL-23.2.0.1.tar.gz", hash = "sha256:beeb5d22704c625a1e4b6dc756355c5b4af0b980138b702a9d9f932acf020903"}, - {file = "types_pyOpenSSL-23.2.0.1-py3-none-any.whl", hash = "sha256:0568553f104466f1b8e0db3360fbe6770137d02e21a1a45c209bf2b1b03d90d4"}, + {file = "types-pyOpenSSL-23.2.0.2.tar.gz", hash = "sha256:6a010dac9ecd42b582d7dd2cc3e9e40486b79b3b64bb2fffba1474ff96af906d"}, + {file = "types_pyOpenSSL-23.2.0.2-py3-none-any.whl", hash = "sha256:19536aa3debfbe25a918cf0d898e9f5fbbe6f3594a429da7914bf331deb1b342"}, ] [package.dependencies] From 35d260d0650820b905e04fa53fa97379b1d0c63d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Aug 2023 10:41:44 +0100 Subject: [PATCH 336/562] Bump click from 8.1.6 to 8.1.7 (#16145) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index b8cf6f2850..1f3dcfe352 100644 --- a/poetry.lock +++ b/poetry.lock @@ -397,13 +397,13 @@ files = [ [[package]] name = "click" -version = "8.1.6" +version = "8.1.7" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" files = [ - {file = "click-8.1.6-py3-none-any.whl", hash = "sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5"}, - {file = "click-8.1.6.tar.gz", hash = "sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd"}, + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, ] [package.dependencies] From 1e5a0e07a76f3f65ef746c6d7ecad301cd82d181 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Aug 2023 10:44:14 +0100 Subject: [PATCH 337/562] Bump pygithub from 1.59.0 to 1.59.1 (#16144) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 1f3dcfe352..6b60e04f97 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1881,13 +1881,13 @@ email = ["email-validator (>=1.0.3)"] [[package]] name = "pygithub" -version = "1.59.0" +version = "1.59.1" description = "Use the full Github API v3" optional = false python-versions = ">=3.7" files = [ - {file = "PyGithub-1.59.0-py3-none-any.whl", hash = "sha256:126bdbae72087d8d038b113aab6b059b4553cb59348e3024bb1a1cae406ace9e"}, - {file = "PyGithub-1.59.0.tar.gz", 
hash = "sha256:6e05ff49bac3caa7d1d6177a10c6e55a3e20c85b92424cc198571fd0cf786690"}, + {file = "PyGithub-1.59.1-py3-none-any.whl", hash = "sha256:3d87a822e6c868142f0c2c4bf16cce4696b5a7a4d142a7bd160e1bdf75bc54a9"}, + {file = "PyGithub-1.59.1.tar.gz", hash = "sha256:c44e3a121c15bf9d3a5cc98d94c9a047a5132a9b01d22264627f58ade9ddc217"}, ] [package.dependencies] From 79c349dfb8ff901f1728d609938fb1cd50ddce1e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Aug 2023 10:45:18 +0100 Subject: [PATCH 338/562] Bump ijson from 3.2.1 to 3.2.3 (#16143) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 158 ++++++++++++++++++++++++++-------------------------- 1 file changed, 79 insertions(+), 79 deletions(-) diff --git a/poetry.lock b/poetry.lock index 6b60e04f97..e62c10da9f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -726,89 +726,89 @@ files = [ [[package]] name = "ijson" -version = "3.2.1" +version = "3.2.3" description = "Iterative JSON parser with standard Python iterator interfaces" optional = false python-versions = "*" files = [ - {file = "ijson-3.2.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6f827f6961f093e1055a2be0c3137f0e7d667979da455ac9648f72d4a2bb8970"}, - {file = "ijson-3.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b6e51f4497065cd0d09f5e906cd538a8d22609eab716e3c883769acf147ab1b6"}, - {file = "ijson-3.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f022686c40bff3e340627a5a0c9212718d529e787ada3b76ba546d47a9ecdbbd"}, - {file = "ijson-3.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4105c15a13fa1dc24ebd3bf2e679fa14dcbfcc48bc39138a0fa3f4ddf6cc09b"}, - {file = "ijson-3.2.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:404423e666f185dfb753ddc92705c84dffdc4cc872aaf825bbe0607893cb5b02"}, - {file = "ijson-3.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39e71f32830827cf21d0233a814092e5a23668e18f52eca5cac4f670d9df1240"}, - {file = "ijson-3.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43af7ed5292caa1452747e2b62485b6c0ece4bcbc5bf6f2758abd547e4124a14"}, - {file = "ijson-3.2.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e805aa6897a11b0f73f1f6bca078981df8960aeeccf527a214f240409c742bab"}, - {file = "ijson-3.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5b2df0bd84889e9017a670328fe3e82ec509fd6744c7ac2c99c7ee2300d76afa"}, - {file = "ijson-3.2.1-cp310-cp310-win32.whl", hash = "sha256:675259c7ea7f51ffaf8cb9e79bf875e28bb09622892943f4f415588fd7ab7bec"}, - {file = "ijson-3.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:90d4b2eb771a3585c8186820fe50e3282ef62477b865e765a50a8295674abeac"}, - {file = "ijson-3.2.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fc581a61e210bf6013c1fa6536566e51127be1cfbd69539b63d8b813206d2fe0"}, - {file = "ijson-3.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75cdf7ad4c00a8f5ac94ff27e3b7c1bf5ac463f125bca2be1744c5bc9600db5c"}, - {file = "ijson-3.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:85a2bf4636ace4d92e7c5d857a1c5694f42407c868953cf2927f18127bcd0d58"}, - {file = "ijson-3.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fe0cb66e7dd4aa11da5fff60bdf5ee04819a5e6a57acf7ca12c65f7fc009afc"}, - {file = "ijson-3.2.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c6f7957ad38cb714378944032f2c2ee9c6531b5b0b38c5ccd08cedbb0ceddd02"}, - {file = 
"ijson-3.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13283d264cca8a63e5bad91e82eec39711e95893e7e8d4a419799a8c5f85203a"}, - {file = "ijson-3.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:12c24cde850fe79bc806be0e9fc38b47dd5ac0a223070ccb12e9b695425e2936"}, - {file = "ijson-3.2.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:2ce8eed838e5a0791cb5948117b5453f2b3b3c28d93d06ee2bbf2c198c47881c"}, - {file = "ijson-3.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b81c2589f191b0dc741f532be00b4bea617297dd9698431c8053e2d28272d4db"}, - {file = "ijson-3.2.1-cp311-cp311-win32.whl", hash = "sha256:ba2beac56ac96f728d0f2430e4c667c66819a423d321bb9db9ebdebd803e1b5b"}, - {file = "ijson-3.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:c71614ed4bbc6a32ff1e42d7ce92a176fb67d658913343792d2c4567aa130817"}, - {file = "ijson-3.2.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:683fc8b0ea085e75ea34044fdc70649b37367d494f132a2bd1e59d7135054d89"}, - {file = "ijson-3.2.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deeaecec2f4e20e8bec20b0a5cdc34daebe7903f2e700f7dcaef68b5925d35ea"}, - {file = "ijson-3.2.1-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11923ac3188877f19dbb7051f7345202701cc39bf8e5ac44f8ae536c9eca8c82"}, - {file = "ijson-3.2.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:400deefcdae21e90fc39c1dcfc6ba2df24537e8c65bd57b763ed5256b73ba64d"}, - {file = "ijson-3.2.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:56bc4bad53770710a3a91944fe640fdeb269987a14352b74ebbad2aa55801c00"}, - {file = "ijson-3.2.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:f5a179523e085126844c6161aabcd193dbb5747bd01fadb68e92abf048f32ec9"}, - {file = "ijson-3.2.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ee24655986e4415fbb7a0cf51445fff3072ceac0e219f4bbbd5c53535a3c5680"}, - {file = "ijson-3.2.1-cp36-cp36m-win32.whl", hash = "sha256:4a5c672b0540005c1bb0bba97aa559a87a2e4ee409fc68e2f5ba5b30f009ac99"}, - {file = "ijson-3.2.1-cp36-cp36m-win_amd64.whl", hash = "sha256:cfaf1d89b0e122e69c87a15db6d6f44feb9db96d2af7fe88cdc464177a257b5d"}, - {file = "ijson-3.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1cbd052eb67c1b3611f25974ba967886e89391faaf55afec93808c19f06ca612"}, - {file = "ijson-3.2.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f13ffc491886e5d7bde7d68712d168bce0141b2a918db1164bc8599c0123e293"}, - {file = "ijson-3.2.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc4c4fc6bafc777f8422fe36edb1cbd72a13cb29695893a064c9c95776a4bdf9"}, - {file = "ijson-3.2.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a42fcb2bf9748c26f004690b2feb6e13e4875bb7c9d83535f887c21e0a982a7c"}, - {file = "ijson-3.2.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0c92f7bc2f3a947c2ba7f7aa48382c36079f8259c930e81d9164341f9b853c45"}, - {file = "ijson-3.2.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:fd497042db562379339660e787bc8679ed3abaa740768d39bc3746e769e7c7a5"}, - {file = "ijson-3.2.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7d61c7cd8ddd75dcef818ff5a111a31b902a6a0e410ee0c2b2ecaa6dac92658a"}, - {file = "ijson-3.2.1-cp37-cp37m-win32.whl", hash = "sha256:36caf624d263fc40e7e805d759d09ea368d8cf497aecb3241ac2f0a286ad8eca"}, - {file = "ijson-3.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:32f9ed25ff80942e433119600bca13b86a8f9b8b0966edbc1d91a48ccbdd4d54"}, - {file = 
"ijson-3.2.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e89bbd747140eac3a3c9e7e5835b90d85c4a02763fc5134861bfc1ea03b66ae7"}, - {file = "ijson-3.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d69b4b1d509de36ec42a0e4af30ede39fb754e4039b2928ef7282ebc2125ffdd"}, - {file = "ijson-3.2.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e7feb0771f50deabe6ce85b210fa9e005843d3d3c60fb3315d69e1f9d0d75e0c"}, - {file = "ijson-3.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fd8148a363888054ff06eaaa1103f2f98720ab39666084a214e4fedfc13cf64"}, - {file = "ijson-3.2.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:598638dcc5141e9ae269903901877103f5362e0db4443e34721df8f8d34577b4"}, - {file = "ijson-3.2.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e979190b7d0fabca20d6b7224ab1c1aa461ad1ab72ba94f1bb1e5894cd59f342"}, - {file = "ijson-3.2.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:bc810eb80b4f486c7957201ba2a53f53ddc9b3233af67e4359e29371bf04883b"}, - {file = "ijson-3.2.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:26e758584611dfe826dd18ffd94dc0d8a062ce56e41674ad3bfa371c7b78c4b5"}, - {file = "ijson-3.2.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:24e9ae5b35b85ea094b6c36495bc856089254aed6a48bada8d7eec5a04f74439"}, - {file = "ijson-3.2.1-cp38-cp38-win32.whl", hash = "sha256:4b5dc7b5b4b8cb3087d188f37911cd67e26672d33d3571e73440de3f0a86f7e6"}, - {file = "ijson-3.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:1af94ff40609270bbb3eac47e072582bb578f5023fac8408cccd80fe5892d221"}, - {file = "ijson-3.2.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2dda67affceebc52c8bc5fe72c3a4a1e338e4d4b0497dbac5089c2d3862df214"}, - {file = "ijson-3.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bd780303ddfedc8d57cdb9f2d53a8cea2f2f4a6fb857bf8fe5a0c3ab1d4ca901"}, - {file = "ijson-3.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4fbab6af1bab88a8e46beda08cf44610eed0adb8d157a1a60b4bb6c3a121c6de"}, - {file = "ijson-3.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c97a07988a1e0ce2bc8e8a62eb5f25195a3bd58a939ac353cbc6018a548cc08d"}, - {file = "ijson-3.2.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a65671a6826ae723837143914c823ad7bcc0d1a3e38d87c71df897a2556fb48f"}, - {file = "ijson-3.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1806372008bbed9ee92db5747e38c047fa1c4ee89cb2dd5daaa57feb46ce50a"}, - {file = "ijson-3.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:91e5a8e96f78a59e2520078c227a4fec5bf91c13adeded9e33fb13981cb823c3"}, - {file = "ijson-3.2.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1f820fce8ef093718f2319ff6f1322390664659b783775919dadccb1b470153d"}, - {file = "ijson-3.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bca3e8c91a1076a20620dbaa6a2848772b0e8a4055e86d42d3fa39221b53ed1a"}, - {file = "ijson-3.2.1-cp39-cp39-win32.whl", hash = "sha256:de87f137b7438d43840f4339a37d4e6a58c987f4bb2a70609969f854f8ae20f3"}, - {file = "ijson-3.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:0caebb8350b47266a58b766ec08e1de441d6d160702c428b5cf7504d93c832c4"}, - {file = "ijson-3.2.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:37389785c1abd27fcc24800fcfa9a6b1022743413e4056507fd32356b623ff33"}, - {file = "ijson-3.2.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b364b82231d51cbeae52468c3b27e8a042e544ab764c8f3975e912cf010603f"}, - {file = 
"ijson-3.2.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a5999d0ec28a8ec47cf20c736fd4f895dc077bf6441bf237b00b074315a295d"}, - {file = "ijson-3.2.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bd481857a39305517fb6f1313d558c2dc4e78c9e9384cc5bc1c3e28f1afbedf"}, - {file = "ijson-3.2.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:545f62f12f89350d4d73f2a779cb269198ae578fac080085a1927148b803e602"}, - {file = "ijson-3.2.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4d5622505d01c2f3d7b9638c1eb8c747eb550936b505225893704289ff28576f"}, - {file = "ijson-3.2.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20293bb36423b129fad3753858ccf7b2ccb5b2c0d3759efe810d0b9d79633a7e"}, - {file = "ijson-3.2.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7cd8a4921b852fd2cb5b0c985540c97ff6893139a57fe7121d510ec5d1c0ca44"}, - {file = "ijson-3.2.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc902ff1ae1efed7d526294d7a9dd3df66d29b2cdc05fb5479838fef1327a534"}, - {file = "ijson-3.2.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:2925a7978d8170146a9cb49a15a982b71fbbf21980bf2e16cd90c528545b7c02"}, - {file = "ijson-3.2.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c21c6509f6944939399f3630c5dc424d30d71d375f6cd58f9af56158fdf7251c"}, - {file = "ijson-3.2.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5729fc7648bc972d70922d7dad15459cca3a9e5ed0328eb9ae3ffa004066194"}, - {file = "ijson-3.2.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:805a2d5ed5a15d60327bc9347f2d4125ab621fb18071db98b1c598f1ee99e8f1"}, - {file = "ijson-3.2.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d0220a4b6c63f44589e429157174e3f4b8d1e534d5fb82bdb43a7f8dd77ae4b"}, - {file = "ijson-3.2.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:271d9b7c083f65c58ff0afd9dbb5d2f3d445f734632aebfef4a261b0a337abdb"}, - {file = "ijson-3.2.1.tar.gz", hash = "sha256:8574bf19f31fab870488769ad919a80f130825236ac8bde9a733f69c2961d7a7"}, + {file = "ijson-3.2.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0a4ae076bf97b0430e4e16c9cb635a6b773904aec45ed8dcbc9b17211b8569ba"}, + {file = "ijson-3.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cfced0a6ec85916eb8c8e22415b7267ae118eaff2a860c42d2cc1261711d0d31"}, + {file = "ijson-3.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0b9d1141cfd1e6d6643aa0b4876730d0d28371815ce846d2e4e84a2d4f471cf3"}, + {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e0a27db6454edd6013d40a956d008361aac5bff375a9c04ab11fc8c214250b5"}, + {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c0d526ccb335c3c13063c273637d8611f32970603dfb182177b232d01f14c23"}, + {file = "ijson-3.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:545a30b3659df2a3481593d30d60491d1594bc8005f99600e1bba647bb44cbb5"}, + {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9680e37a10fedb3eab24a4a7e749d8a73f26f1a4c901430e7aa81b5da15f7307"}, + {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2a80c0bb1053055d1599e44dc1396f713e8b3407000e6390add72d49633ff3bb"}, + {file = "ijson-3.2.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f05ed49f434ce396ddcf99e9fd98245328e99f991283850c309f5e3182211a79"}, + 
{file = "ijson-3.2.3-cp310-cp310-win32.whl", hash = "sha256:b4eb2304573c9fdf448d3fa4a4fdcb727b93002b5c5c56c14a5ffbbc39f64ae4"}, + {file = "ijson-3.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:923131f5153c70936e8bd2dd9dcfcff43c67a3d1c789e9c96724747423c173eb"}, + {file = "ijson-3.2.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:904f77dd3d87736ff668884fe5197a184748eb0c3e302ded61706501d0327465"}, + {file = "ijson-3.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0974444c1f416e19de1e9f567a4560890095e71e81623c509feff642114c1e53"}, + {file = "ijson-3.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1a4b8eb69b6d7b4e94170aa991efad75ba156b05f0de2a6cd84f991def12ff9"}, + {file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d052417fd7ce2221114f8d3b58f05a83c1a2b6b99cafe0b86ac9ed5e2fc889df"}, + {file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b8064a85ec1b0beda7dd028e887f7112670d574db606f68006c72dd0bb0e0e2"}, + {file = "ijson-3.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaac293853f1342a8d2a45ac1f723c860f700860e7743fb97f7b76356df883a8"}, + {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6c32c18a934c1dc8917455b0ce478fd7a26c50c364bd52c5a4fb0fc6bb516af7"}, + {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:713a919e0220ac44dab12b5fed74f9130f3480e55e90f9d80f58de129ea24f83"}, + {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4a3a6a2fbbe7550ffe52d151cf76065e6b89cfb3e9d0463e49a7e322a25d0426"}, + {file = "ijson-3.2.3-cp311-cp311-win32.whl", hash = "sha256:6a4db2f7fb9acfb855c9ae1aae602e4648dd1f88804a0d5cfb78c3639bcf156c"}, + {file = "ijson-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:ccd6be56335cbb845f3d3021b1766299c056c70c4c9165fb2fbe2d62258bae3f"}, + {file = "ijson-3.2.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:eeb286639649fb6bed37997a5e30eefcacddac79476d24128348ec890b2a0ccb"}, + {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:396338a655fb9af4ac59dd09c189885b51fa0eefc84d35408662031023c110d1"}, + {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e0243d166d11a2a47c17c7e885debf3b19ed136be2af1f5d1c34212850236ac"}, + {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85afdb3f3a5d0011584d4fa8e6dccc5936be51c27e84cd2882fe904ca3bd04c5"}, + {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4fc35d569eff3afa76bfecf533f818ecb9390105be257f3f83c03204661ace70"}, + {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:455d7d3b7a6aacfb8ab1ebcaf697eedf5be66e044eac32508fccdc633d995f0e"}, + {file = "ijson-3.2.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:c63f3d57dbbac56cead05b12b81e8e1e259f14ce7f233a8cbe7fa0996733b628"}, + {file = "ijson-3.2.3-cp36-cp36m-win32.whl", hash = "sha256:a4d7fe3629de3ecb088bff6dfe25f77be3e8261ed53d5e244717e266f8544305"}, + {file = "ijson-3.2.3-cp36-cp36m-win_amd64.whl", hash = "sha256:96190d59f015b5a2af388a98446e411f58ecc6a93934e036daa75f75d02386a0"}, + {file = "ijson-3.2.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:35194e0b8a2bda12b4096e2e792efa5d4801a0abb950c48ade351d479cd22ba5"}, + {file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1053fb5f0b010ee76ca515e6af36b50d26c1728ad46be12f1f147a835341083"}, + {file = 
"ijson-3.2.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:211124cff9d9d139dd0dfced356f1472860352c055d2481459038b8205d7d742"}, + {file = "ijson-3.2.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92dc4d48e9f6a271292d6079e9fcdce33c83d1acf11e6e12696fb05c5889fe74"}, + {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3dcc33ee56f92a77f48776014ddb47af67c33dda361e84371153c4f1ed4434e1"}, + {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:98c6799925a5d1988da4cd68879b8eeab52c6e029acc45e03abb7921a4715c4b"}, + {file = "ijson-3.2.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4252e48c95cd8ceefc2caade310559ab61c37d82dfa045928ed05328eb5b5f65"}, + {file = "ijson-3.2.3-cp37-cp37m-win32.whl", hash = "sha256:644f4f03349ff2731fd515afd1c91b9e439e90c9f8c28292251834154edbffca"}, + {file = "ijson-3.2.3-cp37-cp37m-win_amd64.whl", hash = "sha256:ba33c764afa9ecef62801ba7ac0319268a7526f50f7601370d9f8f04e77fc02b"}, + {file = "ijson-3.2.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4b2ec8c2a3f1742cbd5f36b65e192028e541b5fd8c7fd97c1fc0ca6c427c704a"}, + {file = "ijson-3.2.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7dc357da4b4ebd8903e77dbcc3ce0555ee29ebe0747c3c7f56adda423df8ec89"}, + {file = "ijson-3.2.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bcc51c84bb220ac330122468fe526a7777faa6464e3b04c15b476761beea424f"}, + {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8d54b624629f9903005c58d9321a036c72f5c212701bbb93d1a520ecd15e370"}, + {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6ea7c7e3ec44742e867c72fd750c6a1e35b112f88a917615332c4476e718d40"}, + {file = "ijson-3.2.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:916acdc5e504f8b66c3e287ada5d4b39a3275fc1f2013c4b05d1ab9933671a6c"}, + {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81815b4184b85ce124bfc4c446d5f5e5e643fc119771c5916f035220ada29974"}, + {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b49fd5fe1cd9c1c8caf6c59f82b08117dd6bea2ec45b641594e25948f48f4169"}, + {file = "ijson-3.2.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:86b3c91fdcb8ffb30556c9669930f02b7642de58ca2987845b04f0d7fe46d9a8"}, + {file = "ijson-3.2.3-cp38-cp38-win32.whl", hash = "sha256:a729b0c8fb935481afe3cf7e0dadd0da3a69cc7f145dbab8502e2f1e01d85a7c"}, + {file = "ijson-3.2.3-cp38-cp38-win_amd64.whl", hash = "sha256:d34e049992d8a46922f96483e96b32ac4c9cffd01a5c33a928e70a283710cd58"}, + {file = "ijson-3.2.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9c2a12dcdb6fa28f333bf10b3a0f80ec70bc45280d8435be7e19696fab2bc706"}, + {file = "ijson-3.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1844c5b57da21466f255a0aeddf89049e730d7f3dfc4d750f0e65c36e6a61a7c"}, + {file = "ijson-3.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2ec3e5ff2515f1c40ef6a94983158e172f004cd643b9e4b5302017139b6c96e4"}, + {file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46bafb1b9959872a1f946f8dd9c6f1a30a970fc05b7bfae8579da3f1f988e598"}, + {file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab4db9fee0138b60e31b3c02fff8a4c28d7b152040553b6a91b60354aebd4b02"}, + {file = "ijson-3.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4bc87e69d1997c6a55fff5ee2af878720801ff6ab1fb3b7f94adda050651e37"}, + {file = 
"ijson-3.2.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e9fd906f0c38e9f0bfd5365e1bed98d649f506721f76bb1a9baa5d7374f26f19"}, + {file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e84d27d1acb60d9102728d06b9650e5b7e5cb0631bd6e3dfadba8fb6a80d6c2f"}, + {file = "ijson-3.2.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2cc04fc0a22bb945cd179f614845c8b5106c0b3939ee0d84ce67c7a61ac1a936"}, + {file = "ijson-3.2.3-cp39-cp39-win32.whl", hash = "sha256:e641814793a037175f7ec1b717ebb68f26d89d82cfd66f36e588f32d7e488d5f"}, + {file = "ijson-3.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:6bd3e7e91d031f1e8cea7ce53f704ab74e61e505e8072467e092172422728b22"}, + {file = "ijson-3.2.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:06f9707da06a19b01013f8c65bf67db523662a9b4a4ff027e946e66c261f17f0"}, + {file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be8495f7c13fa1f622a2c6b64e79ac63965b89caf664cc4e701c335c652d15f2"}, + {file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7596b42f38c3dcf9d434dddd50f46aeb28e96f891444c2b4b1266304a19a2c09"}, + {file = "ijson-3.2.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fbac4e9609a1086bbad075beb2ceec486a3b138604e12d2059a33ce2cba93051"}, + {file = "ijson-3.2.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:db2d6341f9cb538253e7fe23311d59252f124f47165221d3c06a7ed667ecd595"}, + {file = "ijson-3.2.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fa8b98be298efbb2588f883f9953113d8a0023ab39abe77fe734b71b46b1220a"}, + {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:674e585361c702fad050ab4c153fd168dc30f5980ef42b64400bc84d194e662d"}, + {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd12e42b9cb9c0166559a3ffa276b4f9fc9d5b4c304e5a13668642d34b48b634"}, + {file = "ijson-3.2.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d31e0d771d82def80cd4663a66de277c3b44ba82cd48f630526b52f74663c639"}, + {file = "ijson-3.2.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7ce4c70c23521179d6da842bb9bc2e36bb9fad1e0187e35423ff0f282890c9ca"}, + {file = "ijson-3.2.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:39f551a6fbeed4433c85269c7c8778e2aaea2501d7ebcb65b38f556030642c17"}, + {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b14d322fec0de7af16f3ef920bf282f0dd747200b69e0b9628117f381b7775b"}, + {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7851a341429b12d4527ca507097c959659baf5106c7074d15c17c387719ffbcd"}, + {file = "ijson-3.2.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db3bf1b42191b5cc9b6441552fdcb3b583594cb6b19e90d1578b7cbcf80d0fae"}, + {file = "ijson-3.2.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:6f662dc44362a53af3084d3765bb01cd7b4734d1f484a6095cad4cb0cbfe5374"}, + {file = "ijson-3.2.3.tar.gz", hash = "sha256:10294e9bf89cb713da05bc4790bdff616610432db561964827074898e174f917"}, ] [[package]] From 358896e1b835bf693ef40d4cf9f10077432e935b Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Mon, 21 Aug 2023 14:17:13 +0200 Subject: [PATCH 339/562] Implements a task scheduler for resumable potentially long running tasks (#15891) --- changelog.d/15891.feature | 1 + synapse/app/generic_worker.py | 2 + synapse/server.py | 7 +- 
synapse/storage/databases/main/__init__.py | 2 + .../storage/databases/main/task_scheduler.py | 202 ++++++++++ synapse/storage/schema/__init__.py | 1 + .../main/delta/80/02_scheduled_tasks.sql | 28 ++ synapse/types/__init__.py | 39 ++ synapse/util/task_scheduler.py | 364 ++++++++++++++++++ tests/util/test_task_scheduler.py | 186 +++++++++ 10 files changed, 831 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15891.feature create mode 100644 synapse/storage/databases/main/task_scheduler.py create mode 100644 synapse/storage/schema/main/delta/80/02_scheduled_tasks.sql create mode 100644 synapse/util/task_scheduler.py create mode 100644 tests/util/test_task_scheduler.py diff --git a/changelog.d/15891.feature b/changelog.d/15891.feature new file mode 100644 index 0000000000..5024b5adc4 --- /dev/null +++ b/changelog.d/15891.feature @@ -0,0 +1 @@ +Implements a task scheduler for resumable potentially long running tasks. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index dc79efcc14..d25e3548e0 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -91,6 +91,7 @@ from synapse.storage.databases.main.state import StateGroupWorkerStore from synapse.storage.databases.main.stats import StatsStore from synapse.storage.databases.main.stream import StreamWorkerStore from synapse.storage.databases.main.tags import TagsWorkerStore +from synapse.storage.databases.main.task_scheduler import TaskSchedulerWorkerStore from synapse.storage.databases.main.transactions import TransactionWorkerStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore from synapse.storage.databases.main.user_directory import UserDirectoryStore @@ -144,6 +145,7 @@ class GenericWorkerStore( TransactionWorkerStore, LockStore, SessionStore, + TaskSchedulerWorkerStore, ): # Properties that multiple storage classes define. Tell mypy what the # expected type is. 
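The class docstring in `synapse/util/task_scheduler.py` (further down in this patch) describes the intended flow: register a named action once at startup, then schedule tasks against it. As a rough orientation before the diffs, here is a hypothetical usage sketch; the handler signature, the `(status, result, error)` return convention, and the keyword arguments are assumptions drawn from that docstring rather than a definitive API:

    from synapse.types import TaskStatus

    async def do_cleanup(task):
        # One resumable unit of work; `task.params` carries whatever was
        # passed at scheduling time, and the handler may call `update_task`
        # to checkpoint partial progress before returning.
        return TaskStatus.COMPLETE, {"cleaned": True}, None

    # At synapse initialisation (NOT right before scheduling a task):
    #     hs.get_task_scheduler().register_action(do_cleanup, "do_cleanup")
    # Later, schedule a run, optionally bound to a resource id:
    #     await hs.get_task_scheduler().schedule_task(
    #         "do_cleanup", resource_id="some-resource", params={"batch_size": 100}
    #     )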
diff --git a/synapse/server.py b/synapse/server.py index e753ff0377..7cdd3ea3c2 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -142,6 +142,7 @@ from synapse.util.distributor import Distributor from synapse.util.macaroons import MacaroonGenerator from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.stringutils import random_string +from synapse.util.task_scheduler import TaskScheduler logger = logging.getLogger(__name__) @@ -360,6 +361,7 @@ class HomeServer(metaclass=abc.ABCMeta): """ for i in self.REQUIRED_ON_BACKGROUND_TASK_STARTUP: getattr(self, "get_" + i + "_handler")() + self.get_task_scheduler() def get_reactor(self) -> ISynapseReactor: """ @@ -912,6 +914,9 @@ class HomeServer(metaclass=abc.ABCMeta): """Usage metrics shared between phone home stats and the prometheus exporter.""" return CommonUsageMetricsManager(self) - @cache_in_self def get_worker_locks_handler(self) -> WorkerLocksHandler: return WorkerLocksHandler(self) + + @cache_in_self + def get_task_scheduler(self) -> TaskScheduler: + return TaskScheduler(self) diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index e17f25e87a..a85633efcd 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -70,6 +70,7 @@ from .state import StateStore from .stats import StatsStore from .stream import StreamWorkerStore from .tags import TagsStore +from .task_scheduler import TaskSchedulerWorkerStore from .transactions import TransactionWorkerStore from .ui_auth import UIAuthStore from .user_directory import UserDirectoryStore @@ -127,6 +128,7 @@ class DataStore( CacheInvalidationWorkerStore, LockStore, SessionStore, + TaskSchedulerWorkerStore, ): def __init__( self, diff --git a/synapse/storage/databases/main/task_scheduler.py b/synapse/storage/databases/main/task_scheduler.py new file mode 100644 index 0000000000..1fb3180c3c --- /dev/null +++ b/synapse/storage/databases/main/task_scheduler.py @@ -0,0 +1,202 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+from synapse.storage._base import SQLBaseStore, db_to_json
+from synapse.storage.database import (
+    DatabasePool,
+    LoggingDatabaseConnection,
+    LoggingTransaction,
+    make_in_list_sql_clause,
+)
+from synapse.types import JsonDict, JsonMapping, ScheduledTask, TaskStatus
+from synapse.util import json_encoder
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+
+class TaskSchedulerWorkerStore(SQLBaseStore):
+    def __init__(
+        self,
+        database: DatabasePool,
+        db_conn: LoggingDatabaseConnection,
+        hs: "HomeServer",
+    ):
+        super().__init__(database, db_conn, hs)
+
+    @staticmethod
+    def _convert_row_to_task(row: Dict[str, Any]) -> ScheduledTask:
+        row["status"] = TaskStatus(row["status"])
+        if row["params"] is not None:
+            row["params"] = db_to_json(row["params"])
+        if row["result"] is not None:
+            row["result"] = db_to_json(row["result"])
+        return ScheduledTask(**row)
+
+    async def get_scheduled_tasks(
+        self,
+        *,
+        actions: Optional[List[str]] = None,
+        resource_id: Optional[str] = None,
+        statuses: Optional[List[TaskStatus]] = None,
+        max_timestamp: Optional[int] = None,
+    ) -> List[ScheduledTask]:
+        """Get a list of scheduled tasks from the DB.
+
+        Args:
+            actions: Limit the returned tasks to those specific action names
+            resource_id: Limit the returned tasks to the specific resource id, if specified
+            statuses: Limit the returned tasks to the specific statuses
+            max_timestamp: Limit the returned tasks to the ones that have
+                a timestamp lower than or equal to the specified one
+
+        Returns: a list of `ScheduledTask`, ordered by increasing timestamps
+        """
+
+        def get_scheduled_tasks_txn(txn: LoggingTransaction) -> List[Dict[str, Any]]:
+            clauses: List[str] = []
+            args: List[Any] = []
+            if resource_id:
+                clauses.append("resource_id = ?")
+                args.append(resource_id)
+            if actions is not None:
+                clause, temp_args = make_in_list_sql_clause(
+                    txn.database_engine, "action", actions
+                )
+                clauses.append(clause)
+                args.extend(temp_args)
+            if statuses is not None:
+                clause, temp_args = make_in_list_sql_clause(
+                    txn.database_engine, "status", statuses
+                )
+                clauses.append(clause)
+                args.extend(temp_args)
+            if max_timestamp is not None:
+                clauses.append("timestamp <= ?")
+                args.append(max_timestamp)
+
+            sql = "SELECT * FROM scheduled_tasks"
+            if clauses:
+                sql = sql + " WHERE " + " AND ".join(clauses)
+
+            sql = sql + " ORDER BY timestamp"
+
+            txn.execute(sql, args)
+            return self.db_pool.cursor_to_dict(txn)
+
+        rows = await self.db_pool.runInteraction(
+            "get_scheduled_tasks", get_scheduled_tasks_txn
+        )
+        return [TaskSchedulerWorkerStore._convert_row_to_task(row) for row in rows]
+
+    async def insert_scheduled_task(self, task: ScheduledTask) -> None:
+        """Insert a specified `ScheduledTask` in the DB.
+
+        Args:
+            task: the `ScheduledTask` to insert
+        """
+        await self.db_pool.simple_insert(
+            "scheduled_tasks",
+            {
+                "id": task.id,
+                "action": task.action,
+                "status": task.status,
+                "timestamp": task.timestamp,
+                "resource_id": task.resource_id,
+                "params": None
+                if task.params is None
+                else json_encoder.encode(task.params),
+                "result": None
+                if task.result is None
+                else json_encoder.encode(task.result),
+                "error": task.error,
+            },
+            desc="insert_scheduled_task",
+        )
+
+    async def update_scheduled_task(
+        self,
+        id: str,
+        timestamp: int,
+        *,
+        status: Optional[TaskStatus] = None,
+        result: Optional[JsonMapping] = None,
+        error: Optional[str] = None,
+    ) -> bool:
+        """Update a scheduled task in the DB with some new value(s).
+ + Args: + id: id of the `ScheduledTask` to update + timestamp: new timestamp of the task + status: new status of the task + result: new result of the task + error: new error of the task + + Returns: `False` if no matching row was found, `True` otherwise + """ + updatevalues: JsonDict = {"timestamp": timestamp} + if status is not None: + updatevalues["status"] = status + if result is not None: + updatevalues["result"] = json_encoder.encode(result) + if error is not None: + updatevalues["error"] = error + nb_rows = await self.db_pool.simple_update( + "scheduled_tasks", + {"id": id}, + updatevalues, + desc="update_scheduled_task", + ) + return nb_rows > 0 + + async def get_scheduled_task(self, id: str) -> Optional[ScheduledTask]: + """Get a specific `ScheduledTask` from its id. + + Args: + id: the id of the task to retrieve + + Returns: the task if available, `None` otherwise + """ + row = await self.db_pool.simple_select_one( + table="scheduled_tasks", + keyvalues={"id": id}, + retcols=( + "id", + "action", + "status", + "timestamp", + "resource_id", + "params", + "result", + "error", + ), + allow_none=True, + desc="get_scheduled_task", + ) + + return TaskSchedulerWorkerStore._convert_row_to_task(row) if row else None + + async def delete_scheduled_task(self, id: str) -> None: + """Delete a specific task from its id. + + Args: + id: the id of the task to delete + """ + await self.db_pool.simple_delete( + "scheduled_tasks", + keyvalues={"id": id}, + desc="delete_scheduled_task", + ) diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 7de9949a5b..649d3c8e9f 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -113,6 +113,7 @@ Changes in SCHEMA_VERSION = 79 Changes in SCHEMA_VERSION = 80 - The event_txn_id_device_id is always written to for new events. + - Add tables for the task scheduler. """ diff --git a/synapse/storage/schema/main/delta/80/02_scheduled_tasks.sql b/synapse/storage/schema/main/delta/80/02_scheduled_tasks.sql new file mode 100644 index 0000000000..286d109ed7 --- /dev/null +++ b/synapse/storage/schema/main/delta/80/02_scheduled_tasks.sql @@ -0,0 +1,28 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- cf ScheduledTask docstring for the meaning of the fields. 
+CREATE TABLE IF NOT EXISTS scheduled_tasks(
+    id TEXT PRIMARY KEY,
+    action TEXT NOT NULL,
+    status TEXT NOT NULL,
+    timestamp BIGINT NOT NULL,
+    resource_id TEXT,
+    params TEXT,
+    result TEXT,
+    error TEXT
+);
+
+CREATE INDEX IF NOT EXISTS scheduled_tasks_status ON scheduled_tasks(status);
diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py
index 073f682aca..e750417189 100644
--- a/synapse/types/__init__.py
+++ b/synapse/types/__init__.py
@@ -15,6 +15,7 @@
 import abc
 import re
 import string
+from enum import Enum
 from typing import (
     TYPE_CHECKING,
     AbstractSet,
@@ -969,3 +970,41 @@ class UserProfile(TypedDict):
 class RetentionPolicy:
     min_lifetime: Optional[int] = None
     max_lifetime: Optional[int] = None
+
+
+class TaskStatus(str, Enum):
+    """Status of a scheduled task"""
+
+    # Task is scheduled but not active
+    SCHEDULED = "scheduled"
+    # Task is active and probably running, and if not
+    # will be run on next scheduler loop run
+    ACTIVE = "active"
+    # Task has completed successfully
+    COMPLETE = "complete"
+    # Task is over and either returned a failed status, or had an exception
+    FAILED = "failed"
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class ScheduledTask:
+    """Description of a scheduled task"""
+
+    # Id used to identify the task
+    id: str
+    # Name of the action to be run by this task
+    action: str
+    # Current status of this task
+    status: TaskStatus
+    # If the status is SCHEDULED then this represents when it should be launched,
+    # otherwise it represents the last time this task got a change of state.
+    # In milliseconds since epoch in system time timezone, usually UTC.
+    timestamp: int
+    # Optionally bind a task to some resource id for easy retrieval
+    resource_id: Optional[str]
+    # Optional parameters that will be passed to the function run by the task
+    params: Optional[JsonMapping]
+    # Optional result that can be updated by the running task
+    result: Optional[JsonMapping]
+    # Optional error that should be assigned a value when the status is FAILED
+    error: Optional[str]
diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py
new file mode 100644
index 0000000000..773a8327f6
--- /dev/null
+++ b/synapse/util/task_scheduler.py
@@ -0,0 +1,364 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Set, Tuple
+
+from prometheus_client import Gauge
+
+from twisted.python.failure import Failure
+
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.types import JsonMapping, ScheduledTask, TaskStatus
+from synapse.util.stringutils import random_string
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+running_tasks_gauge = Gauge(
+    "synapse_scheduler_running_tasks",
+    "The number of concurrent running tasks handled by the TaskScheduler",
+)
+
+
+class TaskScheduler:
+    """
+    This is a simple task scheduler aimed at resumable tasks: usually we use `run_in_background`
+    to launch a background task, or Twisted `deferLater` if we want to do so later on.
+
+    The problem with that is that the tasks will just stop and never be resumed if synapse
+    is stopped for whatever reason.
+
+    How this works:
+    - A function mapped to a named action should first be registered with `register_action`.
+    This function will be called when trying to resume tasks after a synapse shutdown,
+    so this registration should happen when synapse is initialised, NOT right before scheduling
+    a task.
+    - A task can then be launched using this named action with `schedule_task`. A `params` dict
+    can be passed, and it will be available to the registered function when launched. This task
+    can be launched either now-ish, or later on by giving a `timestamp` parameter.
+
+    The function may call `update_task` at any time to update the `result` of the task,
+    and this can be used to resume the task at a specific point and/or to convey a result to
+    the code launching the task.
+    You can also specify the `result` (and/or an `error`) when returning from the function.
+
+    The reconciliation loop runs every `SCHEDULE_INTERVAL_MS` ms (one minute), so this is
+    not a precise scheduler. When wanting to launch now, the launch will still not happen
+    before the next loop run.
+
+    Tasks will be run on the worker specified with `run_background_tasks_on` config,
+    or the main one by default.
+    There is a limit of 10 concurrent tasks, so tasks may be delayed if the pool is already
+    full. In this regard, please take great care that scheduled tasks can actually finish.
+    For now there is no mechanism to stop a running task if it is stuck.
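+
+    A minimal usage sketch (the action name, task function and params below are
+    purely illustrative; `scheduler` stands for the instance obtained via
+    `hs.get_task_scheduler()`):
+
+        async def _do_work(
+            task: ScheduledTask, first_launch: bool
+        ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
+            # Copy the params into the result and finish successfully.
+            return TaskStatus.COMPLETE, task.params, None
+
+        scheduler.register_action(_do_work, "do_work")
+        task_id = await scheduler.schedule_task("do_work", params={"val": 1})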
+    """
+
+    # Precision of the scheduler, evaluation of tasks to run will only happen
+    # every `SCHEDULE_INTERVAL_MS` ms
+    SCHEDULE_INTERVAL_MS = 1 * 60 * 1000  # 1mn
+    # Time before a complete or failed task is deleted from the DB
+    KEEP_TASKS_FOR_MS = 7 * 24 * 60 * 60 * 1000  # 1 week
+    # Maximum number of tasks that can run at the same time
+    MAX_CONCURRENT_RUNNING_TASKS = 10
+    # Time from the last task update after which we will log a warning
+    LAST_UPDATE_BEFORE_WARNING_MS = 24 * 60 * 60 * 1000  # 24hrs
+
+    def __init__(self, hs: "HomeServer"):
+        self._store = hs.get_datastores().main
+        self._clock = hs.get_clock()
+        self._running_tasks: Set[str] = set()
+        # A map between action names and their registered function
+        self._actions: Dict[
+            str,
+            Callable[
+                [ScheduledTask, bool],
+                Awaitable[Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]],
+            ],
+        ] = {}
+        self._run_background_tasks = hs.config.worker.run_background_tasks
+
+        if self._run_background_tasks:
+            self._clock.looping_call(
+                run_as_background_process,
+                TaskScheduler.SCHEDULE_INTERVAL_MS,
+                "handle_scheduled_tasks",
+                self._handle_scheduled_tasks,
+            )
+
+    def register_action(
+        self,
+        function: Callable[
+            [ScheduledTask, bool],
+            Awaitable[Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]],
+        ],
+        action_name: str,
+    ) -> None:
+        """Register a function to be executed when an action is scheduled with
+        the specified action name.
+
+        Actions need to be registered as early as possible so that a resumed action
+        can find its matching function. It's usually better to NOT do that right before
+        calling `schedule_task` but rather in an `__init__` method.
+
+        Args:
+            function: The function to be executed for this action. The parameters
+                passed to the function when launched are the `ScheduledTask` being run,
+                and a `first_launch` boolean to signal if it's a resumed task or the first
+                launch of it. The function should return a tuple of new `status`, `result`
+                and `error` as specified in `ScheduledTask`.
+            action_name: The name of the action to be associated with the function
+        """
+        self._actions[action_name] = function
+
+    async def schedule_task(
+        self,
+        action: str,
+        *,
+        resource_id: Optional[str] = None,
+        timestamp: Optional[int] = None,
+        params: Optional[JsonMapping] = None,
+    ) -> str:
+        """Schedule a new potentially resumable task. A function matching the specified
+        `action` should have been previously registered with `register_action`.
+
+        Args:
+            action: the name of a previously registered action
+            resource_id: a task can be associated with a resource id to facilitate
+                getting all tasks associated with a specific resource
+            timestamp: if `None`, the task will be launched as soon as possible, otherwise it
+                will be launched as soon as possible after the `timestamp` value.
+                Note that this scheduler is not meant to be precise, and the scheduling
+                could be delayed if too many tasks are already running
+            params: a set of parameters that can be easily accessed from inside the
+                executed function
+
+        Returns:
+            The id of the scheduled task
+        """
+        if action not in self._actions:
+            raise Exception(
+                f"No function associated with action {action} of the scheduled task"
+            )
+
+        if timestamp is None or timestamp < self._clock.time_msec():
+            timestamp = self._clock.time_msec()
+
+        task = ScheduledTask(
+            random_string(16),
+            action,
+            TaskStatus.SCHEDULED,
+            timestamp,
+            resource_id,
+            params,
+            result=None,
+            error=None,
+        )
+        await self._store.insert_scheduled_task(task)
+
+        return task.id
+
+    async def update_task(
+        self,
+        id: str,
+        *,
+        timestamp: Optional[int] = None,
+        status: Optional[TaskStatus] = None,
+        result: Optional[JsonMapping] = None,
+        error: Optional[str] = None,
+    ) -> bool:
+        """Update some task-associated values. This is exposed publicly so it can
+        be used inside task functions, mainly to update the result and be able to
+        resume a task at a specific step after a restart of synapse.
+
+        It can also be used to stage a task, by setting the `status` to `SCHEDULED` with
+        a new timestamp.
+
+        The `status` can only be set to `ACTIVE` or `SCHEDULED`; `COMPLETE` and `FAILED`
+        are terminal statuses and can only be set by returning them from the function.
+
+        Args:
+            id: the id of the task to update
+            timestamp: useful to schedule a new stage of the task at a later date
+            status: the new `TaskStatus` of the task
+            result: the new result of the task
+            error: the new error of the task
+
+        Returns:
+            `True` if the update was successful, `False` if no matching task was found
+        """
+        if status == TaskStatus.COMPLETE or status == TaskStatus.FAILED:
+            raise Exception(
+                "update_task can't be called with a FAILED or COMPLETE status"
+            )
+
+        if timestamp is None:
+            timestamp = self._clock.time_msec()
+        return await self._store.update_scheduled_task(
+            id,
+            timestamp,
+            status=status,
+            result=result,
+            error=error,
+        )
+
+    async def get_task(self, id: str) -> Optional[ScheduledTask]:
+        """Get a specific task description by id.
+
+        Args:
+            id: the id of the task to retrieve
+
+        Returns:
+            The task information or `None` if it doesn't exist or it has
+            already been removed because it's too old.
+        """
+        return await self._store.get_scheduled_task(id)
+
+    async def get_tasks(
+        self,
+        *,
+        actions: Optional[List[str]] = None,
+        resource_id: Optional[str] = None,
+        statuses: Optional[List[TaskStatus]] = None,
+        max_timestamp: Optional[int] = None,
+    ) -> List[ScheduledTask]:
+        """Get a list of tasks. Returns all the tasks if no args are provided.
+
+        If an arg is `None` all tasks matching the other args will be selected.
+        If an arg is an empty list, the corresponding value of the task needs
+        to be `None` to be selected.
+
+        Args:
+            actions: Limit the returned tasks to those specific action names
+            resource_id: Limit the returned tasks to the specific resource id, if specified
+            statuses: Limit the returned tasks to the specific statuses
+            max_timestamp: Limit the returned tasks to the ones that have
+                a timestamp no later than the specified one
+
+        Returns:
+            A list of `ScheduledTask`, ordered by increasing timestamps
+        """
+        return await self._store.get_scheduled_tasks(
+            actions=actions,
+            resource_id=resource_id,
+            statuses=statuses,
+            max_timestamp=max_timestamp,
+        )
+
+    async def delete_task(self, id: str) -> None:
+        """Delete a task. Running tasks can't be deleted.
+
+        Can only be called from the worker handling the task scheduling.
+
+        Args:
+            id: id of the task to delete
+        """
+        if self.task_is_running(id):
+            raise Exception(f"Task {id} is currently running and can't be deleted")
+        await self._store.delete_scheduled_task(id)
+
+    def task_is_running(self, id: str) -> bool:
+        """Check if a task is currently running.
+
+        Can only be called from the worker handling the task scheduling.
+
+        Args:
+            id: id of the task to check
+        """
+        assert self._run_background_tasks
+        return id in self._running_tasks
+
+    async def _handle_scheduled_tasks(self) -> None:
+        """Main loop taking care of launching tasks and cleaning up old ones."""
+        await self._launch_scheduled_tasks()
+        await self._clean_scheduled_tasks()
+
+    async def _launch_scheduled_tasks(self) -> None:
+        """Retrieve and launch scheduled tasks that should be running now."""
+        for task in await self.get_tasks(statuses=[TaskStatus.ACTIVE]):
+            if not self.task_is_running(task.id):
+                if (
+                    len(self._running_tasks)
+                    < TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS
+                ):
+                    await self._launch_task(task, first_launch=False)
+            else:
+                if (
+                    self._clock.time_msec()
+                    > task.timestamp + TaskScheduler.LAST_UPDATE_BEFORE_WARNING_MS
+                ):
+                    logger.warning(
+                        f"Task {task.id} (action {task.action}) has seen no update for more than 24h and may be stuck"
+                    )
+        for task in await self.get_tasks(
+            statuses=[TaskStatus.SCHEDULED], max_timestamp=self._clock.time_msec()
+        ):
+            if (
+                not self.task_is_running(task.id)
+                and len(self._running_tasks)
+                < TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS
+            ):
+                await self._launch_task(task, first_launch=True)
+
+        running_tasks_gauge.set(len(self._running_tasks))
+
+    async def _clean_scheduled_tasks(self) -> None:
+        """Clean up old complete or failed jobs to avoid cluttering the DB."""
+        for task in await self._store.get_scheduled_tasks(
+            statuses=[TaskStatus.FAILED, TaskStatus.COMPLETE]
+        ):
+            # FAILED and COMPLETE tasks should never be running
+            assert not self.task_is_running(task.id)
+            if (
+                self._clock.time_msec()
+                > task.timestamp + TaskScheduler.KEEP_TASKS_FOR_MS
+            ):
+                await self._store.delete_scheduled_task(task.id)
+
+    async def _launch_task(self, task: ScheduledTask, first_launch: bool) -> None:
+        """Launch a scheduled task now.
+
+        Args:
+            task: the task to launch
+            first_launch: `True` if it's the first time it is launched, `False` otherwise
+        """
+        assert task.action in self._actions
+
+        function = self._actions[task.action]
+
+        async def wrapper() -> None:
+            try:
+                (status, result, error) = await function(task, first_launch)
+            except Exception:
+                f = Failure()
+                logger.error(
+                    f"scheduled task {task.id} failed",
+                    exc_info=(f.type, f.value, f.getTracebackObject()),
+                )
+                status = TaskStatus.FAILED
+                result = None
+                error = f.getErrorMessage()
+
+            await self._store.update_scheduled_task(
+                task.id,
+                self._clock.time_msec(),
+                status=status,
+                result=result,
+                error=error,
+            )
+            self._running_tasks.remove(task.id)
+
+        self._running_tasks.add(task.id)
+        await self.update_task(task.id, status=TaskStatus.ACTIVE)
+        description = f"{task.id}-{task.action}"
+        run_as_background_process(description, wrapper)
diff --git a/tests/util/test_task_scheduler.py b/tests/util/test_task_scheduler.py
new file mode 100644
index 0000000000..3a97559bf0
--- /dev/null
+++ b/tests/util/test_task_scheduler.py
@@ -0,0 +1,186 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional, Tuple
+
+from twisted.internet.task import deferLater
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.server import HomeServer
+from synapse.types import JsonMapping, ScheduledTask, TaskStatus
+from synapse.util import Clock
+from synapse.util.task_scheduler import TaskScheduler
+
+from tests import unittest
+
+
+class TestTaskScheduler(unittest.HomeserverTestCase):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.task_scheduler = hs.get_task_scheduler()
+        self.task_scheduler.register_action(self._test_task, "_test_task")
+        self.task_scheduler.register_action(self._sleeping_task, "_sleeping_task")
+        self.task_scheduler.register_action(self._raising_task, "_raising_task")
+        self.task_scheduler.register_action(self._resumable_task, "_resumable_task")
+
+    async def _test_task(
+        self, task: ScheduledTask, first_launch: bool
+    ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
+        # This test task will copy the parameters to the result
+        result = None
+        if task.params:
+            result = task.params
+        return (TaskStatus.COMPLETE, result, None)
+
+    def test_schedule_task(self) -> None:
+        """Schedule a task in the future with some parameters to be copied as a result and check that it executes correctly.
+        Also check that it gets removed after `KEEP_TASKS_FOR_MS`."""
+        timestamp = self.clock.time_msec() + 30 * 1000
+        task_id = self.get_success(
+            self.task_scheduler.schedule_task(
+                "_test_task",
+                timestamp=timestamp,
+                params={"val": 1},
+            )
+        )
+
+        task = self.get_success(self.task_scheduler.get_task(task_id))
+        assert task is not None
+        self.assertEqual(task.status, TaskStatus.SCHEDULED)
+        self.assertIsNone(task.result)
+
+        # The timestamp being 30s from now, the task should have been executed
+        # after the first scheduling loop is run
+        self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS / 1000)
+
+        task = self.get_success(self.task_scheduler.get_task(task_id))
+        assert task is not None
+        self.assertEqual(task.status, TaskStatus.COMPLETE)
+        assert task.result is not None
+        # The passed parameter should have been copied to the result
+        self.assertTrue(task.result.get("val") == 1)
+
+        # Let's wait for the complete task to be deleted and hence unavailable
+        self.reactor.advance((TaskScheduler.KEEP_TASKS_FOR_MS / 1000) + 1)
+
+        task = self.get_success(self.task_scheduler.get_task(task_id))
+        self.assertIsNone(task)
+
+    async def _sleeping_task(
+        self, task: ScheduledTask, first_launch: bool
+    ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
+        # Sleep for a second
+        await deferLater(self.reactor, 1, lambda: None)
+        return TaskStatus.COMPLETE, None, None
+
+    def test_schedule_lot_of_tasks(self) -> None:
+        """Schedule more than `TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS` tasks and check the behavior."""
+        timestamp = self.clock.time_msec() + 30 * 1000
+        task_ids = []
+        for i in range(TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS + 1):
+            task_ids.append(
+                self.get_success(
+                    self.task_scheduler.schedule_task(
+                        "_sleeping_task",
+                        timestamp=timestamp,
+                        params={"val": i},
+                    )
+                )
+            )
+
+        # 
The timestamp being 30s from now, the tasks should have been executed
+        # after the first scheduling loop is run
+        self.reactor.advance((TaskScheduler.SCHEDULE_INTERVAL_MS / 1000))
+
+        # This is to give the time to the sleeping tasks to finish
+        self.reactor.advance(1)
+
+        # Check that only MAX_CONCURRENT_RUNNING_TASKS tasks have run and that one
+        # is still scheduled.
+        tasks = [
+            self.get_success(self.task_scheduler.get_task(task_id))
+            for task_id in task_ids
+        ]
+
+        self.assertEquals(
+            len(
+                [t for t in tasks if t is not None and t.status == TaskStatus.COMPLETE]
+            ),
+            TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS,
+        )
+
+        scheduled_tasks = [
+            t for t in tasks if t is not None and t.status == TaskStatus.SCHEDULED
+        ]
+        self.assertEquals(len(scheduled_tasks), 1)
+
+        self.reactor.advance((TaskScheduler.SCHEDULE_INTERVAL_MS / 1000))
+        self.reactor.advance(1)
+
+        # Check that the last task has been properly executed after the next scheduler loop run
+        prev_scheduled_task = self.get_success(
+            self.task_scheduler.get_task(scheduled_tasks[0].id)
+        )
+        assert prev_scheduled_task is not None
+        self.assertEquals(
+            prev_scheduled_task.status,
+            TaskStatus.COMPLETE,
+        )
+
+    async def _raising_task(
+        self, task: ScheduledTask, first_launch: bool
+    ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
+        raise Exception("raising")
+
+    def test_schedule_raising_task(self) -> None:
+        """Schedule a task that raises an exception and check that it fails and reports the exception content."""
+        task_id = self.get_success(self.task_scheduler.schedule_task("_raising_task"))
+
+        self.reactor.advance((TaskScheduler.SCHEDULE_INTERVAL_MS / 1000))
+
+        task = self.get_success(self.task_scheduler.get_task(task_id))
+        assert task is not None
+        self.assertEqual(task.status, TaskStatus.FAILED)
+        self.assertEqual(task.error, "raising")
+
+    async def _resumable_task(
+        self, task: ScheduledTask, first_launch: bool
+    ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
+        if task.result and "in_progress" in task.result:
+            return TaskStatus.COMPLETE, {"success": True}, None
+        else:
+            await self.task_scheduler.update_task(task.id, result={"in_progress": True})
+            # Await forever to simulate an aborted task because of a restart
+            await deferLater(self.reactor, 2**16, lambda: None)
+            # This should never be called
+            return TaskStatus.ACTIVE, None, None
+
+    def test_schedule_resumable_task(self) -> None:
+        """Schedule a resumable task and check that it gets properly resumed and completes after simulating a synapse restart."""
+        task_id = self.get_success(self.task_scheduler.schedule_task("_resumable_task"))
+
+        self.reactor.advance((TaskScheduler.SCHEDULE_INTERVAL_MS / 1000))
+
+        task = self.get_success(self.task_scheduler.get_task(task_id))
+        assert task is not None
+        self.assertEqual(task.status, TaskStatus.ACTIVE)
+
+        # Simulate a synapse restart by emptying the set of running tasks
+        self.task_scheduler._running_tasks = set()
+        self.reactor.advance((TaskScheduler.SCHEDULE_INTERVAL_MS / 1000))
+
+        task = self.get_success(self.task_scheduler.get_task(task_id))
+        assert task is not None
+        self.assertEqual(task.status, TaskStatus.COMPLETE)
+        assert task.result is not None
+        self.assertTrue(task.result.get("success"))
From d6ae4041a4c014a8c234f1afccc80867bf5b7df0 Mon Sep 17 00:00:00 2001
From: Maximilian Bosch
Date: Mon, 21 Aug 2023 21:32:17 +0200
Subject: [PATCH 340/562] Add `client_secret_path` as alternative for
 `client_secret` for OIDC config (#16030)

---
 changelog.d/16030.feature                        |  1 +
 docs/usage/configuration/config_documentation.md |  8 ++++++++
 synapse/config/oidc.py                           | 16 +++++++++++++++-
 3 files changed, 24 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/16030.feature

diff --git a/changelog.d/16030.feature b/changelog.d/16030.feature
new file mode 100644
index 0000000000..c2f068085f
--- /dev/null
+++ b/changelog.d/16030.feature
@@ -0,0 +1 @@
+Allow specifying `client_secret_path` as alternative to `client_secret` for OIDC providers. This avoids leaking the client secret in the homeserver config. Contributed by @Ma27.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 6601bba9f2..743c51d76a 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -3204,6 +3204,14 @@ Options for each entry include:

 * `client_secret`: oauth2 client secret to use. May be omitted if
    `client_secret_jwt_key` is given, or if `client_auth_method` is 'none'.
+   Must be omitted if `client_secret_path` is specified.
+
+* `client_secret_path`: path to the oauth2 client secret to use. This keeps
+   the secret out of the config file itself.
+   Mutually exclusive with `client_secret`. Can be omitted if
+   `client_secret_jwt_key` is specified.
+
+   *Added in Synapse 1.91.0.*

 * `client_secret_jwt_key`: Alternative to client_secret: details of a key used
    to create a JSON Web Token to be used as an OAuth2 client secret. If
diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py
index 77c1d1dc8e..574d6afb95 100644
--- a/synapse/config/oidc.py
+++ b/synapse/config/oidc.py
@@ -280,6 +280,20 @@ def _parse_oidc_config_dict(
         for x in oidc_config.get("attribute_requirements", [])
     ]

+    # Read from either `client_secret_path` or `client_secret`. If both exist, error.
+    client_secret = oidc_config.get("client_secret")
+    client_secret_path = oidc_config.get("client_secret_path")
+    if client_secret_path is not None:
+        if client_secret is None:
+            client_secret = read_file(
+                client_secret_path, config_path + ("client_secret_path",)
+            ).rstrip("\n")
+        else:
+            raise ConfigError(
+                "Cannot specify both client_secret and client_secret_path",
+                config_path + ("client_secret",),
+            )
+
     return OidcProviderConfig(
         idp_id=idp_id,
         idp_name=oidc_config.get("idp_name", "OIDC"),
@@ -288,7 +302,7 @@ def _parse_oidc_config_dict(
         discover=oidc_config.get("discover", True),
         issuer=oidc_config["issuer"],
         client_id=oidc_config["client_id"],
-        client_secret=oidc_config.get("client_secret"),
+        client_secret=client_secret,
         client_secret_jwt_key=client_secret_jwt_key,
         client_auth_method=oidc_config.get("client_auth_method", "client_secret_basic"),
         pkce_method=oidc_config.get("pkce_method", "auto"),
From 7dbac123f98a2d59d09a63efe4543ee850a8d630 Mon Sep 17 00:00:00 2001
From: Hugh Nimmo-Smith
Date: Tue, 22 Aug 2023 12:42:08 +0100
Subject: [PATCH 341/562] Disallow user_consent where experimental MSC3861 is
 enabled (#16127)

---
 changelog.d/16127.bugfix              |  1 +
 synapse/config/experimental.py        |  7 +++++++
 tests/config/test_oauth_delegation.py | 16 ++++++++++++++++
 3 files changed, 24 insertions(+)
 create mode 100644 changelog.d/16127.bugfix

diff --git a/changelog.d/16127.bugfix b/changelog.d/16127.bugfix
new file mode 100644
index 0000000000..0308fdfd45
--- /dev/null
+++ b/changelog.d/16127.bugfix
@@ -0,0 +1 @@
+User consent features cannot be enabled when using experimental MSC3861.
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index ac9449b18f..d4cf9a0555 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -173,6 +173,13 @@ class MSC3861:
                 ("enable_registration",),
             )

+        # We only need to check the user consent version, as it must be set if the user_consent section is present in the config
+        if root.consent.user_consent_version is not None:
+            raise ConfigError(
+                "User consent cannot be enabled when OAuth delegation is enabled",
+                ("user_consent",),
+            )
+
         if (
             root.oidc.oidc_enabled
             or root.saml2.saml2_enabled
diff --git a/tests/config/test_oauth_delegation.py b/tests/config/test_oauth_delegation.py
index f57c813a58..35f7b85dc7 100644
--- a/tests/config/test_oauth_delegation.py
+++ b/tests/config/test_oauth_delegation.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import os
 from unittest.mock import Mock

 from synapse.config import ConfigError
@@ -167,6 +168,21 @@ class MSC3861OAuthDelegation(TestCase):
         with self.assertRaises(ConfigError):
             self.parse_config()

+    def test_user_consent_cannot_be_enabled(self) -> None:
+        tmpdir = self.mktemp()
+        os.mkdir(tmpdir)
+        self.config_dict["user_consent"] = {
+            "require_at_registration": True,
+            "version": "1",
+            "template_dir": tmpdir,
+            "server_notice_content": {
+                "msgtype": "m.text",
+                "body": "foo",
+            },
+        }
+        with self.assertRaises(ConfigError):
+            self.parse_config()
+
     def test_password_config_cannot_be_enabled(self) -> None:
         self.config_dict["password_config"] = {"enabled": True}
         with self.assertRaises(ConfigError):
From 6d7c63fcc6e4e8f5bb24f471c5308d4cf4acafab Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 22 Aug 2023 07:46:32 -0400
Subject: [PATCH 342/562] Properly call setup_background_tasks in unit tests.
 (#16150)

This should only be called on HomeServer objects which are configured
to run background tasks, which is automatically (and properly) done via
the call to setup().

---
 changelog.d/16150.misc | 1 +
 tests/server.py        | 2 --
 2 files changed, 1 insertion(+), 2 deletions(-)
 create mode 100644 changelog.d/16150.misc

diff --git a/changelog.d/16150.misc b/changelog.d/16150.misc
new file mode 100644
index 0000000000..97861282fd
--- /dev/null
+++ b/changelog.d/16150.misc
@@ -0,0 +1 @@
+Clean-up calling `setup_background_tasks` in unit tests.
diff --git a/tests/server.py b/tests/server.py
index 481fe34c5c..ff03d28864 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -1000,8 +1000,6 @@ def setup_test_homeserver(
     hs.tls_server_context_factory = Mock()

     hs.setup()
-    if homeserver_to_use == TestHomeServer:
-        hs.setup_background_tasks()

     if isinstance(db_engine, PostgresEngine):
         database_pool = hs.get_datastores().databases[0]
From bc72d803d5874112cb3acf65bb3081023cccdc28 Mon Sep 17 00:00:00 2001
From: Theodore Ni <3806110+tjni@users.noreply.github.com>
Date: Tue, 22 Aug 2023 05:51:35 -0700
Subject: [PATCH 343/562] Raise poetry-core version cap to 1.7.0 (#16152)

---
 changelog.d/16152.misc | 1 +
 pyproject.toml         | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/16152.misc

diff --git a/changelog.d/16152.misc b/changelog.d/16152.misc
new file mode 100644
index 0000000000..f8bf9f2c52
--- /dev/null
+++ b/changelog.d/16152.misc
@@ -0,0 +1 @@
+Raised the poetry-core version cap to 1.7.0.
diff --git a/pyproject.toml b/pyproject.toml index 86680cb8e5..0585a9b01e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -367,7 +367,7 @@ furo = ">=2022.12.7,<2024.0.0" # system changes. # We are happy to raise these upper bounds upon request, # provided we check that it's safe to do so (i.e. that CI passes). -requires = ["poetry-core>=1.1.0,<=1.6.0", "setuptools_rust>=1.3,<=1.6.0"] +requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.6.0"] build-backend = "poetry.core.masonry.api" From b657e89005bc48e5e061d63ae35e12bf23b81d88 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 22 Aug 2023 09:08:24 -0400 Subject: [PATCH 344/562] Fix user directory test for deactivated support user. (#16157) Support users should not be added to the user directory after being deactivated. --- changelog.d/16157.misc | 1 + tests/handlers/test_user_directory.py | 19 +++++++++++-------- 2 files changed, 12 insertions(+), 8 deletions(-) create mode 100644 changelog.d/16157.misc diff --git a/changelog.d/16157.misc b/changelog.d/16157.misc new file mode 100644 index 0000000000..c9d8999cca --- /dev/null +++ b/changelog.d/16157.misc @@ -0,0 +1 @@ +Fix assertion in user directory unit tests. diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 9785dd698b..430209705e 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -446,6 +446,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): self.assertIsNone(profile) def test_handle_user_deactivated_support_user(self) -> None: + """Ensure a support user doesn't get added to the user directory after deactivation.""" s_user_id = "@support:test" self.get_success( self.store.register_user( @@ -453,14 +454,16 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): ) ) - mock_remove_from_user_dir = Mock(return_value=make_awaitable(None)) - with patch.object( - self.store, "remove_from_user_dir", mock_remove_from_user_dir - ): - self.get_success(self.handler.handle_local_user_deactivated(s_user_id)) - # BUG: the correct spelling is assert_not_called, but that makes the test fail - # and it's not clear that this is actually the behaviour we want. - mock_remove_from_user_dir.not_called() + # The profile should not be in the directory. + profile = self.get_success(self.store._get_user_in_directory(s_user_id)) + self.assertIsNone(profile) + + # Remove the user from the directory. + self.get_success(self.handler.handle_local_user_deactivated(s_user_id)) + + # The profile should still not be in the user directory. 
+        profile = self.get_success(self.store._get_user_in_directory(s_user_id))
+        self.assertIsNone(profile)

     def test_handle_user_deactivated_regular_user(self) -> None:
         r_user_id = "@regular:test"
From 8aa5479986d910c2b404e20301911ebc77f969c1 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 22 Aug 2023 09:48:06 -0400
Subject: [PATCH 345/562] Bump serde from 1.0.183 to 1.0.184 (#16139)

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 Cargo.lock | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 79d9cefcf6..61c0f1bd04 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -332,18 +332,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"

 [[package]]
 name = "serde"
-version = "1.0.183"
+version = "1.0.184"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c"
+checksum = "2c911f4b04d7385c9035407a4eff5903bf4fe270fa046fda448b69e797f4fff0"
 dependencies = [
 "serde_derive",
]

 [[package]]
 name = "serde_derive"
-version = "1.0.183"
+version = "1.0.184"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816"
+checksum = "c1df27f5b29406ada06609b2e2f77fb34f6dbb104a457a671cc31dbed237e09e"
 dependencies = [
 "proc-macro2",
 "quote",
From 69048f7b4848ab6a4ae6cb233f8cbf36d73c0ba1 Mon Sep 17 00:00:00 2001
From: Shay
Date: Tue, 22 Aug 2023 07:15:34 -0700
Subject: [PATCH 346/562] Add an admin endpoint to allow authorizing server to
 signal token revocations (#16125)

---
 changelog.d/16125.misc                             |  1 +
 synapse/api/auth/msc3861_delegated.py              | 13 ++++
 synapse/replication/tcp/client.py                  | 12 ++++
 synapse/rest/admin/__init__.py                     |  3 +
 synapse/rest/admin/oidc.py                         | 55 ++++++++++++++++
 synapse/storage/databases/main/cache.py            | 13 ++++
 synapse/storage/databases/main/devices.py          |  9 +++
 synapse/util/caches/expiringcache.py               | 22 +++++++
 tests/handlers/test_oauth_delegation.py            | 34 +++++++++-
 .../test_intro_token_invalidation.py               | 62 +++++++++++++++++++
 10 files changed, 223 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/16125.misc
 create mode 100644 synapse/rest/admin/oidc.py
 create mode 100644 tests/replication/test_intro_token_invalidation.py

diff --git a/changelog.d/16125.misc b/changelog.d/16125.misc
new file mode 100644
index 0000000000..2f1bf23108
--- /dev/null
+++ b/changelog.d/16125.misc
@@ -0,0 +1 @@
+Add an admin endpoint to allow authorizing server to signal token revocations.
diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py
index 4bdfe31b22..14cba50c90 100644
--- a/synapse/api/auth/msc3861_delegated.py
+++ b/synapse/api/auth/msc3861_delegated.py
@@ -438,3 +438,16 @@ class MSC3861DelegatedAuth(BaseAuth):
             scope=scope,
             is_guest=(has_guest_scope and not has_user_scope),
         )
+
+    def invalidate_cached_tokens(self, keys: List[str]) -> None:
+        """
+        Invalidate the entries in the introspection token cache corresponding to the given keys
+        """
+        for key in keys:
+            self._token_cache.invalidate(key)
+
+    def invalidate_token_cache(self) -> None:
+        """
+        Invalidate the entire token cache.
+ """ + self._token_cache.invalidate_all() diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 139f57cf86..04e8cff6ea 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -26,6 +26,7 @@ from synapse.logging.context import PreserveLoggingContext, make_deferred_yielda from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.tcp.streams import ( AccountDataStream, + CachesStream, DeviceListsStream, PushersStream, PushRulesStream, @@ -73,6 +74,7 @@ class ReplicationDataHandler: self._instance_name = hs.get_instance_name() self._typing_handler = hs.get_typing_handler() self._state_storage_controller = hs.get_storage_controllers().state + self.auth = hs.get_auth() self._notify_pushers = hs.config.worker.start_pushers self._pusher_pool = hs.get_pusherpool() @@ -218,6 +220,16 @@ class ReplicationDataHandler: self._state_storage_controller.notify_event_un_partial_stated( row.event_id ) + # invalidate the introspection token cache + elif stream_name == CachesStream.NAME: + for row in rows: + if row.cache_func == "introspection_token_invalidation": + if row.keys[0] is None: + # invalidate the whole cache + # mypy ignore - the token cache is defined on MSC3861DelegatedAuth + self.auth.invalidate_token_cache() # type: ignore[attr-defined] + else: + self.auth.invalidate_cached_tokens(row.keys) # type: ignore[attr-defined] await self._presence_handler.process_replication_rows( stream_name, instance_name, token, rows diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index fe8177ed4d..55e752fda8 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -47,6 +47,7 @@ from synapse.rest.admin.federation import ( ListDestinationsRestServlet, ) from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo +from synapse.rest.admin.oidc import OIDCTokenRevocationRestServlet from synapse.rest.admin.registration_tokens import ( ListRegistrationTokensRestServlet, NewRegistrationTokenRestServlet, @@ -297,6 +298,8 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: BackgroundUpdateRestServlet(hs).register(http_server) BackgroundUpdateStartJobRestServlet(hs).register(http_server) ExperimentalFeaturesRestServlet(hs).register(http_server) + if hs.config.experimental.msc3861.enabled: + OIDCTokenRevocationRestServlet(hs).register(http_server) def register_servlets_for_client_rest_resource( diff --git a/synapse/rest/admin/oidc.py b/synapse/rest/admin/oidc.py new file mode 100644 index 0000000000..64d2d40550 --- /dev/null +++ b/synapse/rest/admin/oidc.py @@ -0,0 +1,55 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
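+
+# A usage sketch (the request path matches the accompanying tests; the token id
+# shown is illustrative): when the authorizing server revokes a token it should
+# call
+#
+#     DELETE /_synapse/admin/v1/OIDC_token_revocation/<jti-of-revoked-token>
+#
+# so that the cached introspection result is dropped here and, via replication,
+# on all workers.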
+from http import HTTPStatus
+from typing import TYPE_CHECKING, Dict, Tuple
+
+from synapse.http.servlet import RestServlet
+from synapse.http.site import SynapseRequest
+from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+
+class OIDCTokenRevocationRestServlet(RestServlet):
+    """
+    Delete a given token introspection response - identified by the `jti` field - from the
+    introspection token cache when a token is revoked at the authorizing server
+    """
+
+    PATTERNS = admin_patterns("/OIDC_token_revocation/(?P<token_id>[^/]*)")
+
+    def __init__(self, hs: "HomeServer"):
+        super().__init__()
+        auth = hs.get_auth()
+
+        # If this endpoint is loaded then we must have enabled delegated auth.
+        from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth
+
+        assert isinstance(auth, MSC3861DelegatedAuth)
+
+        self.auth = auth
+        self.store = hs.get_datastores().main
+
+    async def on_DELETE(
+        self, request: SynapseRequest, token_id: str
+    ) -> Tuple[HTTPStatus, Dict]:
+        await assert_requester_is_admin(self.auth, request)
+
+        self.auth._token_cache.invalidate(token_id)
+
+        # make sure we invalidate the cache on any workers
+        await self.store.stream_introspection_token_invalidation((token_id,))
+
+        return HTTPStatus.OK, {}
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py
index 2fbd389c71..18905e07b6 100644
--- a/synapse/storage/databases/main/cache.py
+++ b/synapse/storage/databases/main/cache.py
@@ -584,6 +584,19 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
         else:
             return 0

+    async def stream_introspection_token_invalidation(
+        self, key: Tuple[Optional[str]]
+    ) -> None:
+        """
+        Stream an invalidation request for the introspection token cache to workers
+
+        Args:
+            key: token_id of the introspection token to remove from the cache
+        """
+        await self.send_invalidation_to_replication(
+            "introspection_token_invalidation", key
+        )
+
     @wrap_as_background_process("clean_up_old_cache_invalidations")
     async def _clean_up_cache_invalidation_wrapper(self) -> None:
         """
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index e4162f846b..fa69a4a298 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -33,6 +33,7 @@ from typing_extensions import Literal

 from synapse.api.constants import EduTypes
 from synapse.api.errors import Codes, StoreError
+from synapse.config.homeserver import HomeServerConfig
 from synapse.logging.opentracing import (
     get_active_span_text_map,
     set_tag,
@@ -1663,6 +1664,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         self.device_id_exists_cache: LruCache[
             Tuple[str, str], Literal[True]
         ] = LruCache(cache_name="device_id_exists", max_size=10000)
+        self.config: HomeServerConfig = hs.config

     async def store_device(
         self,
@@ -1784,6 +1786,13 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         for device_id in device_ids:
             self.device_id_exists_cache.invalidate((user_id, device_id))

+        # TODO: don't nuke the entire cache once there is a way to associate
+        # device_id -> introspection_token
+        if self.config.experimental.msc3861.enabled:
+            # mypy ignore - the token cache is defined on MSC3861DelegatedAuth
+            self.auth._token_cache.invalidate_all()  # type: ignore[attr-defined]
+            await self.stream_introspection_token_invalidation((None,))
+
     async def update_device(
         self, user_id: str, device_id: str, new_display_name: Optional[str]
= None ) -> None: diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 01ad02af67..9a3e10ddee 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -140,6 +140,20 @@ class ExpiringCache(Generic[KT, VT]): return value.value + def invalidate(self, key: KT) -> None: + """ + Remove the given key from the cache. + """ + + value = self._cache.pop(key, None) + if value: + if self.iterable: + self.metrics.inc_evictions( + EvictionReason.invalidation, len(value.value) + ) + else: + self.metrics.inc_evictions(EvictionReason.invalidation) + def __contains__(self, key: KT) -> bool: return key in self._cache @@ -193,6 +207,14 @@ class ExpiringCache(Generic[KT, VT]): len(self), ) + def invalidate_all(self) -> None: + """ + Remove all items from the cache. + """ + keys = set(self._cache.keys()) + for key in keys: + self._cache.pop(key) + def __len__(self) -> int: if self.iterable: return sum(len(entry.value) for entry in self._cache.values()) diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index 1456b675a7..b891e84690 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -14,7 +14,7 @@ from http import HTTPStatus from typing import Any, Dict, Union -from unittest.mock import ANY, Mock +from unittest.mock import ANY, AsyncMock, Mock from urllib.parse import parse_qs from signedjson.key import ( @@ -588,6 +588,38 @@ class MSC3861OAuthDelegation(HomeserverTestCase): ) self.assertEqual(self.http_client.request.call_count, 2) + def test_revocation_endpoint(self) -> None: + # mock introspection response and then admin verification response + self.http_client.request = AsyncMock( + side_effect=[ + FakeResponse.json( + code=200, payload={"active": True, "jti": "open_sesame"} + ), + FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([SYNAPSE_ADMIN_SCOPE, MATRIX_USER_SCOPE]), + "username": USERNAME, + }, + ), + ] + ) + + # cache a token to delete + introspection_token = self.get_success( + self.auth._introspect_token("open_sesame") # type: ignore[attr-defined] + ) + self.assertEqual(self.auth._token_cache.get("open_sesame"), introspection_token) # type: ignore[attr-defined] + + # delete the revoked token + introspection_token_id = "open_sesame" + url = f"/_synapse/admin/v1/OIDC_token_revocation/{introspection_token_id}" + channel = self.make_request("DELETE", url, access_token="mockAccessToken") + self.assertEqual(channel.code, 200) + self.assertEqual(self.auth._token_cache.get("open_sesame"), None) # type: ignore[attr-defined] + def make_device_keys(self, user_id: str, device_id: str) -> JsonDict: # We only generate a master key to simplify the test. master_signing_key = generate_signing_key(device_id) diff --git a/tests/replication/test_intro_token_invalidation.py b/tests/replication/test_intro_token_invalidation.py new file mode 100644 index 0000000000..f90678b6b1 --- /dev/null +++ b/tests/replication/test_intro_token_invalidation.py @@ -0,0 +1,62 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Dict
+
+import synapse.rest.admin._base
+
+from tests.replication._base import BaseMultiWorkerStreamTestCase
+
+
+class IntrospectionTokenCacheInvalidationTestCase(BaseMultiWorkerStreamTestCase):
+    servlets = [synapse.rest.admin.register_servlets]
+
+    def default_config(self) -> Dict[str, Any]:
+        config = super().default_config()
+        config["disable_registration"] = True
+        config["experimental_features"] = {
+            "msc3861": {
+                "enabled": True,
+                "issuer": "some_dude",
+                "client_id": "ID",
+                "client_auth_method": "client_secret_post",
+                "client_secret": "secret",
+            }
+        }
+        return config
+
+    def test_stream_introspection_token_invalidation(self) -> None:
+        worker_hs = self.make_worker_hs("synapse.app.generic_worker")
+        auth = worker_hs.get_auth()
+        store = self.hs.get_datastores().main
+
+        # add a token to the cache on the worker
+        auth._token_cache["open_sesame"] = "intro_token"  # type: ignore[attr-defined]
+
+        # stream the invalidation from the master
+        self.get_success(
+            store.stream_introspection_token_invalidation(("open_sesame",))
+        )
+
+        # check that the cache on the worker was invalidated
+        self.assertEqual(auth._token_cache.get("open_sesame"), None)  # type: ignore[attr-defined]
+
+        # test invalidating whole cache
+        for i in range(0, 5):
+            auth._token_cache[f"open_sesame_{i}"] = f"intro_token_{i}"  # type: ignore[attr-defined]
+        self.assertEqual(len(auth._token_cache), 5)  # type: ignore[attr-defined]
+
+        self.get_success(store.stream_introspection_token_invalidation((None,)))
+
+        self.assertEqual(len(auth._token_cache), 0)  # type: ignore[attr-defined]
From 0ba17777be81ba9457defb407112b664042a14d2 Mon Sep 17 00:00:00 2001
From: Mathieu Velten
Date: Tue, 22 Aug 2023 16:47:59 +0200
Subject: [PATCH 347/562] Disable `m.3pid_changes` capability when MSC3861 is
 enabled. (#16134)

---
 changelog.d/16127.bugfix              |  2 +-
 changelog.d/16134.bugfix              |  1 +
 synapse/config/experimental.py        |  6 ++++++
 synapse/config/registration.py        | 11 ++++++++++-
 tests/config/test_oauth_delegation.py |  5 +++++
 5 files changed, 23 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/16134.bugfix

diff --git a/changelog.d/16127.bugfix b/changelog.d/16127.bugfix
index 0308fdfd45..9ce5f4a705 100644
--- a/changelog.d/16127.bugfix
+++ b/changelog.d/16127.bugfix
@@ -1 +1 @@
-User consent features cannot be enabled when using experimental MSC3861.
+User consent and 3-PID changes capabilities cannot be enabled when using experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support.
diff --git a/changelog.d/16134.bugfix b/changelog.d/16134.bugfix
new file mode 100644
index 0000000000..9ce5f4a705
--- /dev/null
+++ b/changelog.d/16134.bugfix
@@ -0,0 +1 @@
+User consent and 3-PID changes capabilities cannot be enabled when using experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support.
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index ac9449b18f..277ea4675b 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -223,6 +223,12 @@ class MSC3861:
                 ("session_lifetime",),
             )

+        if root.registration.enable_3pid_changes:
+            raise ConfigError(
+                "enable_3pid_changes cannot be enabled when OAuth delegation is enabled",
+                ("enable_3pid_changes",),
+            )
+

 @attr.s(auto_attribs=True, frozen=True, slots=True)
 class MSC3866Config:
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index df1d83dfaa..b8ad6fbc06 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -133,7 +133,16 @@ class RegistrationConfig(Config):

         self.enable_set_displayname = config.get("enable_set_displayname", True)
         self.enable_set_avatar_url = config.get("enable_set_avatar_url", True)
-        self.enable_3pid_changes = config.get("enable_3pid_changes", True)
+
+        # The default value of enable_3pid_changes is True, unless msc3861 is enabled.
+        msc3861_enabled = (
+            (config.get("experimental_features") or {})
+            .get("msc3861", {})
+            .get("enabled", False)
+        )
+        self.enable_3pid_changes = config.get(
+            "enable_3pid_changes", not msc3861_enabled
+        )

         self.disable_msisdn_registration = config.get(
             "disable_msisdn_registration", False
diff --git a/tests/config/test_oauth_delegation.py b/tests/config/test_oauth_delegation.py
index 35f7b85dc7..5c91031746 100644
--- a/tests/config/test_oauth_delegation.py
+++ b/tests/config/test_oauth_delegation.py
@@ -271,3 +271,8 @@ class MSC3861OAuthDelegation(TestCase):
         self.config_dict["session_lifetime"] = "24h"
         with self.assertRaises(ConfigError):
             self.parse_config()
+
+    def test_enable_3pid_changes_cannot_be_enabled(self) -> None:
+        self.config_dict["enable_3pid_changes"] = True
+        with self.assertRaises(ConfigError):
+            self.parse_config()
From 803f63df1c52237a23cb68c1b2a8402200a7216d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 22 Aug 2023 16:11:22 +0100
Subject: [PATCH 348/562] Fix perf of `wait_for_stream_positions` (#16148)

---
 changelog.d/16148.bugfix          |  1 +
 synapse/replication/tcp/client.py | 19 ++++++++++++-------
 2 files changed, 13 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/16148.bugfix

diff --git a/changelog.d/16148.bugfix b/changelog.d/16148.bugfix
new file mode 100644
index 0000000000..fea316f856
--- /dev/null
+++ b/changelog.d/16148.bugfix
@@ -0,0 +1 @@
+Fix performance degradation when there are a lot of in-flight replication requests.
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 04e8cff6ea..3b88dc68ea 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -14,7 +14,9 @@
 """A replication client for use by synapse workers.
 """
 import logging
-from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple
+from typing import TYPE_CHECKING, Dict, Iterable, Optional, Set, Tuple
+
+from sortedcontainers import SortedList

 from twisted.internet import defer
 from twisted.internet.defer import Deferred
@@ -86,7 +88,9 @@ class ReplicationDataHandler:

         # Map from stream and instance to list of deferreds waiting for the stream to
         # arrive at a particular position. The lists are sorted by stream position.
- self._streams_to_waiters: Dict[Tuple[str, str], List[Tuple[int, Deferred]]] = {} + self._streams_to_waiters: Dict[ + Tuple[str, str], SortedList[Tuple[int, Deferred]] + ] = {} async def on_rdata( self, stream_name: str, instance_name: str, token: int, rows: list @@ -238,7 +242,9 @@ class ReplicationDataHandler: # Notify any waiting deferreds. The list is ordered by position so we # just iterate through the list until we reach a position that is # greater than the received row position. - waiting_list = self._streams_to_waiters.get((stream_name, instance_name), []) + waiting_list = self._streams_to_waiters.get((stream_name, instance_name)) + if not waiting_list: + return # Index of first item with a position after the current token, i.e we # have called all deferreds before this index. If not overwritten by @@ -262,7 +268,7 @@ class ReplicationDataHandler: # Drop all entries in the waiting list that were called in the above # loop. (This maintains the order so no need to resort) - waiting_list[:] = waiting_list[index_of_first_deferred_not_called:] + del waiting_list[:index_of_first_deferred_not_called] for deferred in deferreds_to_callback: try: @@ -322,11 +328,10 @@ class ReplicationDataHandler: ) waiting_list = self._streams_to_waiters.setdefault( - (stream_name, instance_name), [] + (stream_name, instance_name), SortedList(key=lambda t: t[0]) ) - waiting_list.append((position, deferred)) - waiting_list.sort(key=lambda t: t[0]) + waiting_list.add((position, deferred)) # We measure here to get in flight counts and average waiting time. with Measure(self._clock, "repl.wait_for_stream_position"): From 3f17178728fa4029c2504f4ceb7377dc888512ab Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 22 Aug 2023 11:43:44 -0400 Subject: [PATCH 349/562] Clean-up presence tests (#16158) Reduce duplicated code & remove unused variables. --- changelog.d/16158.misc | 1 + tests/handlers/test_presence.py | 129 +++++++++----------------------- 2 files changed, 38 insertions(+), 92 deletions(-) create mode 100644 changelog.d/16158.misc diff --git a/changelog.d/16158.misc b/changelog.d/16158.misc new file mode 100644 index 0000000000..41059378c5 --- /dev/null +++ b/changelog.d/16158.misc @@ -0,0 +1 @@ +Improve presence tests. diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index fd66d573d2..1f483eb75a 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -514,6 +514,9 @@ class PresenceTimeoutTestCase(unittest.TestCase): class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): + user_id = "@test:server" + user_id_obj = UserID.from_string(user_id) + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.presence_handler = hs.get_presence_handler() self.clock = hs.get_clock() @@ -523,12 +526,11 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): we time out their syncing users presence. """ process_id = "1" - user_id = "@test:server" # Notify handler that a user is now syncing. self.get_success( self.presence_handler.update_external_syncs_row( - process_id, user_id, True, self.clock.time_msec() + process_id, self.user_id, True, self.clock.time_msec() ) ) @@ -536,48 +538,37 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): # stopped syncing that their presence state doesn't get timed out. 
self.reactor.advance(EXTERNAL_PROCESS_EXPIRY / 2) - state = self.get_success( - self.presence_handler.get_state(UserID.from_string(user_id)) - ) + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) self.assertEqual(state.state, PresenceState.ONLINE) # Check that if the external process timeout fires, then the syncing # user gets timed out self.reactor.advance(EXTERNAL_PROCESS_EXPIRY) - state = self.get_success( - self.presence_handler.get_state(UserID.from_string(user_id)) - ) + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) self.assertEqual(state.state, PresenceState.OFFLINE) def test_user_goes_offline_by_timeout_status_msg_remain(self) -> None: """Test that if a user doesn't update the records for a while users presence goes `OFFLINE` because of timeout and `status_msg` remains. """ - user_id = "@test:server" status_msg = "I'm here!" # Mark user as online - self._set_presencestate_with_status_msg( - user_id, PresenceState.ONLINE, status_msg - ) + self._set_presencestate_with_status_msg(PresenceState.ONLINE, status_msg) # Check that if we wait a while without telling the handler the user has # stopped syncing that their presence state doesn't get timed out. self.reactor.advance(SYNC_ONLINE_TIMEOUT / 2) - state = self.get_success( - self.presence_handler.get_state(UserID.from_string(user_id)) - ) + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) self.assertEqual(state.state, PresenceState.ONLINE) self.assertEqual(state.status_msg, status_msg) # Check that if the timeout fires, then the syncing user gets timed out self.reactor.advance(SYNC_ONLINE_TIMEOUT) - state = self.get_success( - self.presence_handler.get_state(UserID.from_string(user_id)) - ) + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) # status_msg should remain even after going offline self.assertEqual(state.state, PresenceState.OFFLINE) self.assertEqual(state.status_msg, status_msg) @@ -586,24 +577,19 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): """Test that if a user change presence manually to `OFFLINE` and no status is set, that `status_msg` is `None`. """ - user_id = "@test:server" status_msg = "I'm here!" # Mark user as online - self._set_presencestate_with_status_msg( - user_id, PresenceState.ONLINE, status_msg - ) + self._set_presencestate_with_status_msg(PresenceState.ONLINE, status_msg) # Mark user as offline self.get_success( self.presence_handler.set_state( - UserID.from_string(user_id), {"presence": PresenceState.OFFLINE} + self.user_id_obj, {"presence": PresenceState.OFFLINE} ) ) - state = self.get_success( - self.presence_handler.get_state(UserID.from_string(user_id)) - ) + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) self.assertEqual(state.state, PresenceState.OFFLINE) self.assertEqual(state.status_msg, None) @@ -611,41 +597,31 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): """Test that if a user change presence manually to `OFFLINE` and a status is set, that `status_msg` appears. """ - user_id = "@test:server" status_msg = "I'm here!" # Mark user as online - self._set_presencestate_with_status_msg( - user_id, PresenceState.ONLINE, status_msg - ) + self._set_presencestate_with_status_msg(PresenceState.ONLINE, status_msg) # Mark user as offline - self._set_presencestate_with_status_msg( - user_id, PresenceState.OFFLINE, "And now here." 
- ) + self._set_presencestate_with_status_msg(PresenceState.OFFLINE, "And now here.") def test_user_reset_online_with_no_status(self) -> None: """Test that if a user set again the presence manually and no status is set, that `status_msg` is `None`. """ - user_id = "@test:server" status_msg = "I'm here!" # Mark user as online - self._set_presencestate_with_status_msg( - user_id, PresenceState.ONLINE, status_msg - ) + self._set_presencestate_with_status_msg(PresenceState.ONLINE, status_msg) # Mark user as online again self.get_success( self.presence_handler.set_state( - UserID.from_string(user_id), {"presence": PresenceState.ONLINE} + self.user_id_obj, {"presence": PresenceState.ONLINE} ) ) - state = self.get_success( - self.presence_handler.get_state(UserID.from_string(user_id)) - ) + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) # status_msg should remain even after going offline self.assertEqual(state.state, PresenceState.ONLINE) self.assertEqual(state.status_msg, None) @@ -654,33 +630,27 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): """Test that if a user set again the presence manually and status is `None`, that `status_msg` is `None`. """ - user_id = "@test:server" status_msg = "I'm here!" # Mark user as online - self._set_presencestate_with_status_msg( - user_id, PresenceState.ONLINE, status_msg - ) + self._set_presencestate_with_status_msg(PresenceState.ONLINE, status_msg) # Mark user as online and `status_msg = None` - self._set_presencestate_with_status_msg(user_id, PresenceState.ONLINE, None) + self._set_presencestate_with_status_msg(PresenceState.ONLINE, None) def test_set_presence_from_syncing_not_set(self) -> None: """Test that presence is not set by syncing if affect_presence is false""" - user_id = "@test:server" status_msg = "I'm here!" - self._set_presencestate_with_status_msg( - user_id, PresenceState.UNAVAILABLE, status_msg - ) + self._set_presencestate_with_status_msg(PresenceState.UNAVAILABLE, status_msg) self.get_success( - self.presence_handler.user_syncing(user_id, False, PresenceState.ONLINE) + self.presence_handler.user_syncing( + self.user_id, False, PresenceState.ONLINE + ) ) - state = self.get_success( - self.presence_handler.get_state(UserID.from_string(user_id)) - ) + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) # we should still be unavailable self.assertEqual(state.state, PresenceState.UNAVAILABLE) # and status message should still be the same @@ -688,50 +658,34 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): def test_set_presence_from_syncing_is_set(self) -> None: """Test that presence is set by syncing if affect_presence is true""" - user_id = "@test:server" status_msg = "I'm here!" - self._set_presencestate_with_status_msg( - user_id, PresenceState.UNAVAILABLE, status_msg - ) + self._set_presencestate_with_status_msg(PresenceState.UNAVAILABLE, status_msg) self.get_success( - self.presence_handler.user_syncing(user_id, True, PresenceState.ONLINE) + self.presence_handler.user_syncing(self.user_id, True, PresenceState.ONLINE) ) - state = self.get_success( - self.presence_handler.get_state(UserID.from_string(user_id)) - ) + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) # we should now be online self.assertEqual(state.state, PresenceState.ONLINE) def test_set_presence_from_syncing_keeps_status(self) -> None: """Test that presence set by syncing retains status message""" - user_id = "@test:server" status_msg = "I'm here!" 
- self._set_presencestate_with_status_msg( - user_id, PresenceState.UNAVAILABLE, status_msg - ) + self._set_presencestate_with_status_msg(PresenceState.UNAVAILABLE, status_msg) self.get_success( - self.presence_handler.user_syncing(user_id, True, PresenceState.ONLINE) + self.presence_handler.user_syncing(self.user_id, True, PresenceState.ONLINE) ) - state = self.get_success( - self.presence_handler.get_state(UserID.from_string(user_id)) - ) + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) # our status message should be the same as it was before self.assertEqual(state.status_msg, status_msg) @parameterized.expand([(False,), (True,)]) - @unittest.override_config( - { - "experimental_features": { - "msc3026_enabled": True, - }, - } - ) + @unittest.override_config({"experimental_features": {"msc3026_enabled": True}}) def test_set_presence_from_syncing_keeps_busy( self, test_with_workers: bool ) -> None: @@ -741,7 +695,6 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): test_with_workers: If True, check the presence state of the user by calling /sync against a worker, rather than the main process. """ - user_id = "@test:server" status_msg = "I'm busy!" # By default, we call /sync against the main process. @@ -755,44 +708,39 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): ) # Set presence to BUSY - self._set_presencestate_with_status_msg(user_id, PresenceState.BUSY, status_msg) + self._set_presencestate_with_status_msg(PresenceState.BUSY, status_msg) # Perform a sync with a presence state other than busy. This should NOT change # our presence status; we only change from busy if we explicitly set it via # /presence/*. self.get_success( worker_to_sync_against.get_presence_handler().user_syncing( - user_id, True, PresenceState.ONLINE + self.user_id, True, PresenceState.ONLINE ) ) # Check against the main process that the user's presence did not change. - state = self.get_success( - self.presence_handler.get_state(UserID.from_string(user_id)) - ) + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) # we should still be busy self.assertEqual(state.state, PresenceState.BUSY) def _set_presencestate_with_status_msg( - self, user_id: str, state: str, status_msg: Optional[str] + self, state: str, status_msg: Optional[str] ) -> None: """Set a PresenceState and status_msg and check the result. Args: - user_id: User for that the status is to be set. state: The new PresenceState. status_msg: Status message that is to be set. """ self.get_success( self.presence_handler.set_state( - UserID.from_string(user_id), + self.user_id_obj, {"presence": state, "status_msg": status_msg}, ) ) - new_state = self.get_success( - self.presence_handler.get_state(UserID.from_string(user_id)) - ) + new_state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) self.assertEqual(new_state.state, state) self.assertEqual(new_state.status_msg, status_msg) @@ -952,9 +900,6 @@ class PresenceFederationQueueTestCase(unittest.HomeserverTestCase): self.assertEqual(upto_token, now_token) self.assertFalse(limited) - expected_rows = [ - (2, ("dest3", "@user3:test")), - ] self.assertCountEqual(rows, []) prev_token = self.queue.get_current_token(self.instance_name) From 3b3fed7229c8110870aefd4de740724fc607a46c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 23 Aug 2023 09:23:22 +0100 Subject: [PATCH 350/562] Increase perf of read/write locks (#16149) We do this by marking the tables as `UNLOGGED` in PostgreSQL. 
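
As background, `UNLOGGED` tables skip write-ahead logging: writes become
cheaper, but the table is truncated after an unclean shutdown and its
contents are not streamed to physical replicas. A minimal sketch of the
trade-off, using a hypothetical `scratch_locks` table (illustration only,
not part of this change):

    -- Hypothetical scratch table: UNLOGGED trades durability for write
    -- throughput. After a crash, PostgreSQL truncates it to empty.
    CREATE UNLOGGED TABLE scratch_locks (
        lock_name TEXT NOT NULL,
        lock_key  TEXT NOT NULL,
        token     TEXT NOT NULL
    );

    -- Existing tables can also be switched in place, which is what the
    -- migration below does for the worker_read_write_locks* tables.
    ALTER TABLE scratch_locks SET LOGGED;    -- back to durable, WAL-logged
    ALTER TABLE scratch_locks SET UNLOGGED;  -- and back to unlogged

Note that mixing logged and unlogged tables in a foreign-key relationship
is restricted, which is why the migration below also drops and recreates
the circular foreign-key constraint around the switch.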
--- changelog.d/16149.misc | 1 + .../02_read_write_locks_unlogged.sql.postgres | 30 +++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 changelog.d/16149.misc create mode 100644 synapse/storage/schema/main/delta/80/02_read_write_locks_unlogged.sql.postgres diff --git a/changelog.d/16149.misc b/changelog.d/16149.misc new file mode 100644 index 0000000000..8b6674d2aa --- /dev/null +++ b/changelog.d/16149.misc @@ -0,0 +1 @@ +Increase performance of read/write locks. diff --git a/synapse/storage/schema/main/delta/80/02_read_write_locks_unlogged.sql.postgres b/synapse/storage/schema/main/delta/80/02_read_write_locks_unlogged.sql.postgres new file mode 100644 index 0000000000..5b5dbf2687 --- /dev/null +++ b/synapse/storage/schema/main/delta/80/02_read_write_locks_unlogged.sql.postgres @@ -0,0 +1,30 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Mark the worker_read_write_locks* tables as UNLOGGED, to increase +-- performance. This means that we don't replicate the tables, and they get +-- truncated on a crash. This is acceptable as a) in those cases it's likely +-- that Synapse needs to be stopped/restarted anyway, and b) the locks are +-- considered best-effort anyway. + +-- We need to remove and recreate the circular foreign key references, as +-- UNLOGGED tables can't reference normal tables. +ALTER TABLE worker_read_write_locks_mode DROP CONSTRAINT IF EXISTS worker_read_write_locks_mode_foreign; + +ALTER TABLE worker_read_write_locks SET UNLOGGED; +ALTER TABLE worker_read_write_locks_mode SET UNLOGGED; + +ALTER TABLE worker_read_write_locks_mode ADD CONSTRAINT worker_read_write_locks_mode_foreign + FOREIGN KEY (lock_name, lock_key, token) REFERENCES worker_read_write_locks(lock_name, lock_key, token) DEFERRABLE INITIALLY DEFERRED; From dffe095642b071dcac4907cc97944886e9fbd5b2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 23 Aug 2023 09:23:41 +0100 Subject: [PATCH 351/562] Only lock when we're backfilling (#16159) --- changelog.d/16159.misc | 1 + synapse/handlers/federation.py | 35 +++-- synapse/handlers/pagination.py | 273 ++++++++++++++++----------------- 3 files changed, 160 insertions(+), 149 deletions(-) create mode 100644 changelog.d/16159.misc diff --git a/changelog.d/16159.misc b/changelog.d/16159.misc new file mode 100644 index 0000000000..04cdd1afaf --- /dev/null +++ b/changelog.d/16159.misc @@ -0,0 +1 @@ +Reduce scope of locks when paginating to alleviate DB contention. 
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 2b93b8c621..29cd45550a 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -60,6 +60,7 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext, UnpersistedEventContextBase from synapse.events.validator import EventValidator from synapse.federation.federation_client import InvalidResponseError +from synapse.handlers.pagination import PURGE_PAGINATION_LOCK_NAME from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import nested_logging_context from synapse.logging.opentracing import SynapseTags, set_tag, tag_args, trace @@ -152,6 +153,7 @@ class FederationHandler: self._device_handler = hs.get_device_handler() self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator() self._notifier = hs.get_notifier() + self._worker_locks = hs.get_worker_locks_handler() self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client( hs @@ -200,7 +202,7 @@ class FederationHandler: @trace @tag_args async def maybe_backfill( - self, room_id: str, current_depth: int, limit: int + self, room_id: str, current_depth: int, limit: int, record_time: bool = True ) -> bool: """Checks the database to see if we should backfill before paginating, and if so do. @@ -213,21 +215,25 @@ class FederationHandler: limit: The number of events that the pagination request will return. This is used as part of the heuristic to decide if we should back paginate. + record_time: Whether to record the time it takes to backfill. Returns: True if we actually tried to backfill something, otherwise False. """ # Starting the processing time here so we can include the room backfill # linearizer lock queue in the timing - processing_start_time = self.clock.time_msec() + processing_start_time = self.clock.time_msec() if record_time else 0 async with self._room_backfill.queue(room_id): - return await self._maybe_backfill_inner( - room_id, - current_depth, - limit, - processing_start_time=processing_start_time, - ) + async with self._worker_locks.acquire_read_write_lock( + PURGE_PAGINATION_LOCK_NAME, room_id, write=False + ): + return await self._maybe_backfill_inner( + room_id, + current_depth, + limit, + processing_start_time=processing_start_time, + ) @trace @tag_args @@ -305,12 +311,21 @@ class FederationHandler: # of history that extends all the way back to where we are currently paginating # and it's within the 100 events that are returned from `/backfill`. if not sorted_backfill_points and current_depth != MAX_DEPTH: + # Check that we actually have later backfill points, if not just return. + have_later_backfill_points = await self.store.get_backfill_points_in_room( + room_id=room_id, + current_depth=MAX_DEPTH, + limit=1, + ) + if not have_later_backfill_points: + return False + logger.debug( "_maybe_backfill_inner: all backfill points are *after* current depth. Trying again with later backfill points." ) run_as_background_process( "_maybe_backfill_inner_anyway_with_max_depth", - self._maybe_backfill_inner, + self.maybe_backfill, room_id=room_id, # We use `MAX_DEPTH` so that we find all backfill points next # time (all events are below the `MAX_DEPTH`) @@ -319,7 +334,7 @@ class FederationHandler: # We don't want to start another timing observation from this # nested recursive call. The top-most call can record the time # overall otherwise the smaller one will throw off the results. 
- processing_start_time=None, + record_time=False, ) # We return `False` because we're backfilling in the background and there is # no new events immediately for the caller to know about yet. diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 1be6ebc6d9..e5ac9096cc 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -487,155 +487,150 @@ class PaginationHandler: room_token = from_token.room_key - async with self._worker_locks.acquire_read_write_lock( - PURGE_PAGINATION_LOCK_NAME, room_id, write=False - ): - (membership, member_event_id) = (None, None) - if not use_admin_priviledge: - ( - membership, - member_event_id, - ) = await self.auth.check_user_in_room_or_world_readable( - room_id, requester, allow_departed_users=True - ) - - if pagin_config.direction == Direction.BACKWARDS: - # if we're going backwards, we might need to backfill. This - # requires that we have a topo token. - if room_token.topological: - curr_topo = room_token.topological - else: - curr_topo = await self.store.get_current_topological_token( - room_id, room_token.stream - ) - - # If they have left the room then clamp the token to be before - # they left the room, to save the effort of loading from the - # database. - if ( - pagin_config.direction == Direction.BACKWARDS - and not use_admin_priviledge - and membership == Membership.LEAVE - ): - # This is only None if the room is world_readable, in which case - # "Membership.JOIN" would have been returned and we should never hit - # this branch. - assert member_event_id - - leave_token = await self.store.get_topological_token_for_event( - member_event_id - ) - assert leave_token.topological is not None - - if leave_token.topological < curr_topo: - from_token = from_token.copy_and_replace( - StreamKeyType.ROOM, leave_token - ) - - to_room_key = None - if pagin_config.to_token: - to_room_key = pagin_config.to_token.room_key - - # Initially fetch the events from the database. With any luck, we can return - # these without blocking on backfill (handled below). - events, next_key = await self.store.paginate_room_events( - room_id=room_id, - from_key=from_token.room_key, - to_key=to_room_key, - direction=pagin_config.direction, - limit=pagin_config.limit, - event_filter=event_filter, + (membership, member_event_id) = (None, None) + if not use_admin_priviledge: + ( + membership, + member_event_id, + ) = await self.auth.check_user_in_room_or_world_readable( + room_id, requester, allow_departed_users=True ) - if pagin_config.direction == Direction.BACKWARDS: - # We use a `Set` because there can be multiple events at a given depth - # and we only care about looking at the unique continum of depths to - # find gaps. - event_depths: Set[int] = {event.depth for event in events} - sorted_event_depths = sorted(event_depths) - - # Inspect the depths of the returned events to see if there are any gaps - found_big_gap = False - number_of_gaps = 0 - previous_event_depth = ( - sorted_event_depths[0] if len(sorted_event_depths) > 0 else 0 + if pagin_config.direction == Direction.BACKWARDS: + # if we're going backwards, we might need to backfill. This + # requires that we have a topo token. 
+ if room_token.topological: + curr_topo = room_token.topological + else: + curr_topo = await self.store.get_current_topological_token( + room_id, room_token.stream ) - for event_depth in sorted_event_depths: - # We don't expect a negative depth but we'll just deal with it in - # any case by taking the absolute value to get the true gap between - # any two integers. - depth_gap = abs(event_depth - previous_event_depth) - # A `depth_gap` of 1 is a normal continuous chain to the next event - # (1 <-- 2 <-- 3) so anything larger indicates a missing event (it's - # also possible there is no event at a given depth but we can't ever - # know that for sure) - if depth_gap > 1: - number_of_gaps += 1 - # We only tolerate a small number single-event long gaps in the - # returned events because those are most likely just events we've - # failed to pull in the past. Anything longer than that is probably - # a sign that we're missing a decent chunk of history and we should - # try to backfill it. - # - # XXX: It's possible we could tolerate longer gaps if we checked - # that a given events `prev_events` is one that has failed pull - # attempts and we could just treat it like a dead branch of history - # for now or at least something that we don't need the block the - # client on to try pulling. - # - # XXX: If we had something like MSC3871 to indicate gaps in the - # timeline to the client, we could also get away with any sized gap - # and just have the client refetch the holes as they see fit. - if depth_gap > 2: - found_big_gap = True - break - previous_event_depth = event_depth + # If they have left the room then clamp the token to be before + # they left the room, to save the effort of loading from the + # database. + if ( + pagin_config.direction == Direction.BACKWARDS + and not use_admin_priviledge + and membership == Membership.LEAVE + ): + # This is only None if the room is world_readable, in which case + # "Membership.JOIN" would have been returned and we should never hit + # this branch. + assert member_event_id - # Backfill in the foreground if we found a big gap, have too many holes, - # or we don't have enough events to fill the limit that the client asked - # for. - missing_too_many_events = ( - number_of_gaps > BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD + leave_token = await self.store.get_topological_token_for_event( + member_event_id + ) + assert leave_token.topological is not None + + if leave_token.topological < curr_topo: + from_token = from_token.copy_and_replace( + StreamKeyType.ROOM, leave_token ) - not_enough_events_to_fill_response = len(events) < pagin_config.limit - if ( - found_big_gap - or missing_too_many_events - or not_enough_events_to_fill_response - ): - did_backfill = ( - await self.hs.get_federation_handler().maybe_backfill( - room_id, - curr_topo, - limit=pagin_config.limit, - ) - ) - # If we did backfill something, refetch the events from the database to - # catch anything new that might have been added since we last fetched. - if did_backfill: - events, next_key = await self.store.paginate_room_events( - room_id=room_id, - from_key=from_token.room_key, - to_key=to_room_key, - direction=pagin_config.direction, - limit=pagin_config.limit, - event_filter=event_filter, - ) - else: - # Otherwise, we can backfill in the background for eventual - # consistency's sake but we don't need to block the client waiting - # for a costly federation call and processing. 
- run_as_background_process( - "maybe_backfill_in_the_background", - self.hs.get_federation_handler().maybe_backfill, - room_id, - curr_topo, + to_room_key = None + if pagin_config.to_token: + to_room_key = pagin_config.to_token.room_key + + # Initially fetch the events from the database. With any luck, we can return + # these without blocking on backfill (handled below). + events, next_key = await self.store.paginate_room_events( + room_id=room_id, + from_key=from_token.room_key, + to_key=to_room_key, + direction=pagin_config.direction, + limit=pagin_config.limit, + event_filter=event_filter, + ) + + if pagin_config.direction == Direction.BACKWARDS: + # We use a `Set` because there can be multiple events at a given depth + # and we only care about looking at the unique continum of depths to + # find gaps. + event_depths: Set[int] = {event.depth for event in events} + sorted_event_depths = sorted(event_depths) + + # Inspect the depths of the returned events to see if there are any gaps + found_big_gap = False + number_of_gaps = 0 + previous_event_depth = ( + sorted_event_depths[0] if len(sorted_event_depths) > 0 else 0 + ) + for event_depth in sorted_event_depths: + # We don't expect a negative depth but we'll just deal with it in + # any case by taking the absolute value to get the true gap between + # any two integers. + depth_gap = abs(event_depth - previous_event_depth) + # A `depth_gap` of 1 is a normal continuous chain to the next event + # (1 <-- 2 <-- 3) so anything larger indicates a missing event (it's + # also possible there is no event at a given depth but we can't ever + # know that for sure) + if depth_gap > 1: + number_of_gaps += 1 + + # We only tolerate a small number single-event long gaps in the + # returned events because those are most likely just events we've + # failed to pull in the past. Anything longer than that is probably + # a sign that we're missing a decent chunk of history and we should + # try to backfill it. + # + # XXX: It's possible we could tolerate longer gaps if we checked + # that a given events `prev_events` is one that has failed pull + # attempts and we could just treat it like a dead branch of history + # for now or at least something that we don't need the block the + # client on to try pulling. + # + # XXX: If we had something like MSC3871 to indicate gaps in the + # timeline to the client, we could also get away with any sized gap + # and just have the client refetch the holes as they see fit. + if depth_gap > 2: + found_big_gap = True + break + previous_event_depth = event_depth + + # Backfill in the foreground if we found a big gap, have too many holes, + # or we don't have enough events to fill the limit that the client asked + # for. + missing_too_many_events = ( + number_of_gaps > BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD + ) + not_enough_events_to_fill_response = len(events) < pagin_config.limit + if ( + found_big_gap + or missing_too_many_events + or not_enough_events_to_fill_response + ): + did_backfill = await self.hs.get_federation_handler().maybe_backfill( + room_id, + curr_topo, + limit=pagin_config.limit, + ) + + # If we did backfill something, refetch the events from the database to + # catch anything new that might have been added since we last fetched. 
+ if did_backfill: + events, next_key = await self.store.paginate_room_events( + room_id=room_id, + from_key=from_token.room_key, + to_key=to_room_key, + direction=pagin_config.direction, limit=pagin_config.limit, + event_filter=event_filter, ) + else: + # Otherwise, we can backfill in the background for eventual + # consistency's sake but we don't need to block the client waiting + # for a costly federation call and processing. + run_as_background_process( + "maybe_backfill_in_the_background", + self.hs.get_federation_handler().maybe_backfill, + room_id, + curr_topo, + limit=pagin_config.limit, + ) - next_token = from_token.copy_and_replace(StreamKeyType.ROOM, next_key) + next_token = from_token.copy_and_replace(StreamKeyType.ROOM, next_key) # if no events are returned from pagination, that implies # we have reached the end of the available events. From 19a1cda084342034cc92c88c0376cbcadbf8e2a0 Mon Sep 17 00:00:00 2001 From: "DeepBlueV7.X" Date: Wed, 23 Aug 2023 08:35:23 +0000 Subject: [PATCH 352/562] Properly update retry_last_ts when hitting the maximum retry interval (#16156) * Properly update retry_last_ts when hitting the maximum retry interval This was broken in 1.87 when the maximum retry interval got changed from almost infinite to a week (and made configurable). fixes #16101 Signed-off-by: Nicolas Werner * Add changelog * Change fix + add test * Add comment --------- Signed-off-by: Nicolas Werner Co-authored-by: Mathieu Velten --- changelog.d/16156.bugfix | 1 + .../storage/databases/main/transactions.py | 4 +- tests/util/test_retryutils.py | 51 +++++++++++++++++++ 3 files changed, 55 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16156.bugfix diff --git a/changelog.d/16156.bugfix b/changelog.d/16156.bugfix new file mode 100644 index 0000000000..17284297cf --- /dev/null +++ b/changelog.d/16156.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in 1.87 where synapse would send an excessive amount of federation requests to servers which have been offline for a long time. Contributed by Nico. diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index c3bd36efc9..48e4b0ba3c 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -242,6 +242,8 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): ) -> None: # Upsert retry time interval if retry_interval is zero (i.e. we're # resetting it) or greater than the existing retry interval. + # We also upsert when the new retry interval is the same as the existing one, + # since it will be the case when `destination_max_retry_interval` is reached. # # WARNING: This is executed in autocommit, so we shouldn't add any more # SQL calls in here (without being very careful). 
@@ -257,7 +259,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): WHERE EXCLUDED.retry_interval = 0 OR destinations.retry_interval IS NULL - OR destinations.retry_interval < EXCLUDED.retry_interval + OR destinations.retry_interval <= EXCLUDED.retry_interval """ txn.execute(sql, (destination, failure_ts, retry_last_ts, retry_interval)) diff --git a/tests/util/test_retryutils.py b/tests/util/test_retryutils.py index 1277e1a865..4bcd17a6fc 100644 --- a/tests/util/test_retryutils.py +++ b/tests/util/test_retryutils.py @@ -108,3 +108,54 @@ class RetryLimiterTestCase(HomeserverTestCase): new_timings = self.get_success(store.get_destination_retry_timings("test_dest")) self.assertIsNone(new_timings) + + def test_max_retry_interval(self) -> None: + """Test that `destination_max_retry_interval` setting works as expected""" + store = self.hs.get_datastores().main + + destination_max_retry_interval_ms = ( + self.hs.config.federation.destination_max_retry_interval_ms + ) + + self.get_success(get_retry_limiter("test_dest", self.clock, store)) + self.pump(1) + + failure_ts = self.clock.time_msec() + + # Simulate reaching destination_max_retry_interval + self.get_success( + store.set_destination_retry_timings( + "test_dest", + failure_ts=failure_ts, + retry_last_ts=failure_ts, + retry_interval=destination_max_retry_interval_ms, + ) + ) + + # Check it fails + self.get_failure( + get_retry_limiter("test_dest", self.clock, store), NotRetryingDestination + ) + + # Get past retry_interval and we can try again, and still throw an error to continue the backoff + self.reactor.advance(destination_max_retry_interval_ms / 1000 + 1) + limiter = self.get_success(get_retry_limiter("test_dest", self.clock, store)) + self.pump(1) + try: + with limiter: + self.pump(1) + raise AssertionError("argh") + except AssertionError: + pass + + self.pump() + + # retry_interval does not increase and stays at destination_max_retry_interval_ms + new_timings = self.get_success(store.get_destination_retry_timings("test_dest")) + assert new_timings is not None + self.assertEqual(new_timings.retry_interval, destination_max_retry_interval_ms) + + # Check it fails + self.get_failure( + get_retry_limiter("test_dest", self.clock, store), NotRetryingDestination + ) From da162cbe4e748841e93849c87374023a0fcbb390 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 23 Aug 2023 07:31:00 -0400 Subject: [PATCH 353/562] Add tests for restoring the presence state after a restart. (#16151) --- changelog.d/16150.misc | 2 +- changelog.d/16151.misc | 1 + tests/handlers/test_presence.py | 116 ++++++++++++++++++++++++++++++++ 3 files changed, 118 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16151.misc diff --git a/changelog.d/16150.misc b/changelog.d/16150.misc index 97861282fd..41059378c5 100644 --- a/changelog.d/16150.misc +++ b/changelog.d/16150.misc @@ -1 +1 @@ -Clean-up calling `setup_background_tasks` in unit tests. +Improve presence tests. diff --git a/changelog.d/16151.misc b/changelog.d/16151.misc new file mode 100644 index 0000000000..41059378c5 --- /dev/null +++ b/changelog.d/16151.misc @@ -0,0 +1 @@ +Improve presence tests. 
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 1f483eb75a..1aebcc16ad 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -38,6 +38,7 @@ from synapse.handlers.presence import ( from synapse.rest import admin from synapse.rest.client import room from synapse.server import HomeServer +from synapse.storage.database import LoggingDatabaseConnection from synapse.types import JsonDict, UserID, get_domain_from_id from synapse.util import Clock @@ -513,6 +514,121 @@ class PresenceTimeoutTestCase(unittest.TestCase): self.assertEqual(state, new_state) +class PresenceHandlerInitTestCase(unittest.HomeserverTestCase): + def default_config(self) -> JsonDict: + config = super().default_config() + # Disable background tasks on this worker so that the PresenceHandler isn't + # loaded until we request it. + config["run_background_tasks_on"] = "other" + return config + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.user_id = f"@test:{self.hs.config.server.server_name}" + + # Move the reactor to the initial time. + self.reactor.advance(1000) + now = self.clock.time_msec() + + main_store = hs.get_datastores().main + self.get_success( + main_store.update_presence( + [ + UserPresenceState( + user_id=self.user_id, + state=PresenceState.ONLINE, + last_active_ts=now, + last_federation_update_ts=now, + last_user_sync_ts=now, + status_msg=None, + currently_active=True, + ) + ] + ) + ) + + # Regenerate the preloaded presence information on PresenceStore. + def refill_presence(db_conn: LoggingDatabaseConnection) -> None: + main_store._presence_on_startup = main_store._get_active_presence(db_conn) + + self.get_success(main_store.db_pool.runWithConnection(refill_presence)) + + def test_restored_presence_idles(self) -> None: + """The presence state restored from the database should not persist forever.""" + + # Get the handler (which kicks off a bunch of timers). + presence_handler = self.hs.get_presence_handler() + + # Assert the user is online. + state = self.get_success( + presence_handler.get_state(UserID.from_string(self.user_id)) + ) + self.assertEqual(state.state, PresenceState.ONLINE) + + # Advance such that the user should timeout. + self.reactor.advance(SYNC_ONLINE_TIMEOUT / 1000) + self.reactor.pump([5]) + + # Check that the user is now offline. + state = self.get_success( + presence_handler.get_state(UserID.from_string(self.user_id)) + ) + self.assertEqual(state.state, PresenceState.OFFLINE) + + @parameterized.expand( + [ + (PresenceState.BUSY, PresenceState.BUSY), + (PresenceState.ONLINE, PresenceState.ONLINE), + (PresenceState.UNAVAILABLE, PresenceState.UNAVAILABLE), + # Offline syncs don't update the state. + (PresenceState.OFFLINE, PresenceState.ONLINE), + ] + ) + @unittest.override_config({"experimental_features": {"msc3026_enabled": True}}) + def test_restored_presence_online_after_sync( + self, sync_state: str, expected_state: str + ) -> None: + """ + The presence state restored from the database should be overridden with sync after a timeout. + + Args: + sync_state: The presence state of the new sync. + expected_state: The expected presence right after the sync. + """ + + # Get the handler (which kicks off a bunch of timers). + presence_handler = self.hs.get_presence_handler() + + # Assert the user is online, as restored. 
+ state = self.get_success( + presence_handler.get_state(UserID.from_string(self.user_id)) + ) + self.assertEqual(state.state, PresenceState.ONLINE) + + # Advance slightly and sync. + self.reactor.advance(SYNC_ONLINE_TIMEOUT / 1000 / 2) + self.get_success( + presence_handler.user_syncing( + self.user_id, sync_state != PresenceState.OFFLINE, sync_state + ) + ) + + # Assert the user is in the expected state. + state = self.get_success( + presence_handler.get_state(UserID.from_string(self.user_id)) + ) + self.assertEqual(state.state, expected_state) + + # Advance such that the user's preloaded data times out, but not the new sync. + self.reactor.advance(SYNC_ONLINE_TIMEOUT / 1000 / 2) + self.reactor.pump([5]) + + # Check that the user is in the sync state (as the client is currently syncing still). + state = self.get_success( + presence_handler.get_state(UserID.from_string(self.user_id)) + ) + self.assertEqual(state.state, sync_state) + + class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): user_id = "@test:server" user_id_obj = UserID.from_string(user_id) From 873971a8b9b4cbbc141df570e76a02c7b4b9b9c0 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Wed, 23 Aug 2023 13:37:51 +0200 Subject: [PATCH 354/562] Task scheduler: mark task as active if we are scheduling ASAP (#16165) --- changelog.d/16165.misc | 1 + synapse/storage/databases/main/task_scheduler.py | 2 +- synapse/util/task_scheduler.py | 4 +++- 3 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16165.misc diff --git a/changelog.d/16165.misc b/changelog.d/16165.misc new file mode 100644 index 0000000000..b4d514d249 --- /dev/null +++ b/changelog.d/16165.misc @@ -0,0 +1 @@ +Task scheduler: mark task as active if we are scheduling as soon as possible. diff --git a/synapse/storage/databases/main/task_scheduler.py b/synapse/storage/databases/main/task_scheduler.py index 1fb3180c3c..9ab120eea9 100644 --- a/synapse/storage/databases/main/task_scheduler.py +++ b/synapse/storage/databases/main/task_scheduler.py @@ -92,7 +92,7 @@ class TaskSchedulerWorkerStore(SQLBaseStore): if clauses: sql = sql + " WHERE " + " AND ".join(clauses) - sql = sql + "ORDER BY timestamp" + sql = sql + " ORDER BY timestamp" txn.execute(sql, args) return self.db_pool.cursor_to_dict(txn) diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py index 773a8327f6..4aea64b338 100644 --- a/synapse/util/task_scheduler.py +++ b/synapse/util/task_scheduler.py @@ -154,13 +154,15 @@ class TaskScheduler: f"No function associated with action {action} of the scheduled task" ) + status = TaskStatus.SCHEDULED if timestamp is None or timestamp < self._clock.time_msec(): timestamp = self._clock.time_msec() + status = TaskStatus.ACTIVE task = ScheduledTask( random_string(16), action, - TaskStatus.SCHEDULED, + status, timestamp, resource_id, params, From 86ecd341ec93167fbb5a335237c1cd629e7256a2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 23 Aug 2023 13:04:46 +0100 Subject: [PATCH 355/562] Always update `retry_last_ts` (#16164) --- changelog.d/16164.bugfix | 1 + synapse/storage/databases/main/transactions.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/16164.bugfix diff --git a/changelog.d/16164.bugfix b/changelog.d/16164.bugfix new file mode 100644 index 0000000000..17284297cf --- /dev/null +++ b/changelog.d/16164.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in 1.87 where synapse would send an excessive amount of federation requests to servers which have been offline for a 
long time. Contributed by Nico. diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 48e4b0ba3c..860bbf7c0f 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -242,8 +242,6 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): ) -> None: # Upsert retry time interval if retry_interval is zero (i.e. we're # resetting it) or greater than the existing retry interval. - # We also upsert when the new retry interval is the same as the existing one, - # since it will be the case when `destination_max_retry_interval` is reached. # # WARNING: This is executed in autocommit, so we shouldn't add any more # SQL calls in here (without being very careful). @@ -258,8 +256,10 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): retry_interval = EXCLUDED.retry_interval WHERE EXCLUDED.retry_interval = 0 + OR EXCLUDED.retry_last_ts = 0 OR destinations.retry_interval IS NULL - OR destinations.retry_interval <= EXCLUDED.retry_interval + OR destinations.retry_interval < EXCLUDED.retry_interval + OR destinations.retry_last_ts < EXCLUDED.retry_last_ts """ txn.execute(sql, (destination, failure_ts, retry_last_ts, retry_interval)) From 7cd79ce0519964bf52a3f88d6fd8a5cc5dff5c6c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 23 Aug 2023 13:45:19 +0100 Subject: [PATCH 356/562] Reduce DB contention on worker locks (#16160) --- changelog.d/16160.misc | 1 + .../03_read_write_locks_triggers.sql.postgres | 37 +++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 changelog.d/16160.misc create mode 100644 synapse/storage/schema/main/delta/80/03_read_write_locks_triggers.sql.postgres diff --git a/changelog.d/16160.misc b/changelog.d/16160.misc new file mode 100644 index 0000000000..78803b7bcd --- /dev/null +++ b/changelog.d/16160.misc @@ -0,0 +1 @@ +Reduce DB contention on worker locks. diff --git a/synapse/storage/schema/main/delta/80/03_read_write_locks_triggers.sql.postgres b/synapse/storage/schema/main/delta/80/03_read_write_locks_triggers.sql.postgres new file mode 100644 index 0000000000..31de5bfa18 --- /dev/null +++ b/synapse/storage/schema/main/delta/80/03_read_write_locks_triggers.sql.postgres @@ -0,0 +1,37 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Fix up the triggers that were in `78/04_read_write_locks_triggers.sql` + +-- Reduce the number of writes we do on this table. +-- +-- Note: that we still want to lock the row here (i.e. still do a `DO UPDATE +-- SET`) so that we serialize updates. 
+CREATE OR REPLACE FUNCTION upsert_read_write_lock_parent() RETURNS trigger AS $$ +BEGIN + INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token) + VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token) + ON CONFLICT (lock_name, lock_key) + DO UPDATE SET write_lock = NEW.write_lock + WHERE OLD.write_lock != NEW.write_lock; + RETURN NEW; +END +$$ +LANGUAGE plpgsql; + +DROP TRIGGER IF EXISTS upsert_read_write_lock_parent_trigger ON worker_read_write_locks; +CREATE TRIGGER upsert_read_write_lock_parent_trigger BEFORE INSERT ON worker_read_write_locks + FOR EACH ROW + EXECUTE PROCEDURE upsert_read_write_lock_parent(); From 4adaba9acf224e14171a8a4b9c98ef0791c4a1e3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 23 Aug 2023 13:45:25 +0100 Subject: [PATCH 357/562] Fix rare deadlock when using read/write locks (#16133) --- changelog.d/16133.bugfix | 1 + .../02_read_write_locks_deadlock.sql.postgres | 37 +++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 changelog.d/16133.bugfix create mode 100644 synapse/storage/schema/main/delta/80/02_read_write_locks_deadlock.sql.postgres diff --git a/changelog.d/16133.bugfix b/changelog.d/16133.bugfix new file mode 100644 index 0000000000..ed8830692f --- /dev/null +++ b/changelog.d/16133.bugfix @@ -0,0 +1 @@ +Fix a rare race that could block new events from being sent for up to two minutes. Introduced in v1.90.0. diff --git a/synapse/storage/schema/main/delta/80/02_read_write_locks_deadlock.sql.postgres b/synapse/storage/schema/main/delta/80/02_read_write_locks_deadlock.sql.postgres new file mode 100644 index 0000000000..401c42e18a --- /dev/null +++ b/synapse/storage/schema/main/delta/80/02_read_write_locks_deadlock.sql.postgres @@ -0,0 +1,37 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- To avoid the possibility of a deadlock, lock the +-- `worker_read_write_locks_mode` table so that we serialize inserts/deletes +-- for a specific lock name/key. + +CREATE OR REPLACE FUNCTION delete_read_write_lock_parent_before() RETURNS trigger AS $$ +BEGIN + -- `PERFORM` is a `SELECT` which discards the rows. + PERFORM * FROM worker_read_write_locks_mode + WHERE + lock_name = OLD.lock_name + AND lock_key = OLD.lock_key + FOR UPDATE; + + RETURN OLD; +END +$$ +LANGUAGE plpgsql; + +DROP TRIGGER IF EXISTS delete_read_write_lock_parent_before_trigger ON worker_read_write_locks; +CREATE TRIGGER delete_read_write_lock_parent_before_trigger BEFORE DELETE ON worker_read_write_locks + FOR EACH ROW + EXECUTE PROCEDURE delete_read_write_lock_parent_before(); From ec662bbe413bd976af97f099ea4f11dafaf98b3e Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 23 Aug 2023 14:00:34 +0100 Subject: [PATCH 358/562] Filter out unwanted user_agents from udv. 
(#16124) --- changelog.d/16124.bugfix | 1 + synapse/storage/databases/main/client_ips.py | 5 ++ tests/storage/test_client_ips.py | 65 ++++++++++++++++++++ 3 files changed, 71 insertions(+) create mode 100644 changelog.d/16124.bugfix diff --git a/changelog.d/16124.bugfix b/changelog.d/16124.bugfix new file mode 100644 index 0000000000..fb1d501a2f --- /dev/null +++ b/changelog.d/16124.bugfix @@ -0,0 +1 @@ +Filter out user agent references to the sliding sync proxy and rust-sdk from the user_daily_visits table to ensure that Element X can be represented fully. diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index 0df160d2b0..d8d333e11d 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -579,6 +579,11 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke device_id: Optional[str], now: Optional[int] = None, ) -> None: + # The sync proxy continuously triggers /sync even if the user is not + # present so should be excluded from user_ips entries. + if user_agent == "sync-v3-proxy-": + return + if not now: now = int(self._clock.time_msec()) key = (user_id, access_token, ip) diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index cd0079871c..209d68b40b 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -654,6 +654,71 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): r, ) + def test_invalid_user_agents_are_ignored(self) -> None: + # First make sure we have completed all updates. + self.wait_for_background_updates() + + user_id1 = "@user1:id" + user_id2 = "@user2:id" + device_id1 = "MY_DEVICE1" + device_id2 = "MY_DEVICE2" + access_token1 = "access_token1" + access_token2 = "access_token2" + + # Insert a user IP 1 + self.get_success( + self.store.store_device( + user_id1, + device_id1, + "display name1", + ) + ) + # Insert a user IP 2 + self.get_success( + self.store.store_device( + user_id2, + device_id2, + "display name2", + ) + ) + + self.get_success( + self.store.insert_client_ip( + user_id1, access_token1, "ip", "sync-v3-proxy-", device_id1 + ) + ) + self.get_success( + self.store.insert_client_ip( + user_id2, access_token2, "ip", "user_agent", device_id2 + ) + ) + # Force persisting to disk + self.reactor.advance(200) + + # We should see that in the DB + result = self.get_success( + self.store.db_pool.simple_select_list( + table="user_ips", + keyvalues={}, + retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"], + desc="get_user_ip_and_agents", + ) + ) + + # ensure user1 is filtered out + self.assertEqual( + result, + [ + { + "access_token": access_token2, + "ip": "ip", + "user_agent": "user_agent", + "device_id": device_id2, + "last_seen": 0, + } + ], + ) + class ClientIpAuthTestCase(unittest.HomeserverTestCase): servlets = [ From 85118420a226c5664f9cb8ea31d91cf842709740 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 23 Aug 2023 16:16:14 +0100 Subject: [PATCH 359/562] Switch `devenv` dependency in the nix development environment to the latest release (instead of the development branch) (#16063) --- changelog.d/16063.misc | 1 + flake.lock | 8 ++++---- flake.nix | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/16063.misc diff --git a/changelog.d/16063.misc b/changelog.d/16063.misc new file mode 100644 index 0000000000..069fc1adab --- /dev/null +++ 
b/changelog.d/16063.misc @@ -0,0 +1 @@ +Fix building the nix development environment on MacOS systems. diff --git a/flake.lock b/flake.lock index 084c40fe2f..d53be767a7 100644 --- a/flake.lock +++ b/flake.lock @@ -8,16 +8,16 @@ "pre-commit-hooks": "pre-commit-hooks" }, "locked": { - "lastModified": 1690534632, - "narHash": "sha256-kOXS9x5y17VKliC7wZxyszAYrWdRl1JzggbQl0gyo94=", + "lastModified": 1688058187, + "narHash": "sha256-ipDcc7qrucpJ0+0eYNlwnE+ISTcq4m03qW+CWUshRXI=", "owner": "cachix", "repo": "devenv", - "rev": "6568e7e485a46bbf32051e4d6347fa1fed8b2f25", + "rev": "c8778e3dc30eb9043e218aaa3861d42d4992de77", "type": "github" }, "original": { "owner": "cachix", - "ref": "main", + "ref": "v0.6.3", "repo": "devenv", "type": "github" } diff --git a/flake.nix b/flake.nix index e70a41dfc2..b89b6d9218 100644 --- a/flake.nix +++ b/flake.nix @@ -45,7 +45,7 @@ # Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS). systems.url = "github:nix-systems/default"; # A development environment manager built on Nix. See https://devenv.sh. - devenv.url = "github:cachix/devenv/main"; + devenv.url = "github:cachix/devenv/v0.6.3"; # Rust toolchain. rust-overlay.url = "github:oxalica/rust-overlay"; }; From 18279631e9555bd9032b993074e62c7af886d9cd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 23 Aug 2023 16:24:30 +0100 Subject: [PATCH 360/562] Fix rare deadlock when using read/write locks (#16169) --- changelog.d/16169.bugfix | 1 + .../02_read_write_locks_deadlock.sql.postgres | 37 ---------- .../04_read_write_locks_deadlock.sql.postgres | 71 +++++++++++++++++++ 3 files changed, 72 insertions(+), 37 deletions(-) create mode 100644 changelog.d/16169.bugfix delete mode 100644 synapse/storage/schema/main/delta/80/02_read_write_locks_deadlock.sql.postgres create mode 100644 synapse/storage/schema/main/delta/80/04_read_write_locks_deadlock.sql.postgres diff --git a/changelog.d/16169.bugfix b/changelog.d/16169.bugfix new file mode 100644 index 0000000000..ed8830692f --- /dev/null +++ b/changelog.d/16169.bugfix @@ -0,0 +1 @@ +Fix a rare race that could block new events from being sent for up to two minutes. Introduced in v1.90.0. diff --git a/synapse/storage/schema/main/delta/80/02_read_write_locks_deadlock.sql.postgres b/synapse/storage/schema/main/delta/80/02_read_write_locks_deadlock.sql.postgres deleted file mode 100644 index 401c42e18a..0000000000 --- a/synapse/storage/schema/main/delta/80/02_read_write_locks_deadlock.sql.postgres +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright 2023 The Matrix.org Foundation C.I.C - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - --- To avoid the possibility of a deadlock, lock the --- `worker_read_write_locks_mode` table so that we serialize inserts/deletes --- for a specific lock name/key. - -CREATE OR REPLACE FUNCTION delete_read_write_lock_parent_before() RETURNS trigger AS $$ -BEGIN - -- `PERFORM` is a `SELECT` which discards the rows. 
- PERFORM * FROM worker_read_write_locks_mode - WHERE - lock_name = OLD.lock_name - AND lock_key = OLD.lock_key - FOR UPDATE; - - RETURN OLD; -END -$$ -LANGUAGE plpgsql; - -DROP TRIGGER IF EXISTS delete_read_write_lock_parent_before_trigger ON worker_read_write_locks; -CREATE TRIGGER delete_read_write_lock_parent_before_trigger BEFORE DELETE ON worker_read_write_locks - FOR EACH ROW - EXECUTE PROCEDURE delete_read_write_lock_parent_before(); diff --git a/synapse/storage/schema/main/delta/80/04_read_write_locks_deadlock.sql.postgres b/synapse/storage/schema/main/delta/80/04_read_write_locks_deadlock.sql.postgres new file mode 100644 index 0000000000..0eb459c0b9 --- /dev/null +++ b/synapse/storage/schema/main/delta/80/04_read_write_locks_deadlock.sql.postgres @@ -0,0 +1,71 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +-- Remove a previous attempt to avoid deadlocks +DROP TRIGGER IF EXISTS delete_read_write_lock_parent_before_trigger ON worker_read_write_locks; +DROP FUNCTION IF EXISTS delete_read_write_lock_parent_before; + + +-- Ensure that we keep `worker_read_write_locks_mode` up to date whenever a lock +-- is released (i.e. a row deleted from `worker_read_write_locks`). Either we +-- update the `worker_read_write_locks_mode.token` to match another instance +-- that has currently acquired the lock, or we delete the row if nobody has +-- currently acquired a lock. +CREATE OR REPLACE FUNCTION delete_read_write_lock_parent() RETURNS trigger AS $$ +DECLARE + new_token TEXT; + mode_row_token TEXT; +BEGIN + -- Only update the token in `_mode` if its our token. This prevents + -- deadlocks. + -- + -- We shove the token into `mode_row_token`, as otherwise postgres complains + -- we're not using the returned data. + SELECT token INTO mode_row_token FROM worker_read_write_locks_mode + WHERE + lock_name = OLD.lock_name + AND lock_key = OLD.lock_key + AND token = OLD.token + FOR UPDATE; + + IF NOT FOUND THEN + RETURN NEW; + END IF; + + SELECT token INTO new_token FROM worker_read_write_locks + WHERE + lock_name = OLD.lock_name + AND lock_key = OLD.lock_key + LIMIT 1 FOR UPDATE SKIP LOCKED; + + IF NOT FOUND THEN + DELETE FROM worker_read_write_locks_mode + WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key AND token = OLD.token; + ELSE + UPDATE worker_read_write_locks_mode + SET token = new_token + WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key; + END IF; + + RETURN NEW; +END +$$ +LANGUAGE plpgsql; + +DROP TRIGGER IF EXISTS delete_read_write_lock_parent_trigger ON worker_read_write_locks; +CREATE TRIGGER delete_read_write_lock_parent_trigger AFTER DELETE ON worker_read_write_locks + FOR EACH ROW + EXECUTE PROCEDURE delete_read_write_lock_parent(); From 7064b4bcf311c45855c0f9d11a9db8a963b252b9 Mon Sep 17 00:00:00 2001 From: "H. 
Shay" Date: Wed, 23 Aug 2023 09:25:50 -0700 Subject: [PATCH 361/562] fix changelog --- CHANGES.md | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 666cd31ba0..33a1b808a6 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,68 @@ +# Synapse 1.91.0rc1 (2023-08-23) + +### Features + +- Implements an admin API to lock an user without deactivating them. Based on [MSC3939](https://github.com/matrix-org/matrix-spec-proposals/pull/3939). ([\#15870](https://github.com/matrix-org/synapse/issues/15870)) +- Allow specifying `client_secret_path` as alternative to `client_secret` for OIDC providers. This avoids leaking the client secret in the homeserver config. Contributed by @Ma27. ([\#16030](https://github.com/matrix-org/synapse/issues/16030)) +- Allow customising the IdP display name, icon, and brand for SAML and CAS providers (in addition to OIDC provider). ([\#16094](https://github.com/matrix-org/synapse/issues/16094)) +- Add an `admins` query parameter to the [List Accounts](https://matrix-org.github.io/synapse/v1.91/admin_api/user_admin_api.html#list-accounts) [admin API](https://matrix-org.github.io/synapse/v1.91/usage/administration/admin_api/index.html), to include only admins or to exclude admins in user queries. ([\#16114](https://github.com/matrix-org/synapse/issues/16114)) + +### Bugfixes + +- Fix long-standing bug where concurrent requests to change a user's push rules could cause a deadlock. Contributed by Nick @ Beeper (@fizzadar). ([\#16052](https://github.com/matrix-org/synapse/issues/16052)) +- Fix a long-standing bug in `/sync` where timeout=0 does not skip caching, resulting in slow calls in cases where there are no new changes. Contributed by @PlasmaIntec. ([\#16080](https://github.com/matrix-org/synapse/issues/16080)) +- Fix performance of state resolutions for large, old rooms that did not have the full auth chain persisted. ([\#16116](https://github.com/matrix-org/synapse/issues/16116)) +- Filter out user agent references to the sliding sync proxy and rust-sdk from the `user_daily_visits` table to ensure that Element X can be represented fully. ([\#16124](https://github.com/matrix-org/synapse/issues/16124)) +- User constent and third-party ID changes capability cannot be enabled when using experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support. ([\#16127](https://github.com/matrix-org/synapse/issues/16127), [\#16134](https://github.com/matrix-org/synapse/issues/16134)) +- Fix a rare race that could block new events from being sent for up to two minutes. Introduced in v1.90.0. ([\#16133](https://github.com/matrix-org/synapse/issues/16133), [\#16169](https://github.com/matrix-org/synapse/issues/16169)) +- Fix performance degredation when there are a lot of in-flight replication requests. ([\#16148](https://github.com/matrix-org/synapse/issues/16148)) +- Fix a bug introduced in 1.87 where synapse would send an excessive amount of federation requests to servers which have been offline for a long time. Contributed by Nico. ([\#16156](https://github.com/matrix-org/synapse/issues/16156), [\#16164](https://github.com/matrix-org/synapse/issues/16164)) + +### Improved Documentation + +- Structured logging docs: add a link to explain the ELK stack ([\#16091](https://github.com/matrix-org/synapse/issues/16091)) + +### Internal Changes + +- Update dehydrated devices implementation. 
([\#16010](https://github.com/matrix-org/synapse/issues/16010)) +- Fix database performance of read/write worker locks. ([\#16061](https://github.com/matrix-org/synapse/issues/16061)) +- Fix building the nix development environment on MacOS systems. ([\#16063](https://github.com/matrix-org/synapse/issues/16063)) +- Override global statement timeout when creating indexes in Postgres. ([\#16085](https://github.com/matrix-org/synapse/issues/16085)) +- Fix the type annotation on `run_db_interaction` in the Module API. ([\#16089](https://github.com/matrix-org/synapse/issues/16089)) +- Clean-up the presence code. ([\#16092](https://github.com/matrix-org/synapse/issues/16092)) +- Run `pyupgrade` for Python 3.8+. ([\#16110](https://github.com/matrix-org/synapse/issues/16110)) +- Rename pagination and purge locks and add comments to explain why they exist and how they work. ([\#16112](https://github.com/matrix-org/synapse/issues/16112)) +- Attempt to fix the twisted trunk job. ([\#16115](https://github.com/matrix-org/synapse/issues/16115)) +- Cache token introspection response from OIDC provider. ([\#16117](https://github.com/matrix-org/synapse/issues/16117)) +- Add cache to `get_server_keys_json_for_remote`. ([\#16123](https://github.com/matrix-org/synapse/issues/16123)) +- Add an admin endpoint to allow authorizing server to signal token revocations. ([\#16125](https://github.com/matrix-org/synapse/issues/16125)) +- Add response time metrics for introspection requests for delegated auth. ([\#16131](https://github.com/matrix-org/synapse/issues/16131)) +- [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861): allow impersonation by an admin user using `_oidc_admin_impersonate_user_id` query parameter. ([\#16132](https://github.com/matrix-org/synapse/issues/16132)) +- Increase performance of read/write locks. ([\#16149](https://github.com/matrix-org/synapse/issues/16149)) +- Improve presence tests. ([\#16150](https://github.com/matrix-org/synapse/issues/16150), [\#16151](https://github.com/matrix-org/synapse/issues/16151), [\#16158](https://github.com/matrix-org/synapse/issues/16158)) +- Raised the poetry-core version cap to 1.7.0. ([\#16152](https://github.com/matrix-org/synapse/issues/16152)) +- Fix assertion in user directory unit tests. ([\#16157](https://github.com/matrix-org/synapse/issues/16157)) +- Reduce scope of locks when paginating to alleviate DB contention. ([\#16159](https://github.com/matrix-org/synapse/issues/16159)) +- Reduce DB contention on worker locks. ([\#16160](https://github.com/matrix-org/synapse/issues/16160)) +- Task scheduler: mark task as active if we are scheduling as soon as possible. ([\#16165](https://github.com/matrix-org/synapse/issues/16165)) +- Implements a task scheduler for resumable potentially long running tasks. ([\#15891](https://github.com/matrix-org/synapse/issues/15891)) + +### Updates to locked dependencies + +* Bump click from 8.1.6 to 8.1.7. ([\#16145](https://github.com/matrix-org/synapse/issues/16145)) +* Bump gitpython from 3.1.31 to 3.1.32. ([\#16103](https://github.com/matrix-org/synapse/issues/16103)) +* Bump ijson from 3.2.1 to 3.2.3. ([\#16143](https://github.com/matrix-org/synapse/issues/16143)) +* Bump isort from 5.11.5 to 5.12.0. ([\#16108](https://github.com/matrix-org/synapse/issues/16108)) +* Bump log from 0.4.19 to 0.4.20. ([\#16109](https://github.com/matrix-org/synapse/issues/16109)) +* Bump pygithub from 1.59.0 to 1.59.1. 
([\#16144](https://github.com/matrix-org/synapse/issues/16144)) +* Bump sentry-sdk from 1.28.1 to 1.29.2. ([\#16142](https://github.com/matrix-org/synapse/issues/16142)) +* Bump serde from 1.0.183 to 1.0.184. ([\#16139](https://github.com/matrix-org/synapse/issues/16139)) +* Bump txredisapi from 1.4.9 to 1.4.10. ([\#16107](https://github.com/matrix-org/synapse/issues/16107)) +* Bump types-bleach from 6.0.0.3 to 6.0.0.4. ([\#16106](https://github.com/matrix-org/synapse/issues/16106)) +* Bump types-pillow from 10.0.0.1 to 10.0.0.2. ([\#16105](https://github.com/matrix-org/synapse/issues/16105)) +* Bump types-pyopenssl from 23.2.0.1 to 23.2.0.2. ([\#16146](https://github.com/matrix-org/synapse/issues/16146)) + +>>>>>>> Stashed changes # Synapse 1.90.0 (2023-08-15) No significant changes since 1.90.0rc1. From 020ff1afe33bfcca89ad78a941c7e2334e93c621 Mon Sep 17 00:00:00 2001 From: "H. Shay" Date: Wed, 23 Aug 2023 09:36:34 -0700 Subject: [PATCH 362/562] fix changelog --- CHANGES.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 33a1b808a6..bc0d4fe8c8 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -62,7 +62,6 @@ * Bump types-pillow from 10.0.0.1 to 10.0.0.2. ([\#16105](https://github.com/matrix-org/synapse/issues/16105)) * Bump types-pyopenssl from 23.2.0.1 to 23.2.0.2. ([\#16146](https://github.com/matrix-org/synapse/issues/16146)) ->>>>>>> Stashed changes # Synapse 1.90.0 (2023-08-15) No significant changes since 1.90.0rc1. From 23f88f9c595bc0c53d83b3ac95c77b68c5d86f25 Mon Sep 17 00:00:00 2001 From: "H. Shay" Date: Wed, 23 Aug 2023 09:47:29 -0700 Subject: [PATCH 363/562] 1.91.0rc1 --- CHANGES.md | 64 +++++++++++++++++++++++++++++++++++++++ changelog.d/15870.feature | 1 - changelog.d/15891.feature | 1 - changelog.d/16010.misc | 1 - changelog.d/16030.feature | 1 - changelog.d/16052.bugfix | 1 - changelog.d/16061.misc | 1 - changelog.d/16063.misc | 1 - changelog.d/16080.bugfix | 1 - changelog.d/16085.misc | 1 - changelog.d/16089.misc | 1 - changelog.d/16091.doc | 1 - changelog.d/16092.misc | 1 - changelog.d/16094.feature | 1 - changelog.d/16110.misc | 1 - changelog.d/16112.misc | 1 - changelog.d/16114.feature | 1 - changelog.d/16115.misc | 1 - changelog.d/16116.bugfix | 1 - changelog.d/16117.misc | 1 - changelog.d/16123.misc | 1 - changelog.d/16124.bugfix | 1 - changelog.d/16125.misc | 1 - changelog.d/16127.bugfix | 1 - changelog.d/16131.misc | 1 - changelog.d/16132.misc | 1 - changelog.d/16133.bugfix | 1 - changelog.d/16134.bugfix | 1 - changelog.d/16148.bugfix | 1 - changelog.d/16149.misc | 1 - changelog.d/16150.misc | 1 - changelog.d/16151.misc | 1 - changelog.d/16152.misc | 1 - changelog.d/16156.bugfix | 1 - changelog.d/16157.misc | 1 - changelog.d/16158.misc | 1 - changelog.d/16159.misc | 1 - changelog.d/16160.misc | 1 - changelog.d/16164.bugfix | 1 - changelog.d/16165.misc | 1 - changelog.d/16169.bugfix | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 43 files changed, 71 insertions(+), 41 deletions(-) delete mode 100644 changelog.d/15870.feature delete mode 100644 changelog.d/15891.feature delete mode 100644 changelog.d/16010.misc delete mode 100644 changelog.d/16030.feature delete mode 100644 changelog.d/16052.bugfix delete mode 100644 changelog.d/16061.misc delete mode 100644 changelog.d/16063.misc delete mode 100644 changelog.d/16080.bugfix delete mode 100644 changelog.d/16085.misc delete mode 100644 changelog.d/16089.misc delete mode 100644 changelog.d/16091.doc delete mode 100644 changelog.d/16092.misc delete mode 100644 changelog.d/16094.feature 
delete mode 100644 changelog.d/16110.misc delete mode 100644 changelog.d/16112.misc delete mode 100644 changelog.d/16114.feature delete mode 100644 changelog.d/16115.misc delete mode 100644 changelog.d/16116.bugfix delete mode 100644 changelog.d/16117.misc delete mode 100644 changelog.d/16123.misc delete mode 100644 changelog.d/16124.bugfix delete mode 100644 changelog.d/16125.misc delete mode 100644 changelog.d/16127.bugfix delete mode 100644 changelog.d/16131.misc delete mode 100644 changelog.d/16132.misc delete mode 100644 changelog.d/16133.bugfix delete mode 100644 changelog.d/16134.bugfix delete mode 100644 changelog.d/16148.bugfix delete mode 100644 changelog.d/16149.misc delete mode 100644 changelog.d/16150.misc delete mode 100644 changelog.d/16151.misc delete mode 100644 changelog.d/16152.misc delete mode 100644 changelog.d/16156.bugfix delete mode 100644 changelog.d/16157.misc delete mode 100644 changelog.d/16158.misc delete mode 100644 changelog.d/16159.misc delete mode 100644 changelog.d/16160.misc delete mode 100644 changelog.d/16164.bugfix delete mode 100644 changelog.d/16165.misc delete mode 100644 changelog.d/16169.bugfix diff --git a/CHANGES.md b/CHANGES.md index bc0d4fe8c8..fcb50cd8c2 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -2,6 +2,70 @@ ### Features +- Implements an admin API to lock a user without deactivating them. Based on [MSC3939](https://github.com/matrix-org/matrix-spec-proposals/pull/3939). ([\#15870](https://github.com/matrix-org/synapse/issues/15870)) +- Implements a task scheduler for resumable potentially long running tasks. ([\#15891](https://github.com/matrix-org/synapse/issues/15891)) +- Allow specifying `client_secret_path` as alternative to `client_secret` for OIDC providers. This avoids leaking the client secret in the homeserver config. Contributed by @Ma27. ([\#16030](https://github.com/matrix-org/synapse/issues/16030)) +- Allow customising the IdP display name, icon, and brand for SAML and CAS providers (in addition to OIDC provider). ([\#16094](https://github.com/matrix-org/synapse/issues/16094)) +- Add an `admins` query parameter to the [List Accounts](https://matrix-org.github.io/synapse/v1.91/admin_api/user_admin_api.html#list-accounts) [admin API](https://matrix-org.github.io/synapse/v1.91/usage/administration/admin_api/index.html), to include only admins or to exclude admins in user queries. ([\#16114](https://github.com/matrix-org/synapse/issues/16114)) + +### Bugfixes + +- Fix a long-standing bug where concurrent requests to change a user's push rules could cause a deadlock. Contributed by Nick @ Beeper (@fizzadar). ([\#16052](https://github.com/matrix-org/synapse/issues/16052)) +- Fix a long-standing bug in `/sync` where timeout=0 does not skip caching, resulting in slow calls in cases where there are no new changes. Contributed by @PlasmaIntec. ([\#16080](https://github.com/matrix-org/synapse/issues/16080)) +- Fix performance of state resolutions for large, old rooms that did not have the full auth chain persisted. ([\#16116](https://github.com/matrix-org/synapse/issues/16116)) +- Filter out user agent references to the sliding sync proxy and rust-sdk from the user_daily_visits table to ensure that Element X can be represented fully. ([\#16124](https://github.com/matrix-org/synapse/issues/16124)) +- User consent and 3-PID changes capability cannot be enabled when using experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support.
([\#16127](https://github.com/matrix-org/synapse/issues/16127), [\#16134](https://github.com/matrix-org/synapse/issues/16134)) +- Fix a rare race that could block new events from being sent for up to two minutes. Introduced in v1.90.0. ([\#16133](https://github.com/matrix-org/synapse/issues/16133), [\#16169](https://github.com/matrix-org/synapse/issues/16169)) +- Fix performance degradation when there are a lot of in-flight replication requests. ([\#16148](https://github.com/matrix-org/synapse/issues/16148)) +- Fix a bug introduced in 1.87 where synapse would send an excessive amount of federation requests to servers which have been offline for a long time. Contributed by Nico. ([\#16156](https://github.com/matrix-org/synapse/issues/16156), [\#16164](https://github.com/matrix-org/synapse/issues/16164)) + +### Improved Documentation + +- Structured logging docs: add a link to explain the ELK stack ([\#16091](https://github.com/matrix-org/synapse/issues/16091)) + +### Internal Changes + +- Update dehydrated devices implementation. ([\#16010](https://github.com/matrix-org/synapse/issues/16010)) +- Fix database performance of read/write worker locks. ([\#16061](https://github.com/matrix-org/synapse/issues/16061)) +- Fix building the nix development environment on MacOS systems. ([\#16063](https://github.com/matrix-org/synapse/issues/16063)) +- Override global statement timeout when creating indexes in Postgres. ([\#16085](https://github.com/matrix-org/synapse/issues/16085)) +- Fix the type annotation on `run_db_interaction` in the Module API. ([\#16089](https://github.com/matrix-org/synapse/issues/16089)) +- Clean-up the presence code. ([\#16092](https://github.com/matrix-org/synapse/issues/16092)) +- Run `pyupgrade` for Python 3.8+. ([\#16110](https://github.com/matrix-org/synapse/issues/16110)) +- Rename pagination and purge locks and add comments to explain why they exist and how they work. ([\#16112](https://github.com/matrix-org/synapse/issues/16112)) +- Attempt to fix the twisted trunk job. ([\#16115](https://github.com/matrix-org/synapse/issues/16115)) +- Cache token introspection response from OIDC provider. ([\#16117](https://github.com/matrix-org/synapse/issues/16117)) +- Add cache to `get_server_keys_json_for_remote`. ([\#16123](https://github.com/matrix-org/synapse/issues/16123)) +- Add an admin endpoint to allow authorizing server to signal token revocations. ([\#16125](https://github.com/matrix-org/synapse/issues/16125)) +- Add response time metrics for introspection requests for delegated auth. ([\#16131](https://github.com/matrix-org/synapse/issues/16131)) +- MSC3861: allow impersonation by an admin user using `_oidc_admin_impersonate_user_id` query parameter. ([\#16132](https://github.com/matrix-org/synapse/issues/16132)) +- Increase performance of read/write locks. ([\#16149](https://github.com/matrix-org/synapse/issues/16149)) +- Improve presence tests. ([\#16150](https://github.com/matrix-org/synapse/issues/16150), [\#16151](https://github.com/matrix-org/synapse/issues/16151), [\#16158](https://github.com/matrix-org/synapse/issues/16158)) +- Raised the poetry-core version cap to 1.7.0. ([\#16152](https://github.com/matrix-org/synapse/issues/16152)) +- Fix assertion in user directory unit tests. ([\#16157](https://github.com/matrix-org/synapse/issues/16157)) +- Reduce scope of locks when paginating to alleviate DB contention. ([\#16159](https://github.com/matrix-org/synapse/issues/16159)) +- Reduce DB contention on worker locks.
([\#16160](https://github.com/matrix-org/synapse/issues/16160)) +- Task scheduler: mark task as active if we are scheduling as soon as possible. ([\#16165](https://github.com/matrix-org/synapse/issues/16165)) + +### Updates to locked dependencies + +* Bump click from 8.1.6 to 8.1.7. ([\#16145](https://github.com/matrix-org/synapse/issues/16145)) +* Bump gitpython from 3.1.31 to 3.1.32. ([\#16103](https://github.com/matrix-org/synapse/issues/16103)) +* Bump ijson from 3.2.1 to 3.2.3. ([\#16143](https://github.com/matrix-org/synapse/issues/16143)) +* Bump isort from 5.11.5 to 5.12.0. ([\#16108](https://github.com/matrix-org/synapse/issues/16108)) +* Bump log from 0.4.19 to 0.4.20. ([\#16109](https://github.com/matrix-org/synapse/issues/16109)) +* Bump pygithub from 1.59.0 to 1.59.1. ([\#16144](https://github.com/matrix-org/synapse/issues/16144)) +* Bump sentry-sdk from 1.28.1 to 1.29.2. ([\#16142](https://github.com/matrix-org/synapse/issues/16142)) +* Bump serde from 1.0.183 to 1.0.184. ([\#16139](https://github.com/matrix-org/synapse/issues/16139)) +* Bump txredisapi from 1.4.9 to 1.4.10. ([\#16107](https://github.com/matrix-org/synapse/issues/16107)) +* Bump types-bleach from 6.0.0.3 to 6.0.0.4. ([\#16106](https://github.com/matrix-org/synapse/issues/16106)) +* Bump types-pillow from 10.0.0.1 to 10.0.0.2. ([\#16105](https://github.com/matrix-org/synapse/issues/16105)) +* Bump types-pyopenssl from 23.2.0.1 to 23.2.0.2. ([\#16146](https://github.com/matrix-org/synapse/issues/16146)) + +# Synapse 1.91.0rc1 (2023-08-23) + +### Features + - Implements an admin API to lock a user without deactivating them. Based on [MSC3939](https://github.com/matrix-org/matrix-spec-proposals/pull/3939). ([\#15870](https://github.com/matrix-org/synapse/issues/15870)) - Allow specifying `client_secret_path` as alternative to `client_secret` for OIDC providers. This avoids leaking the client secret in the homeserver config. Contributed by @Ma27. ([\#16030](https://github.com/matrix-org/synapse/issues/16030)) - Allow customising the IdP display name, icon, and brand for SAML and CAS providers (in addition to OIDC provider). ([\#16094](https://github.com/matrix-org/synapse/issues/16094)) diff --git a/changelog.d/15870.feature b/changelog.d/15870.feature deleted file mode 100644 index 527220d637..0000000000 --- a/changelog.d/15870.feature +++ /dev/null @@ -1 +0,0 @@ -Implements an admin API to lock a user without deactivating them. Based on [MSC3939](https://github.com/matrix-org/matrix-spec-proposals/pull/3939). diff --git a/changelog.d/15891.feature b/changelog.d/15891.feature deleted file mode 100644 index 5024b5adc4..0000000000 --- a/changelog.d/15891.feature +++ /dev/null @@ -1 +0,0 @@ -Implements a task scheduler for resumable potentially long running tasks. diff --git a/changelog.d/16010.misc b/changelog.d/16010.misc deleted file mode 100644 index 1e1a148069..0000000000 --- a/changelog.d/16010.misc +++ /dev/null @@ -1 +0,0 @@ -Update dehydrated devices implementation. diff --git a/changelog.d/16030.feature b/changelog.d/16030.feature deleted file mode 100644 index c2f068085f..0000000000 --- a/changelog.d/16030.feature +++ /dev/null @@ -1 +0,0 @@ -Allow specifying `client_secret_path` as alternative to `client_secret` for OIDC providers. This avoids leaking the client secret in the homeserver config. Contributed by @Ma27.
diff --git a/changelog.d/16052.bugfix b/changelog.d/16052.bugfix deleted file mode 100644 index 3c7a60f226..0000000000 --- a/changelog.d/16052.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where concurrent requests to change a user's push rules could cause a deadlock. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/16061.misc b/changelog.d/16061.misc deleted file mode 100644 index 37928b670f..0000000000 --- a/changelog.d/16061.misc +++ /dev/null @@ -1 +0,0 @@ -Fix database performance of read/write worker locks. diff --git a/changelog.d/16063.misc b/changelog.d/16063.misc deleted file mode 100644 index 069fc1adab..0000000000 --- a/changelog.d/16063.misc +++ /dev/null @@ -1 +0,0 @@ -Fix building the nix development environment on MacOS systems. diff --git a/changelog.d/16080.bugfix b/changelog.d/16080.bugfix deleted file mode 100644 index 1ad6fb3c52..0000000000 --- a/changelog.d/16080.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug in `/sync` where timeout=0 does not skip caching, resulting in slow calls in cases where there are no new changes. Contributed by @PlasmaIntec. \ No newline at end of file diff --git a/changelog.d/16085.misc b/changelog.d/16085.misc deleted file mode 100644 index 7b7a95edd4..0000000000 --- a/changelog.d/16085.misc +++ /dev/null @@ -1 +0,0 @@ -Override global statement timeout when creating indexes in Postgres. diff --git a/changelog.d/16089.misc b/changelog.d/16089.misc deleted file mode 100644 index 8c302e6884..0000000000 --- a/changelog.d/16089.misc +++ /dev/null @@ -1 +0,0 @@ -Fix the type annotation on `run_db_interaction` in the Module API. \ No newline at end of file diff --git a/changelog.d/16091.doc b/changelog.d/16091.doc deleted file mode 100644 index a043df4efd..0000000000 --- a/changelog.d/16091.doc +++ /dev/null @@ -1 +0,0 @@ -Structured logging docs: add a link to explain the ELK stack diff --git a/changelog.d/16092.misc b/changelog.d/16092.misc deleted file mode 100644 index b520807771..0000000000 --- a/changelog.d/16092.misc +++ /dev/null @@ -1 +0,0 @@ -Clean-up the presence code. diff --git a/changelog.d/16094.feature b/changelog.d/16094.feature deleted file mode 100644 index 3be71badb9..0000000000 --- a/changelog.d/16094.feature +++ /dev/null @@ -1 +0,0 @@ -Allow customising the IdP display name, icon, and brand for SAML and CAS providers (in addition to OIDC provider). diff --git a/changelog.d/16110.misc b/changelog.d/16110.misc deleted file mode 100644 index 68efe86ddc..0000000000 --- a/changelog.d/16110.misc +++ /dev/null @@ -1 +0,0 @@ -Run `pyupgrade` for Python 3.8+. diff --git a/changelog.d/16112.misc b/changelog.d/16112.misc deleted file mode 100644 index 05a58c1348..0000000000 --- a/changelog.d/16112.misc +++ /dev/null @@ -1 +0,0 @@ -Rename pagination and purge locks and add comments to explain why they exist and how they work. diff --git a/changelog.d/16114.feature b/changelog.d/16114.feature deleted file mode 100644 index e937a3b029..0000000000 --- a/changelog.d/16114.feature +++ /dev/null @@ -1 +0,0 @@ -Add an `admins` query parameter to the [List Accounts](https://matrix-org.github.io/synapse/v1.91/admin_api/user_admin_api.html#list-accounts) [admin API](https://matrix-org.github.io/synapse/v1.91/usage/administration/admin_api/index.html), to include only admins or to exclude admins in user queries.
\ No newline at end of file diff --git a/changelog.d/16115.misc b/changelog.d/16115.misc deleted file mode 100644 index f325d2a31d..0000000000 --- a/changelog.d/16115.misc +++ /dev/null @@ -1 +0,0 @@ -Attempt to fix the twisted trunk job. diff --git a/changelog.d/16116.bugfix b/changelog.d/16116.bugfix deleted file mode 100644 index f57a26ae39..0000000000 --- a/changelog.d/16116.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix performance of state resolutions for large, old rooms that did not have the full auth chain persisted. diff --git a/changelog.d/16117.misc b/changelog.d/16117.misc deleted file mode 100644 index f33fa6dc17..0000000000 --- a/changelog.d/16117.misc +++ /dev/null @@ -1 +0,0 @@ -Cache token introspection response from OIDC provider. diff --git a/changelog.d/16123.misc b/changelog.d/16123.misc deleted file mode 100644 index b7c6b7c2f2..0000000000 --- a/changelog.d/16123.misc +++ /dev/null @@ -1 +0,0 @@ -Add cache to `get_server_keys_json_for_remote`. diff --git a/changelog.d/16124.bugfix b/changelog.d/16124.bugfix deleted file mode 100644 index fb1d501a2f..0000000000 --- a/changelog.d/16124.bugfix +++ /dev/null @@ -1 +0,0 @@ -Filter out user agent references to the sliding sync proxy and rust-sdk from the user_daily_visits table to ensure that Element X can be represented fully. diff --git a/changelog.d/16125.misc b/changelog.d/16125.misc deleted file mode 100644 index 2f1bf23108..0000000000 --- a/changelog.d/16125.misc +++ /dev/null @@ -1 +0,0 @@ -Add an admin endpoint to allow authorizing server to signal token revocations. diff --git a/changelog.d/16127.bugfix b/changelog.d/16127.bugfix deleted file mode 100644 index 9ce5f4a705..0000000000 --- a/changelog.d/16127.bugfix +++ /dev/null @@ -1 +0,0 @@ -User consent and 3-PID changes capability cannot be enabled when using experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support. diff --git a/changelog.d/16131.misc b/changelog.d/16131.misc deleted file mode 100644 index 4f04699512..0000000000 --- a/changelog.d/16131.misc +++ /dev/null @@ -1 +0,0 @@ -Add response time metrics for introspection requests for delegated auth. diff --git a/changelog.d/16132.misc b/changelog.d/16132.misc deleted file mode 100644 index aca26079d8..0000000000 --- a/changelog.d/16132.misc +++ /dev/null @@ -1 +0,0 @@ -MSC3861: allow impersonation by an admin user using `_oidc_admin_impersonate_user_id` query parameter. diff --git a/changelog.d/16133.bugfix b/changelog.d/16133.bugfix deleted file mode 100644 index ed8830692f..0000000000 --- a/changelog.d/16133.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a rare race that could block new events from being sent for up to two minutes. Introduced in v1.90.0. diff --git a/changelog.d/16134.bugfix b/changelog.d/16134.bugfix deleted file mode 100644 index 9ce5f4a705..0000000000 --- a/changelog.d/16134.bugfix +++ /dev/null @@ -1 +0,0 @@ -User consent and 3-PID changes capability cannot be enabled when using experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support. diff --git a/changelog.d/16148.bugfix b/changelog.d/16148.bugfix deleted file mode 100644 index fea316f856..0000000000 --- a/changelog.d/16148.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix performance degradation when there are a lot of in-flight replication requests. diff --git a/changelog.d/16149.misc b/changelog.d/16149.misc deleted file mode 100644 index 8b6674d2aa..0000000000 --- a/changelog.d/16149.misc +++ /dev/null @@ -1 +0,0 @@ -Increase performance of read/write locks.
diff --git a/changelog.d/16150.misc b/changelog.d/16150.misc deleted file mode 100644 index 41059378c5..0000000000 --- a/changelog.d/16150.misc +++ /dev/null @@ -1 +0,0 @@ -Improve presence tests. diff --git a/changelog.d/16151.misc b/changelog.d/16151.misc deleted file mode 100644 index 41059378c5..0000000000 --- a/changelog.d/16151.misc +++ /dev/null @@ -1 +0,0 @@ -Improve presence tests. diff --git a/changelog.d/16152.misc b/changelog.d/16152.misc deleted file mode 100644 index f8bf9f2c52..0000000000 --- a/changelog.d/16152.misc +++ /dev/null @@ -1 +0,0 @@ -Raised the poetry-core version cap to 1.7.0. diff --git a/changelog.d/16156.bugfix b/changelog.d/16156.bugfix deleted file mode 100644 index 17284297cf..0000000000 --- a/changelog.d/16156.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.87 where synapse would send an excessive amount of federation requests to servers which have been offline for a long time. Contributed by Nico. diff --git a/changelog.d/16157.misc b/changelog.d/16157.misc deleted file mode 100644 index c9d8999cca..0000000000 --- a/changelog.d/16157.misc +++ /dev/null @@ -1 +0,0 @@ -Fix assertion in user directory unit tests. diff --git a/changelog.d/16158.misc b/changelog.d/16158.misc deleted file mode 100644 index 41059378c5..0000000000 --- a/changelog.d/16158.misc +++ /dev/null @@ -1 +0,0 @@ -Improve presence tests. diff --git a/changelog.d/16159.misc b/changelog.d/16159.misc deleted file mode 100644 index 04cdd1afaf..0000000000 --- a/changelog.d/16159.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce scope of locks when paginating to alleviate DB contention. diff --git a/changelog.d/16160.misc b/changelog.d/16160.misc deleted file mode 100644 index 78803b7bcd..0000000000 --- a/changelog.d/16160.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce DB contention on worker locks. diff --git a/changelog.d/16164.bugfix b/changelog.d/16164.bugfix deleted file mode 100644 index 17284297cf..0000000000 --- a/changelog.d/16164.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.87 where synapse would send an excessive amount of federation requests to servers which have been offline for a long time. Contributed by Nico. diff --git a/changelog.d/16165.misc b/changelog.d/16165.misc deleted file mode 100644 index b4d514d249..0000000000 --- a/changelog.d/16165.misc +++ /dev/null @@ -1 +0,0 @@ -Task scheduler: mark task as active if we are scheduling as soon as possible. diff --git a/changelog.d/16169.bugfix b/changelog.d/16169.bugfix deleted file mode 100644 index ed8830692f..0000000000 --- a/changelog.d/16169.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a rare race that could block new events from being sent for up to two minutes. Introduced in v1.90.0. diff --git a/debian/changelog b/debian/changelog index ad9a4b3c8c..8b0615e421 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.91.0~rc1) stable; urgency=medium + + * New Synapse release 1.91.0rc1. + + -- Synapse Packaging team Wed, 23 Aug 2023 09:47:18 -0700 + matrix-synapse-py3 (1.90.0) stable; urgency=medium * New Synapse release 1.90.0. 
diff --git a/pyproject.toml b/pyproject.toml index 0585a9b01e..c2421d7257 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.90.0" +version = "1.91.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 33fa82a34cb0001787889be88c3817688ce2f76d Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 23 Aug 2023 13:22:34 -0400 Subject: [PATCH 364/562] Stabilize support for MSC3958 (suppress notifications from edits). (#16113) --- changelog.d/16113.feature | 1 + rust/benches/evaluator.rs | 1 - rust/src/push/base_rules.rs | 2 +- rust/src/push/evaluator.rs | 2 +- rust/src/push/mod.rs | 9 --------- stubs/synapse/synapse_rust/push.pyi | 1 - synapse/config/experimental.py | 5 ----- synapse/storage/databases/main/push_rule.py | 1 - tests/push/test_bulk_push_rule_evaluator.py | 1 - 9 files changed, 3 insertions(+), 20 deletions(-) create mode 100644 changelog.d/16113.feature diff --git a/changelog.d/16113.feature b/changelog.d/16113.feature new file mode 100644 index 0000000000..69fdaaebac --- /dev/null +++ b/changelog.d/16113.feature @@ -0,0 +1 @@ +Suppress notifications from message edits per [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958). diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 6e1eab2a3b..14071105a0 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -197,7 +197,6 @@ fn bench_eval_message(b: &mut Bencher) { false, false, false, - false, ); b.iter(|| eval.run(&rules, Some("bob"), Some("person"))); diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index 00baceda91..59fd27665a 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -228,7 +228,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ // We don't want to notify on edits *unless* the edit directly mentions a // user, which is handled above. 
PushRule { - rule_id: Cow::Borrowed("global/override/.org.matrix.msc3958.suppress_edits"), + rule_id: Cow::Borrowed("global/override/.m.rule.suppress_edits"), priority_class: 5, conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventPropertyIs( EventPropertyIsCondition { diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 48e670478b..5b9bf9b26a 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -564,7 +564,7 @@ fn test_requires_room_version_supports_condition() { }; let rules = PushRules::new(vec![custom_rule]); result = evaluator.run( - &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false), + &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true), None, None, ); diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index 829fb79d0e..8e91f506cc 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -527,7 +527,6 @@ pub struct FilteredPushRules { msc1767_enabled: bool, msc3381_polls_enabled: bool, msc3664_enabled: bool, - msc3958_suppress_edits_enabled: bool, } #[pymethods] @@ -539,7 +538,6 @@ impl FilteredPushRules { msc1767_enabled: bool, msc3381_polls_enabled: bool, msc3664_enabled: bool, - msc3958_suppress_edits_enabled: bool, ) -> Self { Self { push_rules, @@ -547,7 +545,6 @@ impl FilteredPushRules { msc1767_enabled, msc3381_polls_enabled, msc3664_enabled, - msc3958_suppress_edits_enabled, } } @@ -584,12 +581,6 @@ impl FilteredPushRules { return false; } - if !self.msc3958_suppress_edits_enabled - && rule.rule_id == "global/override/.org.matrix.msc3958.suppress_edits" - { - return false; - } - true }) .map(|r| { diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index d573a37b9a..1f432d4ecf 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -46,7 +46,6 @@ class FilteredPushRules: msc1767_enabled: bool, msc3381_polls_enabled: bool, msc3664_enabled: bool, - msc3958_suppress_edits_enabled: bool, ): ... def rules(self) -> Collection[Tuple[PushRule, bool]]: ... diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 277ea4675b..84d6dd13af 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -383,11 +383,6 @@ class ExperimentalConfig(Config): # MSC3391: Removing account data. self.msc3391_enabled = experimental.get("msc3391_enabled", False) - # MSC3959: Do not generate notifications for edits. 
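With the rule promoted to the stable identifier `.m.rule.suppress_edits` and the experimental gate removed, edit suppression is now always part of the default rule set. A user who does want notifications for edits can still disable the override rule per account through the standard Matrix client-server push-rules API. A minimal sketch (the homeserver URL and access token are placeholders):

```python
import requests

# Placeholder values: substitute a real homeserver and access token.
HOMESERVER = "https://matrix.example.org"
ACCESS_TOKEN = "syt_example_token"

# Disable the (now stable) edit-suppression override rule for this account only.
resp = requests.put(
    f"{HOMESERVER}/_matrix/client/v3/pushrules/global/override/"
    ".m.rule.suppress_edits/enabled",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    json={"enabled": False},
)
resp.raise_for_status()
```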
- self.msc3958_supress_edit_notifs = experimental.get( - "msc3958_supress_edit_notifs", False - ) - # MSC3967: Do not require UIA when first uploading cross signing keys self.msc3967_enabled = experimental.get("msc3967_enabled", False) diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index c13c0bc7d7..bec0dc2afe 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -88,7 +88,6 @@ def _load_rules( msc1767_enabled=experimental_config.msc1767_enabled, msc3664_enabled=experimental_config.msc3664_enabled, msc3381_polls_enabled=experimental_config.msc3381_polls_enabled, - msc3958_suppress_edits_enabled=experimental_config.msc3958_supress_edit_notifs, ) return filtered_rules diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index 829b9df83d..937e6ebb7d 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -382,7 +382,6 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): ) ) - @override_config({"experimental_features": {"msc3958_supress_edit_notifs": True}}) def test_suppress_edits(self) -> None: """Under the default push rules, event edits should not generate notifications.""" bulk_evaluator = BulkPushRuleEvaluator(self.hs) From e3333bacffda6a9b27044570b11e684623b01d93 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 24 Aug 2023 08:49:37 -0400 Subject: [PATCH 365/562] Bump serde_json from 1.0.104 to 1.0.105 (#16140) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 61c0f1bd04..f53e4f6205 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -352,9 +352,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.104" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" +checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" dependencies = [ "itoa", "ryu", From 0538e3e2dba8ff5bbc13f11d796e696f6ba8a7c7 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Thu, 24 Aug 2023 15:40:26 +0100 Subject: [PATCH 366/562] Add `Retry-After` to M_LIMIT_EXCEEDED error responses (#16136) Implements MSC4041 behind an experimental configuration flag. --- changelog.d/16136.feature | 1 + synapse/api/errors.py | 10 ++++++++- synapse/config/experimental.py | 9 +++++++++ tests/api/test_errors.py | 36 +++++++++++++++++++++++++++++++++ tests/rest/client/test_login.py | 24 ++++++++++++++++------ 5 files changed, 73 insertions(+), 7 deletions(-) create mode 100644 changelog.d/16136.feature create mode 100644 tests/api/test_errors.py diff --git a/changelog.d/16136.feature b/changelog.d/16136.feature new file mode 100644 index 0000000000..4ad98a88c3 --- /dev/null +++ b/changelog.d/16136.feature @@ -0,0 +1 @@ +Return a `Retry-After` with `M_LIMIT_EXCEEDED` error responses. 
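The `errors.py` diff below attaches the header, rounding `retry_after_ms` up to whole seconds with `math.ceil`, and the header is only emitted when `msc4041_enabled` is set under `experimental_features`. As a usage illustration (a sketch, not part of the patch; the URL and payload are placeholders), a client might consume the header like this, falling back to the older `retry_after_ms` body field:

```python
import time

import requests


def post_with_retry(url: str, payload: dict, max_attempts: int = 3) -> requests.Response:
    """Hypothetical helper: retry rate-limited requests using Retry-After."""
    for _ in range(max_attempts):
        resp = requests.post(url, json=payload)
        if resp.status_code != 429:
            return resp
        # Prefer the Retry-After header (whole seconds, rounded up by the
        # server); fall back to the legacy retry_after_ms JSON field.
        header = resp.headers.get("Retry-After")
        delay = int(header) if header else resp.json().get("retry_after_ms", 1000) / 1000
        time.sleep(delay)
    return resp
```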
diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 7ffd72c42c..578e798773 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -16,6 +16,7 @@ """Contains exceptions and error codes.""" import logging +import math import typing from enum import Enum from http import HTTPStatus @@ -503,6 +504,8 @@ class InvalidCaptchaError(SynapseError): class LimitExceededError(SynapseError): """A client has sent too many requests and is being throttled.""" + include_retry_after_header = False + def __init__( self, code: int = 429, @@ -510,7 +513,12 @@ class LimitExceededError(SynapseError): retry_after_ms: Optional[int] = None, errcode: str = Codes.LIMIT_EXCEEDED, ): - super().__init__(code, msg, errcode) + headers = ( + {"Retry-After": str(math.ceil(retry_after_ms / 1000))} + if self.include_retry_after_header and retry_after_ms is not None + else None + ) + super().__init__(code, msg, errcode, headers=headers) self.retry_after_ms = retry_after_ms def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 84d6dd13af..cabe0d4397 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -18,6 +18,7 @@ from typing import TYPE_CHECKING, Any, Optional import attr import attr.validators +from synapse.api.errors import LimitExceededError from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions from synapse.config import ConfigError from synapse.config._base import Config, RootConfig @@ -406,3 +407,11 @@ class ExperimentalConfig(Config): self.msc4010_push_rules_account_data = experimental.get( "msc4010_push_rules_account_data", False ) + + # MSC4041: Use HTTP header Retry-After to enable library-assisted retry handling + # + # This is a bit hacky, but the most reasonable way to *always* include the + # headers. + LimitExceededError.include_retry_after_header = experimental.get( + "msc4041_enabled", False + ) diff --git a/tests/api/test_errors.py b/tests/api/test_errors.py new file mode 100644 index 0000000000..319abfe63d --- /dev/null +++ b/tests/api/test_errors.py @@ -0,0 +1,36 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.api.errors import LimitExceededError + +from tests import unittest + + +class ErrorsTestCase(unittest.TestCase): + # Create a sub-class to avoid mutating the class-level property.
+ class LimitExceededErrorHeaders(LimitExceededError): + include_retry_after_header = True + + def test_limit_exceeded_header(self) -> None: + err = ErrorsTestCase.LimitExceededErrorHeaders(retry_after_ms=100) + self.assertEqual(err.error_dict(None).get("retry_after_ms"), 100) + assert err.headers is not None + self.assertEqual(err.headers.get("Retry-After"), "1") + + def test_limit_exceeded_rounding(self) -> None: + err = ErrorsTestCase.LimitExceededErrorHeaders(retry_after_ms=3001) + self.assertEqual(err.error_dict(None).get("retry_after_ms"), 3001) + assert err.headers is not None + self.assertEqual(err.headers.get("Retry-After"), "4") diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index ffbc13bb8d..62c32cae5e 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -169,7 +169,8 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): # which sets these values to 10000, but as we're overriding the entire # rc_login dict here, we need to set this manually as well "account": {"per_second": 10000, "burst_count": 10000}, - } + }, + "experimental_features": {"msc4041_enabled": True}, } ) def test_POST_ratelimiting_per_address(self) -> None: @@ -189,12 +190,15 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): if i == 5: self.assertEqual(channel.code, 429, msg=channel.result) retry_after_ms = int(channel.json_body["retry_after_ms"]) + retry_header = channel.headers.getRawHeaders("Retry-After") else: self.assertEqual(channel.code, 200, msg=channel.result) # Since we're ratelimiting at 1 request/min, retry_after_ms should be lower # than 1min. - self.assertTrue(retry_after_ms < 6000) + self.assertLess(retry_after_ms, 6000) + assert retry_header + self.assertLessEqual(int(retry_header[0]), 6) self.reactor.advance(retry_after_ms / 1000.0 + 1.0) @@ -217,7 +221,8 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): # which sets these values to 10000, but as we're overriding the entire # rc_login dict here, we need to set this manually as well "address": {"per_second": 10000, "burst_count": 10000}, - } + }, + "experimental_features": {"msc4041_enabled": True}, } ) def test_POST_ratelimiting_per_account(self) -> None: @@ -234,12 +239,15 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): if i == 5: self.assertEqual(channel.code, 429, msg=channel.result) retry_after_ms = int(channel.json_body["retry_after_ms"]) + retry_header = channel.headers.getRawHeaders("Retry-After") else: self.assertEqual(channel.code, 200, msg=channel.result) # Since we're ratelimiting at 1 request/min, retry_after_ms should be lower # than 1min. 
- self.assertTrue(retry_after_ms < 6000) + self.assertLess(retry_after_ms, 6000) + assert retry_header + self.assertLessEqual(int(retry_header[0]), 6) self.reactor.advance(retry_after_ms / 1000.0) @@ -262,7 +270,8 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): # rc_login dict here, we need to set this manually as well "address": {"per_second": 10000, "burst_count": 10000}, "failed_attempts": {"per_second": 0.17, "burst_count": 5}, - } + }, + "experimental_features": {"msc4041_enabled": True}, } ) def test_POST_ratelimiting_per_account_failed_attempts(self) -> None: @@ -279,12 +288,15 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): if i == 5: self.assertEqual(channel.code, 429, msg=channel.result) retry_after_ms = int(channel.json_body["retry_after_ms"]) + retry_header = channel.headers.getRawHeaders("Retry-After") else: self.assertEqual(channel.code, 403, msg=channel.result) # Since we're ratelimiting at 1 request/min, retry_after_ms should be lower # than 1min. - self.assertTrue(retry_after_ms < 6000) + self.assertLess(retry_after_ms, 6000) + assert retry_header + self.assertLessEqual(int(retry_header[0]), 6) self.reactor.advance(retry_after_ms / 1000.0 + 1.0) From e691243e191d9dad2bcbf55f9659d007f75fd28e Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 24 Aug 2023 15:53:07 +0100 Subject: [PATCH 367/562] Fix typechecking with twisted trunk (#16121) --- .github/workflows/twisted_trunk.yml | 4 ++-- changelog.d/16121.misc | 1 + synapse/handlers/message.py | 32 +++++++++++++-------------- synapse/logging/context.py | 19 ++++++++-------- synapse/util/caches/deferred_cache.py | 2 +- tests/util/test_async_helpers.py | 14 +++++------- 6 files changed, 36 insertions(+), 36 deletions(-) create mode 100644 changelog.d/16121.misc diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 67ccc03f6e..7d629a4ed0 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -54,8 +54,8 @@ jobs: poetry remove twisted poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref || 'trunk' }} poetry install --no-interaction --extras "all test" - - name: Remove warn_unused_ignores from mypy config - run: sed '/warn_unused_ignores = True/d' -i mypy.ini + - name: Remove unhelpful options from mypy config + run: sed -e '/warn_unused_ignores = True/d' -e '/warn_redundant_casts = True/d' -i mypy.ini - run: poetry run mypy trial: diff --git a/changelog.d/16121.misc b/changelog.d/16121.misc new file mode 100644 index 0000000000..f325d2a31d --- /dev/null +++ b/changelog.d/16121.misc @@ -0,0 +1 @@ +Attempt to fix the twisted trunk job. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index a74db1dccf..3184bfb047 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1474,23 +1474,23 @@ class EventCreationHandler: # We now persist the event (and update the cache in parallel, since we # don't want to block on it). - event, context = events_and_context[0] + # + # Note: mypy gets confused if we inline dl and check with twisted#11770. + # Some kind of bug in mypy's deduction? 
+ deferreds = ( + run_in_background( + self._persist_events, + requester=requester, + events_and_context=events_and_context, + ratelimit=ratelimit, + extra_users=extra_users, + ), + run_in_background( + self.cache_joined_hosts_for_events, events_and_context + ).addErrback(log_failure, "cache_joined_hosts_for_event failed"), + ) result, _ = await make_deferred_yieldable( - gather_results( - ( - run_in_background( - self._persist_events, - requester=requester, - events_and_context=events_and_context, - ratelimit=ratelimit, - extra_users=extra_users, - ), - run_in_background( - self.cache_joined_hosts_for_events, events_and_context - ).addErrback(log_failure, "cache_joined_hosts_for_event failed"), - ), - consumeErrors=True, - ) + gather_results(deferreds, consumeErrors=True) ).addErrback(unwrapFirstError) return result diff --git a/synapse/logging/context.py b/synapse/logging/context.py index f62bea968f..64c6ae4512 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -809,23 +809,24 @@ def run_in_background( # type: ignore[misc] # `res` may be a coroutine, `Deferred`, some other kind of awaitable, or a plain # value. Convert it to a `Deferred`. + d: "defer.Deferred[R]" if isinstance(res, typing.Coroutine): # Wrap the coroutine in a `Deferred`. - res = defer.ensureDeferred(res) + d = defer.ensureDeferred(res) elif isinstance(res, defer.Deferred): - pass + d = res elif isinstance(res, Awaitable): # `res` is probably some kind of completed awaitable, such as a `DoneAwaitable` # or `Future` from `make_awaitable`. - res = defer.ensureDeferred(_unwrap_awaitable(res)) + d = defer.ensureDeferred(_unwrap_awaitable(res)) else: # `res` is a plain value. Wrap it in a `Deferred`. - res = defer.succeed(res) + d = defer.succeed(res) - if res.called and not res.paused: + if d.called and not d.paused: # The function should have maintained the logcontext, so we can # optimise out the messing about - return res + return d # The function may have reset the context before returning, so # we need to restore it now. @@ -843,8 +844,8 @@ def run_in_background( # type: ignore[misc] # which is supposed to have a single entry and exit point. But # by spawning off another deferred, we are effectively # adding a new exit point.) 
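The rewrite makes the normalisation explicit: whatever the wrapped function produces (coroutine, `Deferred`, other awaitable, or plain value), callers of `run_in_background` always receive a `Deferred`. A usage sketch of the two helpers involved, assuming Synapse's `synapse.logging.context` module (`load_thing` is a placeholder coroutine):

```python
from synapse.logging.context import make_deferred_yieldable, run_in_background


async def load_thing() -> int:
    return 42


async def handler() -> int:
    # run_in_background starts load_thing and returns a Deferred,
    # restoring the calling logcontext before it returns.
    d = run_in_background(load_thing)
    # make_deferred_yieldable makes the Deferred safe to await from
    # logcontext-following code.
    return await make_deferred_yieldable(d)
```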
- res.addBoth(_set_context_cb, ctx) - return res + d.addBoth(_set_context_cb, ctx) + return d T = TypeVar("T") @@ -877,7 +878,7 @@ def make_deferred_yieldable(deferred: "defer.Deferred[T]") -> "defer.Deferred[T] ResultT = TypeVar("ResultT") -def _set_context_cb(result: ResultT, context: LoggingContext) -> ResultT: +def _set_context_cb(result: ResultT, context: LoggingContextOrSentinel) -> ResultT: """A callback function which just sets the logging context""" set_current_context(context) return result diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index bf7bd351e0..029eedcc6f 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -470,7 +470,7 @@ class CacheMultipleEntries(CacheEntry[KT, VT]): def deferred(self, key: KT) -> "defer.Deferred[VT]": if not self._deferred: self._deferred = ObservableDeferred(defer.Deferred(), consumeErrors=True) - return self._deferred.observe().addCallback(lambda res: res.get(key)) + return self._deferred.observe().addCallback(lambda res: res[key]) def add_invalidation_callback( self, key: KT, callback: Optional[Callable[[], None]] diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py index 91cac9822a..05983ed434 100644 --- a/tests/util/test_async_helpers.py +++ b/tests/util/test_async_helpers.py @@ -60,11 +60,9 @@ class ObservableDeferredTest(TestCase): observer1.addBoth(check_called_first) # store the results - results: List[Optional[ObservableDeferred[int]]] = [None, None] + results: List[Optional[int]] = [None, None] - def check_val( - res: ObservableDeferred[int], idx: int - ) -> ObservableDeferred[int]: + def check_val(res: int, idx: int) -> int: results[idx] = res return res @@ -93,14 +91,14 @@ class ObservableDeferredTest(TestCase): observer1.addBoth(check_called_first) # store the results - results: List[Optional[ObservableDeferred[str]]] = [None, None] + results: List[Optional[Failure]] = [None, None] - def check_val(res: ObservableDeferred[str], idx: int) -> None: + def check_failure(res: Failure, idx: int) -> None: results[idx] = res return None - observer1.addErrback(check_val, 0) - observer2.addErrback(check_val, 1) + observer1.addErrback(check_failure, 0) + observer2.addErrback(check_failure, 1) try: raise Exception("gah!") From 5427cc20b90b7232ec678967bb04ef604923200f Mon Sep 17 00:00:00 2001 From: Amirreza Aflakparast <84932095+AmirAflak@users.noreply.github.com> Date: Thu, 24 Aug 2023 18:36:06 +0330 Subject: [PATCH 368/562] Update URLs to matrix.org blog categories. (#16008) --- changelog.d/16008.doc | 1 + docs/development/releases.md | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16008.doc diff --git a/changelog.d/16008.doc b/changelog.d/16008.doc new file mode 100644 index 0000000000..1142224951 --- /dev/null +++ b/changelog.d/16008.doc @@ -0,0 +1 @@ +Update links to the matrix.org blog. diff --git a/docs/development/releases.md b/docs/development/releases.md index c9a8c69945..6e83c81e27 100644 --- a/docs/development/releases.md +++ b/docs/development/releases.md @@ -12,7 +12,7 @@ Note that this schedule might be modified depending on the availability of the Synapse team, e.g. releases may be skipped to avoid holidays. Release announcements can be found in the -[release category of the Matrix blog](https://matrix.org/blog/category/releases). +[release category of the Matrix blog](https://matrix.org/category/releases). ## Bugfix releases @@ -34,4 +34,4 @@ be held to be released together. 
In some cases, a pre-disclosure of a security release will be issued as a notice to Synapse operators that there is an upcoming security release. These can be -found in the [security category of the Matrix blog](https://matrix.org/blog/category/security). +found in the [security category of the Matrix blog](https://matrix.org/category/security). From efdb87c8981e1de946e7454ae612c3b58901030c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 24 Aug 2023 15:50:41 -0400 Subject: [PATCH 369/562] Bump anyhow from 1.0.72 to 1.0.75 (#16141) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f53e4f6205..ad88335f31 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.72" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" +checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "arc-swap" From aeeca2a62ebfb601efa7930acae0897c8d3e43df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20Grimpard?= Date: Thu, 24 Aug 2023 22:11:23 +0200 Subject: [PATCH 370/562] Add configuration setting for CAS protocol version (#15816) --- changelog.d/15816.feature | 1 + docs/usage/configuration/config_documentation.md | 2 ++ synapse/config/cas.py | 13 ++++++++++++- synapse/handlers/cas.py | 6 +++++- 4 files changed, 20 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15816.feature diff --git a/changelog.d/15816.feature b/changelog.d/15816.feature new file mode 100644 index 0000000000..9248dd6792 --- /dev/null +++ b/changelog.d/15816.feature @@ -0,0 +1 @@ +Add configuration setting for CAS protocol version. Contributed by Aurélien Grimpard. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 743c51d76a..235f873860 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3420,6 +3420,7 @@ Has the following sub-options: to style the login flow according to the identity provider in question. See the [spec](https://spec.matrix.org/latest/) for possible options here. * `server_url`: The URL of the CAS authorization endpoint. +* `protocol_version`: The CAS protocol version, defaults to none (version 3 is required if you want to use "required_attributes"). * `displayname_attribute`: The attribute of the CAS response to use as the display name. If no name is given here, no displayname will be set. 
* `required_attributes`: It is possible to configure Synapse to only allow logins if CAS attributes @@ -3433,6 +3434,7 @@ Example configuration: cas_config: enabled: true server_url: "https://cas-server.com" + protocol_version: 3 displayname_attribute: name required_attributes: userGroup: "staff" diff --git a/synapse/config/cas.py b/synapse/config/cas.py index c4e63e7411..6e2d9addbf 100644 --- a/synapse/config/cas.py +++ b/synapse/config/cas.py @@ -18,7 +18,7 @@ from typing import Any, List from synapse.config.sso import SsoAttributeRequirement from synapse.types import JsonDict -from ._base import Config +from ._base import Config, ConfigError from ._util import validate_config @@ -41,6 +41,16 @@ class CasConfig(Config): public_baseurl = self.root.server.public_baseurl self.cas_service_url = public_baseurl + "_matrix/client/r0/login/cas/ticket" + self.cas_protocol_version = cas_config.get("protocol_version") + if ( + self.cas_protocol_version is not None + and self.cas_protocol_version not in [1, 2, 3] + ): + raise ConfigError( + "Unsupported CAS protocol version %s (only versions 1, 2, 3 are supported)" + % (self.cas_protocol_version,), + ("cas_config", "protocol_version"), + ) self.cas_displayname_attribute = cas_config.get("displayname_attribute") required_attributes = cas_config.get("required_attributes") or {} self.cas_required_attributes = _parsed_required_attributes_def( @@ -54,6 +64,7 @@ class CasConfig(Config): else: self.cas_server_url = None self.cas_service_url = None + self.cas_protocol_version = None self.cas_displayname_attribute = None self.cas_required_attributes = [] diff --git a/synapse/handlers/cas.py b/synapse/handlers/cas.py index 5c71637038..a850545453 100644 --- a/synapse/handlers/cas.py +++ b/synapse/handlers/cas.py @@ -67,6 +67,7 @@ class CasHandler: self._cas_server_url = hs.config.cas.cas_server_url self._cas_service_url = hs.config.cas.cas_service_url + self._cas_protocol_version = hs.config.cas.cas_protocol_version self._cas_displayname_attribute = hs.config.cas.cas_displayname_attribute self._cas_required_attributes = hs.config.cas.cas_required_attributes @@ -121,7 +122,10 @@ class CasHandler: Returns: The parsed CAS response. """ - uri = self._cas_server_url + "/proxyValidate" + if self._cas_protocol_version == 3: + uri = self._cas_server_url + "/p3/proxyValidate" + else: + uri = self._cas_server_url + "/proxyValidate" args = { "ticket": ticket, "service": self._build_service_param(service_args), From 5856a8ba4243a11f0d5f97ed2fe742ae31341452 Mon Sep 17 00:00:00 2001 From: Shay Date: Thu, 24 Aug 2023 13:57:53 -0700 Subject: [PATCH 371/562] Document `exclude_rooms_from_sync` configuration option (#16178) --- changelog.d/16178.doc | 1 + docs/usage/configuration/config_documentation.md | 13 +++++++++++++ 2 files changed, 14 insertions(+) create mode 100644 changelog.d/16178.doc diff --git a/changelog.d/16178.doc b/changelog.d/16178.doc new file mode 100644 index 0000000000..ea21e19240 --- /dev/null +++ b/changelog.d/16178.doc @@ -0,0 +1 @@ +Document `exclude_rooms_from_sync` configuration option. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 235f873860..0b1725816e 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3867,6 +3867,19 @@ Example configuration: ```yaml forget_rooms_on_leave: false ``` +--- +### `exclude_rooms_from_sync` +A list of rooms to exclude from sync responses.
This is useful for server +administrators wishing to group users into a room without these users being able +to see it from their client. + +By default, no room is excluded. + +Example configuration: +```yaml +exclude_rooms_from_sync: + - !foo:example.com +``` --- ## Opentracing From daf11e26efc210dccaef029422431a7d2803dd8a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 24 Aug 2023 19:38:46 -0400 Subject: [PATCH 372/562] Replace make_awaitable with AsyncMock (#16179) Python 3.8 provides a native AsyncMock, we can replace the homegrown version we have. --- changelog.d/16179.misc | 1 + tests/crypto/test_keyring.py | 5 +- tests/federation/test_complexity.py | 33 +++-- tests/federation/test_federation_catch_up.py | 8 +- tests/federation/test_federation_sender.py | 42 +++--- tests/handlers/test_appservice.py | 113 +++++++-------- tests/handlers/test_auth.py | 27 ++-- tests/handlers/test_device.py | 13 +- tests/handlers/test_directory.py | 12 +- tests/handlers/test_e2e_keys.py | 130 +++++++++--------- tests/handlers/test_federation.py | 52 ++++--- tests/handlers/test_federation_event.py | 109 +++++++-------- tests/handlers/test_password_providers.py | 51 ++++--- tests/handlers/test_profile.py | 9 +- tests/handlers/test_register.py | 33 ++--- tests/handlers/test_room_member.py | 33 ++--- tests/handlers/test_sync.py | 7 +- tests/handlers/test_typing.py | 51 ++++--- tests/handlers/test_user_directory.py | 6 +- .../test_matrix_federation_agent.py | 60 ++++---- .../test_federation_sender_shard.py | 13 +- tests/rest/admin/test_user.py | 16 +-- tests/rest/client/test_account_data.py | 5 +- tests/rest/client/test_presence.py | 5 +- tests/rest/client/test_relations.py | 9 +- tests/rest/client/test_rooms.py | 37 +++-- tests/rest/client/test_third_party_rules.py | 35 +++-- tests/rest/client/test_transactions.py | 9 +- .../test_resource_limits_server_notices.py | 83 +++++------ tests/storage/test_appservice.py | 5 +- tests/storage/test_background_update.py | 10 +- tests/storage/test_client_ips.py | 7 +- tests/storage/test_monthly_active_users.py | 23 ++-- .../util/test_partial_state_events_tracker.py | 8 +- tests/test_federation.py | 40 +++--- tests/test_utils/__init__.py | 12 -- 36 files changed, 508 insertions(+), 604 deletions(-) create mode 100644 changelog.d/16179.misc diff --git a/changelog.d/16179.misc b/changelog.d/16179.misc new file mode 100644 index 0000000000..8d04954ab9 --- /dev/null +++ b/changelog.d/16179.misc @@ -0,0 +1 @@ +Use `AsyncMock` instead of custom code. diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 2be341ac7b..f93ba5d4cf 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -13,7 +13,7 @@ # limitations under the License. 
import time from typing import Any, Dict, List, Optional, cast -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock import attr import canonicaljson @@ -45,7 +45,6 @@ from synapse.types import JsonDict from synapse.util import Clock from tests import unittest -from tests.test_utils import make_awaitable from tests.unittest import logcontext_clean, override_config @@ -291,7 +290,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): with a null `ts_valid_until_ms` """ mock_fetcher = Mock() - mock_fetcher.get_keys = Mock(return_value=make_awaitable({})) + mock_fetcher.get_keys = AsyncMock(return_value={}) key1 = signedjson.key.generate_signing_key("1") r = self.hs.get_datastores().main.store_server_signature_keys( diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index 129d7cfd93..5b58fb13b5 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from unittest.mock import Mock +from unittest.mock import AsyncMock from synapse.api.errors import Codes, SynapseError from synapse.rest import admin @@ -20,7 +20,6 @@ from synapse.rest.client import login, room from synapse.types import JsonDict, UserID, create_requester from tests import unittest -from tests.test_utils import make_awaitable class RoomComplexityTests(unittest.FederatingHomeserverTestCase): @@ -75,9 +74,9 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) # type: ignore[assignment] - handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] - return_value=make_awaitable(("", 1)) + fed_transport.client.get_json = AsyncMock(return_value={"v1": 9999}) # type: ignore[assignment] + handler.federation_handler.do_invite_join = AsyncMock( # type: ignore[assignment] + return_value=("", 1) ) d = handler._remote_join( @@ -106,9 +105,9 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) # type: ignore[assignment] - handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] - return_value=make_awaitable(("", 1)) + fed_transport.client.get_json = AsyncMock(return_value={"v1": 9999}) # type: ignore[assignment] + handler.federation_handler.do_invite_join = AsyncMock( # type: ignore[assignment] + return_value=("", 1) ) d = handler._remote_join( @@ -143,9 +142,9 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = Mock(return_value=make_awaitable(None)) # type: ignore[assignment] - handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] - return_value=make_awaitable(("", 1)) + fed_transport.client.get_json = AsyncMock(return_value=None) # type: ignore[assignment] + handler.federation_handler.do_invite_join = AsyncMock( # type: ignore[assignment] + return_value=("", 1) ) # Artificially raise the complexity @@ -200,9 +199,9 @@ class 
RoomComplexityAdminTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) # type: ignore[assignment] - handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] - return_value=make_awaitable(("", 1)) + fed_transport.client.get_json = AsyncMock(return_value={"v1": 9999}) # type: ignore[assignment] + handler.federation_handler.do_invite_join = AsyncMock( # type: ignore[assignment] + return_value=("", 1) ) d = handler._remote_join( @@ -230,9 +229,9 @@ class RoomComplexityAdminTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) # type: ignore[assignment] - handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] - return_value=make_awaitable(("", 1)) + fed_transport.client.get_json = AsyncMock(return_value={"v1": 9999}) # type: ignore[assignment] + handler.federation_handler.do_invite_join = AsyncMock( # type: ignore[assignment] + return_value=("", 1) ) d = handler._remote_join( diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py index b290b020a2..40318aa1b6 100644 --- a/tests/federation/test_federation_catch_up.py +++ b/tests/federation/test_federation_catch_up.py @@ -1,6 +1,6 @@ from typing import Callable, Collection, List, Optional, Tuple from unittest import mock -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from twisted.test.proto_helpers import MemoryReactor @@ -19,7 +19,7 @@ from synapse.types import JsonDict from synapse.util import Clock from synapse.util.retryutils import NotRetryingDestination -from tests.test_utils import event_injection, make_awaitable +from tests.test_utils import event_injection from tests.unittest import FederatingHomeserverTestCase @@ -50,8 +50,8 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase): # This mock is crucial for destination_rooms to be populated. # TODO: this seems to no longer be the case---tests pass with this mock # commented out. - state_storage_controller.get_current_hosts_in_room = Mock( # type: ignore[assignment] - return_value=make_awaitable({"test", "host2"}) + state_storage_controller.get_current_hosts_in_room = AsyncMock( # type: ignore[assignment] + return_value={"test", "host2"} ) # whenever send_transaction is called, record the pdu data diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index 9e104fd96a..5ea4a75a9f 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
from typing import Callable, FrozenSet, List, Optional, Set -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from signedjson import key, sign from signedjson.types import BaseKey, SigningKey @@ -29,7 +29,6 @@ from synapse.server import HomeServer from synapse.types import JsonDict, ReadReceipt from synapse.util import Clock -from tests.test_utils import make_awaitable from tests.unittest import HomeserverTestCase @@ -43,12 +42,13 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.federation_transport_client = Mock(spec=["send_transaction"]) + self.federation_transport_client.send_transaction = AsyncMock() hs = self.setup_test_homeserver( federation_transport_client=self.federation_transport_client, ) - hs.get_storage_controllers().state.get_current_hosts_in_room = Mock( # type: ignore[assignment] - return_value=make_awaitable({"test", "host2"}) + hs.get_storage_controllers().state.get_current_hosts_in_room = AsyncMock( # type: ignore[assignment] + return_value={"test", "host2"} ) hs.get_storage_controllers().state.get_current_hosts_in_room_or_partial_state_approximation = ( # type: ignore[assignment] @@ -64,7 +64,7 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase): def test_send_receipts(self) -> None: mock_send_transaction = self.federation_transport_client.send_transaction - mock_send_transaction.return_value = make_awaitable({}) + mock_send_transaction.return_value = {} sender = self.hs.get_federation_sender() receipt = ReadReceipt( @@ -104,7 +104,7 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase): def test_send_receipts_thread(self) -> None: mock_send_transaction = self.federation_transport_client.send_transaction - mock_send_transaction.return_value = make_awaitable({}) + mock_send_transaction.return_value = {} # Create receipts for: # @@ -180,7 +180,7 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase): """Send two receipts in quick succession; the second should be flushed, but only after 20ms""" mock_send_transaction = self.federation_transport_client.send_transaction - mock_send_transaction.return_value = make_awaitable({}) + mock_send_transaction.return_value = {} sender = self.hs.get_federation_sender() receipt = ReadReceipt( @@ -276,6 +276,8 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): self.federation_transport_client = Mock( spec=["send_transaction", "query_user_devices"] ) + self.federation_transport_client.send_transaction = AsyncMock() + self.federation_transport_client.query_user_devices = AsyncMock() return self.setup_test_homeserver( federation_transport_client=self.federation_transport_client, ) @@ -317,13 +319,13 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): self.record_transaction ) - def record_transaction( + async def record_transaction( self, txn: Transaction, json_cb: Optional[Callable[[], JsonDict]] = None - ) -> "defer.Deferred[JsonDict]": + ) -> JsonDict: assert json_cb is not None data = json_cb() self.edus.extend(data["edus"]) - return defer.succeed({}) + return {} def test_send_device_updates(self) -> None: """Basic case: each device update should result in an EDU""" @@ -354,15 +356,11 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): # Send the server a device list EDU for the other user, this will cause # it to try and resync the device lists. 
- self.federation_transport_client.query_user_devices.return_value = ( - make_awaitable( - { - "stream_id": "1", - "user_id": "@user2:host2", - "devices": [{"device_id": "D1"}], - } - ) - ) + self.federation_transport_client.query_user_devices.return_value = { + "stream_id": "1", + "user_id": "@user2:host2", + "devices": [{"device_id": "D1"}], + } self.get_success( self.device_handler.device_list_updater.incoming_device_list_update( @@ -533,7 +531,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): recovery """ mock_send_txn = self.federation_transport_client.send_transaction - mock_send_txn.side_effect = lambda t, cb: defer.fail(AssertionError("fail")) + mock_send_txn.side_effect = AssertionError("fail") # create devices u1 = self.register_user("user", "pass") @@ -578,7 +576,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): This case tests the behaviour when the server has never been reachable. """ mock_send_txn = self.federation_transport_client.send_transaction - mock_send_txn.side_effect = lambda t, cb: defer.fail(AssertionError("fail")) + mock_send_txn.side_effect = AssertionError("fail") # create devices u1 = self.register_user("user", "pass") @@ -636,7 +634,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): # now the server goes offline mock_send_txn = self.federation_transport_client.send_transaction - mock_send_txn.side_effect = lambda t, cb: defer.fail(AssertionError("fail")) + mock_send_txn.side_effect = AssertionError("fail") self.login("user", "pass", device_id="D2") self.login("user", "pass", device_id="D3") diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 9014e60577..5e2ae82cd4 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -13,7 +13,7 @@ # limitations under the License. 
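The `side_effect = AssertionError("fail")` assignments in the sender tests above lean on mock's rule that an exception instance assigned to `side_effect` is raised when the mock is invoked; for an `AsyncMock` the raise happens on `await`, which is what the old `lambda t, cb: defer.fail(...)` simulated. A small sketch under those assumptions:

```python
# Illustrative sketch: an exception instance as side_effect raises on await.
import asyncio
from unittest.mock import AsyncMock

send_transaction = AsyncMock()
send_transaction.side_effect = AssertionError("fail")


async def main() -> None:
    try:
        await send_transaction("txn", lambda: {})
    except AssertionError as exc:
        assert str(exc) == "fail"


asyncio.run(main())
```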
from typing import Dict, Iterable, List, Optional -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from parameterized import parameterized @@ -36,7 +36,7 @@ from synapse.util import Clock from synapse.util.stringutils import random_string from tests import unittest -from tests.test_utils import event_injection, make_awaitable, simple_async_mock +from tests.test_utils import event_injection, simple_async_mock from tests.unittest import override_config from tests.utils import MockClock @@ -46,15 +46,13 @@ class AppServiceHandlerTestCase(unittest.TestCase): def setUp(self) -> None: self.mock_store = Mock() - self.mock_as_api = Mock() + self.mock_as_api = AsyncMock() self.mock_scheduler = Mock() hs = Mock() hs.get_datastores.return_value = Mock(main=self.mock_store) - self.mock_store.get_appservice_last_pos.return_value = make_awaitable(None) - self.mock_store.set_appservice_last_pos.return_value = make_awaitable(None) - self.mock_store.set_appservice_stream_type_pos.return_value = make_awaitable( - None - ) + self.mock_store.get_appservice_last_pos = AsyncMock(return_value=None) + self.mock_store.set_appservice_last_pos = AsyncMock(return_value=None) + self.mock_store.set_appservice_stream_type_pos = AsyncMock(return_value=None) hs.get_application_service_api.return_value = self.mock_as_api hs.get_application_service_scheduler.return_value = self.mock_scheduler hs.get_clock.return_value = MockClock() @@ -69,21 +67,25 @@ class AppServiceHandlerTestCase(unittest.TestCase): self._mkservice(is_interested_in_event=False), ] - self.mock_as_api.query_user.return_value = make_awaitable(True) + self.mock_as_api.query_user.return_value = True self.mock_store.get_app_services.return_value = services - self.mock_store.get_user_by_id.return_value = make_awaitable([]) + self.mock_store.get_user_by_id = AsyncMock(return_value=[]) event = Mock( sender="@someone:anywhere", type="m.room.message", room_id="!foo:bar" ) - self.mock_store.get_all_new_event_ids_stream.side_effect = [ - make_awaitable((0, {})), - make_awaitable((1, {event.event_id: 0})), - ] - self.mock_store.get_events_as_list.side_effect = [ - make_awaitable([]), - make_awaitable([event]), - ] + self.mock_store.get_all_new_event_ids_stream = AsyncMock( + side_effect=[ + (0, {}), + (1, {event.event_id: 0}), + ] + ) + self.mock_store.get_events_as_list = AsyncMock( + side_effect=[ + [], + [event], + ] + ) self.handler.notify_interested_services(RoomStreamToken(None, 1)) self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( @@ -95,14 +97,16 @@ class AppServiceHandlerTestCase(unittest.TestCase): services = [self._mkservice(is_interested_in_event=True)] services[0].is_interested_in_user.return_value = True self.mock_store.get_app_services.return_value = services - self.mock_store.get_user_by_id.return_value = make_awaitable(None) + self.mock_store.get_user_by_id = AsyncMock(return_value=None) event = Mock(sender=user_id, type="m.room.message", room_id="!foo:bar") - self.mock_as_api.query_user.return_value = make_awaitable(True) - self.mock_store.get_all_new_event_ids_stream.side_effect = [ - make_awaitable((0, {event.event_id: 0})), - ] - self.mock_store.get_events_as_list.side_effect = [make_awaitable([event])] + self.mock_as_api.query_user.return_value = True + self.mock_store.get_all_new_event_ids_stream = AsyncMock( + side_effect=[ + (0, {event.event_id: 0}), + ] + ) + self.mock_store.get_events_as_list = AsyncMock(side_effect=[[event]]) self.handler.notify_interested_services(RoomStreamToken(None, 0)) 
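Several hunks above hand a plain list to `side_effect`; mock treats a non-exception iterable as a sequence of results, so successive awaits yield successive values, replacing the old list of `make_awaitable(...)` wrappers. A sketch (the name mirrors the mocked store method; the values are made up):

```python
# Illustrative sketch: an iterable side_effect yields one result per await.
import asyncio
from unittest.mock import AsyncMock

get_all_new_event_ids_stream = AsyncMock(side_effect=[(0, {}), (1, {"$ev": 0})])


async def main() -> None:
    assert await get_all_new_event_ids_stream() == (0, {})
    assert await get_all_new_event_ids_stream() == (1, {"$ev": 0})


asyncio.run(main())
```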
self.mock_as_api.query_user.assert_called_once_with(services[0], user_id) @@ -112,13 +116,15 @@ class AppServiceHandlerTestCase(unittest.TestCase): services = [self._mkservice(is_interested_in_event=True)] services[0].is_interested_in_user.return_value = True self.mock_store.get_app_services.return_value = services - self.mock_store.get_user_by_id.return_value = make_awaitable({"name": user_id}) + self.mock_store.get_user_by_id = AsyncMock(return_value={"name": user_id}) event = Mock(sender=user_id, type="m.room.message", room_id="!foo:bar") - self.mock_as_api.query_user.return_value = make_awaitable(True) - self.mock_store.get_all_new_event_ids_stream.side_effect = [ - make_awaitable((0, [event], {event.event_id: 0})), - ] + self.mock_as_api.query_user.return_value = True + self.mock_store.get_all_new_event_ids_stream = AsyncMock( + side_effect=[ + (0, [event], {event.event_id: 0}), + ] + ) self.handler.notify_interested_services(RoomStreamToken(None, 0)) @@ -141,10 +147,10 @@ class AppServiceHandlerTestCase(unittest.TestCase): self._mkservice_alias(is_room_alias_in_namespace=False), ] - self.mock_as_api.query_alias.return_value = make_awaitable(True) + self.mock_as_api.query_alias = AsyncMock(return_value=True) self.mock_store.get_app_services.return_value = services - self.mock_store.get_association_from_room_alias.return_value = make_awaitable( - Mock(room_id=room_id, servers=servers) + self.mock_store.get_association_from_room_alias = AsyncMock( + return_value=Mock(room_id=room_id, servers=servers) ) result = self.successResultOf( @@ -177,7 +183,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): def test_get_3pe_protocols_protocol_no_response(self) -> None: service = self._mkservice(False, ["my-protocol"]) self.mock_store.get_app_services.return_value = [service] - self.mock_as_api.get_3pe_protocol.return_value = make_awaitable(None) + self.mock_as_api.get_3pe_protocol.return_value = None response = self.successResultOf( defer.ensureDeferred(self.handler.get_3pe_protocols()) ) @@ -189,9 +195,10 @@ class AppServiceHandlerTestCase(unittest.TestCase): def test_get_3pe_protocols_select_one_protocol(self) -> None: service = self._mkservice(False, ["my-protocol"]) self.mock_store.get_app_services.return_value = [service] - self.mock_as_api.get_3pe_protocol.return_value = make_awaitable( - {"x-protocol-data": 42, "instances": []} - ) + self.mock_as_api.get_3pe_protocol.return_value = { + "x-protocol-data": 42, + "instances": [], + } response = self.successResultOf( defer.ensureDeferred(self.handler.get_3pe_protocols("my-protocol")) ) @@ -205,9 +212,10 @@ class AppServiceHandlerTestCase(unittest.TestCase): def test_get_3pe_protocols_one_protocol(self) -> None: service = self._mkservice(False, ["my-protocol"]) self.mock_store.get_app_services.return_value = [service] - self.mock_as_api.get_3pe_protocol.return_value = make_awaitable( - {"x-protocol-data": 42, "instances": []} - ) + self.mock_as_api.get_3pe_protocol.return_value = { + "x-protocol-data": 42, + "instances": [], + } response = self.successResultOf( defer.ensureDeferred(self.handler.get_3pe_protocols()) ) @@ -222,9 +230,10 @@ class AppServiceHandlerTestCase(unittest.TestCase): service_one = self._mkservice(False, ["my-protocol"]) service_two = self._mkservice(False, ["other-protocol"]) self.mock_store.get_app_services.return_value = [service_one, service_two] - self.mock_as_api.get_3pe_protocol.return_value = make_awaitable( - {"x-protocol-data": 42, "instances": []} - ) + self.mock_as_api.get_3pe_protocol.return_value = { + 
"x-protocol-data": 42, + "instances": [], + } response = self.successResultOf( defer.ensureDeferred(self.handler.get_3pe_protocols()) ) @@ -287,13 +296,11 @@ class AppServiceHandlerTestCase(unittest.TestCase): interested_service = self._mkservice(is_interested_in_event=True) services = [interested_service] self.mock_store.get_app_services.return_value = services - self.mock_store.get_type_stream_id_for_appservice.return_value = make_awaitable( - 579 - ) + self.mock_store.get_type_stream_id_for_appservice = AsyncMock(return_value=579) event = Mock(event_id="event_1") - self.event_source.sources.receipt.get_new_events_as.return_value = ( - make_awaitable(([event], None)) + self.event_source.sources.receipt.get_new_events_as = AsyncMock( + return_value=([event], None) ) self.handler.notify_interested_services_ephemeral( @@ -317,13 +324,11 @@ class AppServiceHandlerTestCase(unittest.TestCase): services = [interested_service] self.mock_store.get_app_services.return_value = services - self.mock_store.get_type_stream_id_for_appservice.return_value = make_awaitable( - 580 - ) + self.mock_store.get_type_stream_id_for_appservice = AsyncMock(return_value=580) event = Mock(event_id="event_1") - self.event_source.sources.receipt.get_new_events_as.return_value = ( - make_awaitable(([event], None)) + self.event_source.sources.receipt.get_new_events_as = AsyncMock( + return_value=([event], None) ) self.handler.notify_interested_services_ephemeral( @@ -350,9 +355,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): A mock representing the ApplicationService. """ service = Mock() - service.is_interested_in_event.return_value = make_awaitable( - is_interested_in_event - ) + service.is_interested_in_event = AsyncMock(return_value=is_interested_in_event) service.token = "mock_service_token" service.url = "mock_service_url" service.protocols = protocols diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index 036dbbc45b..413ff8795b 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional -from unittest.mock import Mock +from unittest.mock import AsyncMock import pymacaroons @@ -25,7 +25,6 @@ from synapse.server import HomeServer from synapse.util import Clock from tests import unittest -from tests.test_utils import make_awaitable class AuthTestCase(unittest.HomeserverTestCase): @@ -166,8 +165,8 @@ class AuthTestCase(unittest.HomeserverTestCase): def test_mau_limits_exceeded_large(self) -> None: self.auth_blocking._limit_usage_by_mau = True - self.hs.get_datastores().main.get_monthly_active_count = Mock( - return_value=make_awaitable(self.large_number_of_users) + self.hs.get_datastores().main.get_monthly_active_count = AsyncMock( + return_value=self.large_number_of_users ) self.get_failure( @@ -177,8 +176,8 @@ class AuthTestCase(unittest.HomeserverTestCase): ResourceLimitError, ) - self.hs.get_datastores().main.get_monthly_active_count = Mock( - return_value=make_awaitable(self.large_number_of_users) + self.hs.get_datastores().main.get_monthly_active_count = AsyncMock( + return_value=self.large_number_of_users ) token = self.get_success( self.auth_handler.create_login_token_for_user_id(self.user1) @@ -191,8 +190,8 @@ class AuthTestCase(unittest.HomeserverTestCase): self.auth_blocking._limit_usage_by_mau = True # Set the server to be at the edge of too many users. 
- self.hs.get_datastores().main.get_monthly_active_count = Mock( - return_value=make_awaitable(self.auth_blocking._max_mau_value) + self.hs.get_datastores().main.get_monthly_active_count = AsyncMock( + return_value=self.auth_blocking._max_mau_value ) # If not in monthly active cohort @@ -208,8 +207,8 @@ class AuthTestCase(unittest.HomeserverTestCase): self.assertIsNone(self.token_login(token)) # If in monthly active cohort - self.hs.get_datastores().main.user_last_seen_monthly_active = Mock( - return_value=make_awaitable(self.clock.time_msec()) + self.hs.get_datastores().main.user_last_seen_monthly_active = AsyncMock( + return_value=self.clock.time_msec() ) self.get_success( self.auth_handler.create_access_token_for_user_id( @@ -224,8 +223,8 @@ class AuthTestCase(unittest.HomeserverTestCase): def test_mau_limits_not_exceeded(self) -> None: self.auth_blocking._limit_usage_by_mau = True - self.hs.get_datastores().main.get_monthly_active_count = Mock( - return_value=make_awaitable(self.small_number_of_users) + self.hs.get_datastores().main.get_monthly_active_count = AsyncMock( + return_value=self.small_number_of_users ) # Ensure does not raise exception self.get_success( @@ -234,8 +233,8 @@ class AuthTestCase(unittest.HomeserverTestCase): ) ) - self.hs.get_datastores().main.get_monthly_active_count = Mock( - return_value=make_awaitable(self.small_number_of_users) + self.hs.get_datastores().main.get_monthly_active_count = AsyncMock( + return_value=self.small_number_of_users ) token = self.get_success( self.auth_handler.create_login_token_for_user_id(self.user1) diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index e1e58fa6e6..dca539d203 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -32,7 +32,6 @@ from synapse.types import JsonDict, create_requester from synapse.util import Clock from tests import unittest -from tests.test_utils import make_awaitable from tests.unittest import override_config user1 = "@boris:aaa" @@ -41,7 +40,7 @@ user2 = "@theresa:bbb" class DeviceTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - self.appservice_api = mock.Mock() + self.appservice_api = mock.AsyncMock() hs = self.setup_test_homeserver( "server", application_service_api=self.appservice_api, @@ -375,13 +374,11 @@ class DeviceTestCase(unittest.HomeserverTestCase): ) # Setup a response. - self.appservice_api.query_keys.return_value = make_awaitable( - { - "device_keys": { - local_user: {device_2: device_key_2b, device_3: device_key_3} - } + self.appservice_api.query_keys.return_value = { + "device_keys": { + local_user: {device_2: device_key_2b, device_3: device_key_3} } - ) + } # Request all devices. res = self.get_success( diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 90aec484c4..367d94eca3 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
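The `appservice_api = mock.AsyncMock()` change above works because attribute children of a bare, un-specced `AsyncMock` are themselves `AsyncMock`s, so e.g. `appservice_api.query_keys` is awaitable with no further setup. A sketch:

```python
# Illustrative sketch: attribute children of a bare AsyncMock are awaitable.
import asyncio
from unittest.mock import AsyncMock

appservice_api = AsyncMock()
appservice_api.query_keys.return_value = {"device_keys": {}}


async def main() -> None:
    res = await appservice_api.query_keys({"@user:test": []})
    assert res == {"device_keys": {}}


asyncio.run(main())
```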
from typing import Any, Awaitable, Callable, Dict -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from twisted.test.proto_helpers import MemoryReactor @@ -27,14 +27,13 @@ from synapse.types import JsonDict, RoomAlias, create_requester from synapse.util import Clock from tests import unittest -from tests.test_utils import make_awaitable class DirectoryTestCase(unittest.HomeserverTestCase): """Tests the directory service.""" def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - self.mock_federation = Mock() + self.mock_federation = AsyncMock() self.mock_registry = Mock() self.query_handlers: Dict[str, Callable[[dict], Awaitable[JsonDict]]] = {} @@ -73,9 +72,10 @@ class DirectoryTestCase(unittest.HomeserverTestCase): self.assertEqual({"room_id": "!8765qwer:test", "servers": ["test"]}, result) def test_get_remote_association(self) -> None: - self.mock_federation.make_query.return_value = make_awaitable( - {"room_id": "!8765qwer:test", "servers": ["test", "remote"]} - ) + self.mock_federation.make_query.return_value = { + "room_id": "!8765qwer:test", + "servers": ["test", "remote"], + } result = self.get_success(self.handler.get_association(self.remote_room)) diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 2eaffe511e..7917766a08 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -13,7 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Iterable +from typing import Dict, Iterable from unittest import mock from parameterized import parameterized @@ -31,13 +31,12 @@ from synapse.types import JsonDict, UserID from synapse.util import Clock from tests import unittest -from tests.test_utils import make_awaitable from tests.unittest import override_config class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - self.appservice_api = mock.Mock() + self.appservice_api = mock.AsyncMock() return self.setup_test_homeserver( federation_client=mock.Mock(), application_service_api=self.appservice_api ) @@ -801,29 +800,27 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" - self.hs.get_federation_client().query_client_keys = mock.Mock( # type: ignore[assignment] - return_value=make_awaitable( - { - "device_keys": {remote_user_id: {}}, - "master_keys": { - remote_user_id: { - "user_id": remote_user_id, - "usage": ["master"], - "keys": {"ed25519:" + remote_master_key: remote_master_key}, + self.hs.get_federation_client().query_client_keys = mock.AsyncMock( # type: ignore[assignment] + return_value={ + "device_keys": {remote_user_id: {}}, + "master_keys": { + remote_user_id: { + "user_id": remote_user_id, + "usage": ["master"], + "keys": {"ed25519:" + remote_master_key: remote_master_key}, + }, + }, + "self_signing_keys": { + remote_user_id: { + "user_id": remote_user_id, + "usage": ["self_signing"], + "keys": { + "ed25519:" + + remote_self_signing_key: remote_self_signing_key }, - }, - "self_signing_keys": { - remote_user_id: { - "user_id": remote_user_id, - "usage": ["self_signing"], - "keys": { - "ed25519:" - + remote_self_signing_key: remote_self_signing_key - }, - } - }, - } - ) + } + }, + } ) e2e_handler 
= self.hs.get_e2e_keys_handler() @@ -874,34 +871,29 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): # Pretend we're sharing a room with the user we're querying. If not, # `_query_devices_for_destination` will return early. - self.store.get_rooms_for_user = mock.Mock( - return_value=make_awaitable({"some_room_id"}) - ) + self.store.get_rooms_for_user = mock.AsyncMock(return_value={"some_room_id"}) remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" - self.hs.get_federation_client().query_user_devices = mock.Mock( # type: ignore[assignment] - return_value=make_awaitable( - { + self.hs.get_federation_client().query_user_devices = mock.AsyncMock( # type: ignore[assignment] + return_value={ + "user_id": remote_user_id, + "stream_id": 1, + "devices": [], + "master_key": { "user_id": remote_user_id, - "stream_id": 1, - "devices": [], - "master_key": { - "user_id": remote_user_id, - "usage": ["master"], - "keys": {"ed25519:" + remote_master_key: remote_master_key}, + "usage": ["master"], + "keys": {"ed25519:" + remote_master_key: remote_master_key}, + }, + "self_signing_key": { + "user_id": remote_user_id, + "usage": ["self_signing"], + "keys": { + "ed25519:" + remote_self_signing_key: remote_self_signing_key }, - "self_signing_key": { - "user_id": remote_user_id, - "usage": ["self_signing"], - "keys": { - "ed25519:" - + remote_self_signing_key: remote_self_signing_key - }, - }, - } - ) + }, + } ) e2e_handler = self.hs.get_e2e_keys_handler() @@ -987,20 +979,20 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): mock_get_rooms = mock.patch.object( self.store, "get_rooms_for_user", - new_callable=mock.MagicMock, - return_value=make_awaitable(["some_room_id"]), + new_callable=mock.AsyncMock, + return_value=["some_room_id"], ) mock_get_users = mock.patch.object( self.store, "get_users_server_still_shares_room_with", - new_callable=mock.MagicMock, - return_value=make_awaitable({remote_user_id}), + new_callable=mock.AsyncMock, + return_value={remote_user_id}, ) mock_request = mock.patch.object( self.hs.get_federation_client(), "query_user_devices", - new_callable=mock.MagicMock, - return_value=make_awaitable(response_body), + new_callable=mock.AsyncMock, + return_value=response_body, ) with mock_get_rooms, mock_get_users, mock_request as mocked_federation_request: @@ -1060,8 +1052,9 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): ) # Setup a response, but only for device 2. - self.appservice_api.claim_client_keys.return_value = make_awaitable( - ({local_user: {device_id_2: otk}}, [(local_user, device_id_1, "alg1", 1)]) + self.appservice_api.claim_client_keys.return_value = ( + {local_user: {device_id_2: otk}}, + [(local_user, device_id_1, "alg1", 1)], ) # we shouldn't have any unused fallback keys yet @@ -1127,9 +1120,10 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): ) # Setup a response. - self.appservice_api.claim_client_keys.return_value = make_awaitable( - ({local_user: {device_id_1: {**as_otk, **as_fallback_key}}}, []) - ) + response: Dict[str, Dict[str, Dict[str, JsonDict]]] = { + local_user: {device_id_1: {**as_otk, **as_fallback_key}} + } + self.appservice_api.claim_client_keys.return_value = (response, []) # Claim OTKs, which will ask the appservice and do nothing else. 
claim_res = self.get_success( @@ -1171,8 +1165,9 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): self.assertEqual(fallback_res, ["alg1"]) # The appservice will return only the OTK. - self.appservice_api.claim_client_keys.return_value = make_awaitable( - ({local_user: {device_id_1: as_otk}}, []) + self.appservice_api.claim_client_keys.return_value = ( + {local_user: {device_id_1: as_otk}}, + [], ) # Claim OTKs, which should return the OTK from the appservice and the @@ -1234,8 +1229,9 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): self.assertEqual(fallback_res, ["alg1"]) # Finally, return only the fallback key from the appservice. - self.appservice_api.claim_client_keys.return_value = make_awaitable( - ({local_user: {device_id_1: as_fallback_key}}, []) + self.appservice_api.claim_client_keys.return_value = ( + {local_user: {device_id_1: as_fallback_key}}, + [], ) # Claim OTKs, which will return only the fallback key from the database. @@ -1350,13 +1346,11 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): ) # Setup a response. - self.appservice_api.query_keys.return_value = make_awaitable( - { - "device_keys": { - local_user: {device_2: device_key_2b, device_3: device_key_3} - } + self.appservice_api.query_keys.return_value = { + "device_keys": { + local_user: {device_2: device_key_2b, device_3: device_key_3} } - ) + } # Request all devices. res = self.get_success(self.handler.query_local_devices({local_user: None})) diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 5f11d5df11..bd743b3578 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -14,7 +14,7 @@ import logging from typing import Collection, Optional, cast from unittest import TestCase -from unittest.mock import Mock, patch +from unittest.mock import AsyncMock, Mock, patch from twisted.internet.defer import Deferred from twisted.test.proto_helpers import MemoryReactor @@ -40,7 +40,7 @@ from synapse.util import Clock from synapse.util.stringutils import random_string from tests import unittest -from tests.test_utils import event_injection, make_awaitable +from tests.test_utils import event_injection logger = logging.getLogger(__name__) @@ -370,7 +370,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): # We mock out the FederationClient.backfill method, to pretend that a remote # server has returned our fake event. - federation_client_backfill_mock = Mock(return_value=make_awaitable([event])) + federation_client_backfill_mock = AsyncMock(return_value=[event]) self.hs.get_federation_client().backfill = federation_client_backfill_mock # type: ignore[assignment] # We also mock the persist method with a side effect of itself. 
This allows us @@ -631,33 +631,29 @@ class PartialJoinTestCase(unittest.FederatingHomeserverTestCase): }, RoomVersions.V10, ) - mock_make_membership_event = Mock( - return_value=make_awaitable( - ( - "example.com", - membership_event, - RoomVersions.V10, - ) + mock_make_membership_event = AsyncMock( + return_value=( + "example.com", + membership_event, + RoomVersions.V10, ) ) - mock_send_join = Mock( - return_value=make_awaitable( - SendJoinResult( - membership_event, - "example.com", - state=[ - EVENT_CREATE, - EVENT_CREATOR_MEMBERSHIP, - EVENT_INVITATION_MEMBERSHIP, - ], - auth_chain=[ - EVENT_CREATE, - EVENT_CREATOR_MEMBERSHIP, - EVENT_INVITATION_MEMBERSHIP, - ], - partial_state=True, - servers_in_room={"example.com"}, - ) + mock_send_join = AsyncMock( + return_value=SendJoinResult( + membership_event, + "example.com", + state=[ + EVENT_CREATE, + EVENT_CREATOR_MEMBERSHIP, + EVENT_INVITATION_MEMBERSHIP, + ], + auth_chain=[ + EVENT_CREATE, + EVENT_CREATOR_MEMBERSHIP, + EVENT_INVITATION_MEMBERSHIP, + ], + partial_state=True, + servers_in_room={"example.com"}, ) ) diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py index 23f1b33b2f..70e6a7e142 100644 --- a/tests/handlers/test_federation_event.py +++ b/tests/handlers/test_federation_event.py @@ -35,7 +35,7 @@ from synapse.types import JsonDict from synapse.util import Clock from tests import unittest -from tests.test_utils import event_injection, make_awaitable +from tests.test_utils import event_injection class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): @@ -50,6 +50,10 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): self.mock_federation_transport_client = mock.Mock( spec=["get_room_state_ids", "get_room_state", "get_event", "backfill"] ) + self.mock_federation_transport_client.get_room_state_ids = mock.AsyncMock() + self.mock_federation_transport_client.get_room_state = mock.AsyncMock() + self.mock_federation_transport_client.get_event = mock.AsyncMock() + self.mock_federation_transport_client.backfill = mock.AsyncMock() return super().setup_test_homeserver( federation_transport_client=self.mock_federation_transport_client ) @@ -198,20 +202,14 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): ) # we expect an outbound request to /state_ids, so stub that out - self.mock_federation_transport_client.get_room_state_ids.return_value = ( - make_awaitable( - { - "pdu_ids": [e.event_id for e in state_at_prev_event], - "auth_chain_ids": [], - } - ) - ) + self.mock_federation_transport_client.get_room_state_ids.return_value = { + "pdu_ids": [e.event_id for e in state_at_prev_event], + "auth_chain_ids": [], + } # we also expect an outbound request to /state self.mock_federation_transport_client.get_room_state.return_value = ( - make_awaitable( - StateRequestResponse(auth_events=[], state=state_at_prev_event) - ) + StateRequestResponse(auth_events=[], state=state_at_prev_event) ) # we have to bump the clock a bit, to keep the retry logic in @@ -273,26 +271,23 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): room_version = self.get_success(main_store.get_room_version(room_id)) # We expect an outbound request to /state_ids, so stub that out - self.mock_federation_transport_client.get_room_state_ids.return_value = make_awaitable( - { - # Mimic the other server not knowing about the state at all. 
- # We want to cause Synapse to throw an error (`Unable to get - # missing prev_event $fake_prev_event`) and fail to backfill - # the pulled event. - "pdu_ids": [], - "auth_chain_ids": [], - } - ) + self.mock_federation_transport_client.get_room_state_ids.return_value = { + # Mimic the other server not knowing about the state at all. + # We want to cause Synapse to throw an error (`Unable to get + # missing prev_event $fake_prev_event`) and fail to backfill + # the pulled event. + "pdu_ids": [], + "auth_chain_ids": [], + } + # We also expect an outbound request to /state - self.mock_federation_transport_client.get_room_state.return_value = make_awaitable( - StateRequestResponse( - # Mimic the other server not knowing about the state at all. - # We want to cause Synapse to throw an error (`Unable to get - # missing prev_event $fake_prev_event`) and fail to backfill - # the pulled event. - auth_events=[], - state=[], - ) + self.mock_federation_transport_client.get_room_state.return_value = StateRequestResponse( + # Mimic the other server not knowing about the state at all. + # We want to cause Synapse to throw an error (`Unable to get + # missing prev_event $fake_prev_event`) and fail to backfill + # the pulled event. + auth_events=[], + state=[], ) pulled_event = make_event_from_dict( @@ -545,25 +540,23 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): ) # We expect an outbound request to /backfill, so stub that out - self.mock_federation_transport_client.backfill.return_value = make_awaitable( - { - "origin": self.OTHER_SERVER_NAME, - "origin_server_ts": 123, - "pdus": [ - # This is one of the important aspects of this test: we include - # `pulled_event_without_signatures` so it fails the signature check - # when we filter down the backfill response down to events which - # have valid signatures in - # `_check_sigs_and_hash_for_pulled_events_and_fetch` - pulled_event_without_signatures.get_pdu_json(), - # Then later when we process this valid signature event, when we - # fetch the missing `prev_event`s, we want to make sure that we - # backoff and don't try and fetch `pulled_event_without_signatures` - # again since we know it just had an invalid signature. - pulled_event.get_pdu_json(), - ], - } - ) + self.mock_federation_transport_client.backfill.return_value = { + "origin": self.OTHER_SERVER_NAME, + "origin_server_ts": 123, + "pdus": [ + # This is one of the important aspects of this test: we include + # `pulled_event_without_signatures` so it fails the signature check + # when we filter down the backfill response down to events which + # have valid signatures in + # `_check_sigs_and_hash_for_pulled_events_and_fetch` + pulled_event_without_signatures.get_pdu_json(), + # Then later when we process this valid signature event, when we + # fetch the missing `prev_event`s, we want to make sure that we + # backoff and don't try and fetch `pulled_event_without_signatures` + # again since we know it just had an invalid signature. 
+ pulled_event.get_pdu_json(), + ], + } # Keep track of the count and make sure we don't make any of these requests event_endpoint_requested_count = 0 @@ -731,15 +724,13 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): ) # We expect an outbound request to /backfill, so stub that out - self.mock_federation_transport_client.backfill.return_value = make_awaitable( - { - "origin": self.OTHER_SERVER_NAME, - "origin_server_ts": 123, - "pdus": [ - pulled_event.get_pdu_json(), - ], - } - ) + self.mock_federation_transport_client.backfill.return_value = { + "origin": self.OTHER_SERVER_NAME, + "origin_server_ts": 123, + "pdus": [ + pulled_event.get_pdu_json(), + ], + } # The function under test: try to backfill and process the pulled event with LoggingContext("test"): diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index 394006f5f3..4496370c3f 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -16,7 +16,7 @@ from http import HTTPStatus from typing import Any, Dict, List, Optional, Type, Union -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from twisted.test.proto_helpers import MemoryReactor @@ -32,7 +32,6 @@ from synapse.util import Clock from tests import unittest from tests.server import FakeChannel -from tests.test_utils import make_awaitable from tests.unittest import override_config # Login flows we expect to appear in the list after the normal ones. @@ -187,7 +186,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): self.assertEqual(flows, [{"type": "m.login.password"}] + ADDITIONAL_LOGIN_FLOWS) # check_password must return an awaitable - mock_password_provider.check_password.return_value = make_awaitable(True) + mock_password_provider.check_password = AsyncMock(return_value=True) channel = self._send_password_login("u", "p") self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertEqual("@u:test", channel.json_body["user_id"]) @@ -209,13 +208,13 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): """UI Auth should delegate correctly to the password provider""" # log in twice, to get two devices - mock_password_provider.check_password.return_value = make_awaitable(True) + mock_password_provider.check_password = AsyncMock(return_value=True) tok1 = self.login("u", "p") self.login("u", "p", device_id="dev2") mock_password_provider.reset_mock() # have the auth provider deny the request to start with - mock_password_provider.check_password.return_value = make_awaitable(False) + mock_password_provider.check_password = AsyncMock(return_value=False) # make the initial request which returns a 401 session = self._start_delete_device_session(tok1, "dev2") @@ -229,7 +228,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): mock_password_provider.reset_mock() # Finally, check the request goes through when we allow it - mock_password_provider.check_password.return_value = make_awaitable(True) + mock_password_provider.check_password = AsyncMock(return_value=True) channel = self._authed_delete_device(tok1, "dev2", session, "u", "p") self.assertEqual(channel.code, 200) mock_password_provider.check_password.assert_called_once_with("@u:test", "p") @@ -243,7 +242,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): self.register_user("localuser", "localpass") # check_password must return an awaitable - mock_password_provider.check_password.return_value = make_awaitable(False) + 
mock_password_provider.check_password = AsyncMock(return_value=False) channel = self._send_password_login("u", "p") self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.result) @@ -260,7 +259,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): self.register_user("localuser", "localpass") # have the auth provider deny the request - mock_password_provider.check_password.return_value = make_awaitable(False) + mock_password_provider.check_password = AsyncMock(return_value=False) # log in twice, to get two devices tok1 = self.login("localuser", "localpass") @@ -303,7 +302,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): self.register_user("localuser", "localpass") # check_password must return an awaitable - mock_password_provider.check_password.return_value = make_awaitable(False) + mock_password_provider.check_password = AsyncMock(return_value=False) channel = self._send_password_login("localuser", "localpass") self.assertEqual(channel.code, 403) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") @@ -325,7 +324,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): self.register_user("localuser", "localpass") # allow login via the auth provider - mock_password_provider.check_password.return_value = make_awaitable(True) + mock_password_provider.check_password = AsyncMock(return_value=True) # log in twice, to get two devices tok1 = self.login("localuser", "p") @@ -342,7 +341,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): mock_password_provider.check_password.assert_not_called() # now try deleting with the local password - mock_password_provider.check_password.return_value = make_awaitable(False) + mock_password_provider.check_password = AsyncMock(return_value=False) channel = self._authed_delete_device( tok1, "dev2", session, "localuser", "localpass" ) @@ -396,9 +395,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) mock_password_provider.check_auth.assert_not_called() - mock_password_provider.check_auth.return_value = make_awaitable( - ("@user:test", None) - ) + mock_password_provider.check_auth = AsyncMock(return_value=("@user:test", None)) channel = self._send_login("test.login_type", "u", test_field="y") self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertEqual("@user:test", channel.json_body["user_id"]) @@ -447,9 +444,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): mock_password_provider.reset_mock() # right params, but authing as the wrong user - mock_password_provider.check_auth.return_value = make_awaitable( - ("@user:test", None) - ) + mock_password_provider.check_auth = AsyncMock(return_value=("@user:test", None)) body["auth"]["test_field"] = "foo" channel = self._delete_device(tok1, "dev2", body) self.assertEqual(channel.code, 403) @@ -460,8 +455,8 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): mock_password_provider.reset_mock() # and finally, succeed - mock_password_provider.check_auth.return_value = make_awaitable( - ("@localuser:test", None) + mock_password_provider.check_auth = AsyncMock( + return_value=("@localuser:test", None) ) channel = self._delete_device(tok1, "dev2", body) self.assertEqual(channel.code, 200) @@ -478,10 +473,10 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): self.custom_auth_provider_callback_test_body() def custom_auth_provider_callback_test_body(self) -> None: - callback = Mock(return_value=make_awaitable(None)) 
+ callback = AsyncMock(return_value=None) - mock_password_provider.check_auth.return_value = make_awaitable( - ("@user:test", callback) + mock_password_provider.check_auth = AsyncMock( + return_value=("@user:test", callback) ) channel = self._send_login("test.login_type", "u", test_field="y") self.assertEqual(channel.code, HTTPStatus.OK, channel.result) @@ -616,8 +611,8 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): login is disabled""" # register the user and log in twice via the test login type to get two devices, self.register_user("localuser", "localpass") - mock_password_provider.check_auth.return_value = make_awaitable( - ("@localuser:test", None) + mock_password_provider.check_auth = AsyncMock( + return_value=("@localuser:test", None) ) channel = self._send_login("test.login_type", "localuser", test_field="") self.assertEqual(channel.code, HTTPStatus.OK, channel.result) @@ -835,11 +830,11 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): username: The username to use for the test. registration: Whether to test with registration URLs. """ - self.hs.get_identity_handler().send_threepid_validation = Mock( # type: ignore[assignment] - return_value=make_awaitable(0), + self.hs.get_identity_handler().send_threepid_validation = AsyncMock( # type: ignore[assignment] + return_value=0 ) - m = Mock(return_value=make_awaitable(False)) + m = AsyncMock(return_value=False) self.hs.get_password_auth_provider().is_3pid_allowed_callbacks = [m] self.register_user(username, "password") @@ -869,7 +864,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): m.assert_called_once_with("email", "foo@test.com", registration) - m = Mock(return_value=make_awaitable(True)) + m = AsyncMock(return_value=True) self.hs.get_password_auth_provider().is_3pid_allowed_callbacks = [m] channel = self.make_request( diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index ec2f5d30be..f9b292b9ec 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Awaitable, Callable, Dict -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from parameterized import parameterized @@ -26,7 +26,6 @@ from synapse.types import JsonDict, UserID from synapse.util import Clock from tests import unittest -from tests.test_utils import make_awaitable class ProfileTestCase(unittest.HomeserverTestCase): @@ -35,7 +34,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): servlets = [admin.register_servlets] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - self.mock_federation = Mock() + self.mock_federation = AsyncMock() self.mock_registry = Mock() self.query_handlers: Dict[str, Callable[[dict], Awaitable[JsonDict]]] = {} @@ -135,9 +134,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): ) def test_get_other_name(self) -> None: - self.mock_federation.make_query.return_value = make_awaitable( - {"displayname": "Alice"} - ) + self.mock_federation.make_query.return_value = {"displayname": "Alice"} displayname = self.get_success(self.handler.get_displayname(self.alice)) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 54eeec228e..a04234829f 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -13,7 +13,7 @@ # limitations under the License. 
from typing import Any, Collection, List, Optional, Tuple -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from twisted.test.proto_helpers import MemoryReactor @@ -38,7 +38,6 @@ from synapse.types import ( ) from synapse.util import Clock -from tests.test_utils import make_awaitable from tests.unittest import override_config from tests.utils import mock_getRawHeaders @@ -203,24 +202,22 @@ class RegistrationTestCase(unittest.HomeserverTestCase): @override_config({"limit_usage_by_mau": True}) def test_get_or_create_user_mau_not_blocked(self) -> None: - self.store.count_monthly_users = Mock( # type: ignore[assignment] - return_value=make_awaitable(self.hs.config.server.max_mau_value - 1) + self.store.count_monthly_users = AsyncMock( # type: ignore[assignment] + return_value=self.hs.config.server.max_mau_value - 1 ) # Ensure does not throw exception self.get_success(self.get_or_create_user(self.requester, "c", "User")) @override_config({"limit_usage_by_mau": True}) def test_get_or_create_user_mau_blocked(self) -> None: - self.store.get_monthly_active_count = Mock( - return_value=make_awaitable(self.lots_of_users) - ) + self.store.get_monthly_active_count = AsyncMock(return_value=self.lots_of_users) self.get_failure( self.get_or_create_user(self.requester, "b", "display_name"), ResourceLimitError, ) - self.store.get_monthly_active_count = Mock( - return_value=make_awaitable(self.hs.config.server.max_mau_value) + self.store.get_monthly_active_count = AsyncMock( + return_value=self.hs.config.server.max_mau_value ) self.get_failure( self.get_or_create_user(self.requester, "b", "display_name"), @@ -229,15 +226,13 @@ class RegistrationTestCase(unittest.HomeserverTestCase): @override_config({"limit_usage_by_mau": True}) def test_register_mau_blocked(self) -> None: - self.store.get_monthly_active_count = Mock( - return_value=make_awaitable(self.lots_of_users) - ) + self.store.get_monthly_active_count = AsyncMock(return_value=self.lots_of_users) self.get_failure( self.handler.register_user(localpart="local_part"), ResourceLimitError ) - self.store.get_monthly_active_count = Mock( - return_value=make_awaitable(self.hs.config.server.max_mau_value) + self.store.get_monthly_active_count = AsyncMock( + return_value=self.hs.config.server.max_mau_value ) self.get_failure( self.handler.register_user(localpart="local_part"), ResourceLimitError @@ -292,7 +287,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): @override_config({"auto_join_rooms": ["#room:test"]}) def test_auto_create_auto_join_rooms_when_user_is_not_a_real_user(self) -> None: room_alias_str = "#room:test" - self.store.is_real_user = Mock(return_value=make_awaitable(False)) + self.store.is_real_user = AsyncMock(return_value=False) user_id = self.get_success(self.handler.register_user(localpart="support")) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) @@ -304,8 +299,8 @@ class RegistrationTestCase(unittest.HomeserverTestCase): def test_auto_create_auto_join_rooms_when_user_is_the_first_real_user(self) -> None: room_alias_str = "#room:test" - self.store.count_real_users = Mock(return_value=make_awaitable(1)) # type: ignore[assignment] - self.store.is_real_user = Mock(return_value=make_awaitable(True)) + self.store.count_real_users = AsyncMock(return_value=1) # type: ignore[assignment] + self.store.is_real_user = AsyncMock(return_value=True) user_id = self.get_success(self.handler.register_user(localpart="real")) rooms = 
self.get_success(self.store.get_rooms_for_user(user_id)) directory_handler = self.hs.get_directory_handler() @@ -319,8 +314,8 @@ class RegistrationTestCase(unittest.HomeserverTestCase): def test_auto_create_auto_join_rooms_when_user_is_not_the_first_real_user( self, ) -> None: - self.store.count_real_users = Mock(return_value=make_awaitable(2)) # type: ignore[assignment] - self.store.is_real_user = Mock(return_value=make_awaitable(True)) + self.store.count_real_users = AsyncMock(return_value=2) # type: ignore[assignment] + self.store.is_real_user = AsyncMock(return_value=True) user_id = self.get_success(self.handler.register_user(localpart="real")) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py index 41199ffa29..3e28117e2c 100644 --- a/tests/handlers/test_room_member.py +++ b/tests/handlers/test_room_member.py @@ -1,4 +1,4 @@ -from unittest.mock import Mock, patch +from unittest.mock import AsyncMock, patch from twisted.test.proto_helpers import MemoryReactor @@ -16,7 +16,6 @@ from synapse.util import Clock from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.server import make_request -from tests.test_utils import make_awaitable from tests.unittest import ( FederatingHomeserverTestCase, HomeserverTestCase, @@ -154,25 +153,21 @@ class TestJoinsLimitedByPerRoomRateLimiter(FederatingHomeserverTestCase): None, ) - mock_make_membership_event = Mock( - return_value=make_awaitable( - ( - self.OTHER_SERVER_NAME, - join_event, - self.hs.config.server.default_room_version, - ) + mock_make_membership_event = AsyncMock( + return_value=( + self.OTHER_SERVER_NAME, + join_event, + self.hs.config.server.default_room_version, ) ) - mock_send_join = Mock( - return_value=make_awaitable( - SendJoinResult( - join_event, - self.OTHER_SERVER_NAME, - state=[create_event], - auth_chain=[create_event], - partial_state=False, - servers_in_room=frozenset(), - ) + mock_send_join = AsyncMock( + return_value=SendJoinResult( + join_event, + self.OTHER_SERVER_NAME, + state=[create_event], + auth_chain=[create_event], + partial_state=False, + servers_in_room=frozenset(), ) ) diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index 9f035a02dc..948d04fc32 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
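The `tests/handlers/test_sync.py` hunk below swaps `new_callable=MagicMock` plus `make_awaitable(...)` for `new_callable=AsyncMock`; extra keyword arguments to `patch.object` (such as `return_value`) are forwarded to the `new_callable` constructor. A sketch with a made-up stand-in class:

```python
# Illustrative sketch: temporarily replace an async method with an AsyncMock.
import asyncio
from unittest.mock import AsyncMock, patch


class Store:
    async def get_prev_events_for_room(self, room_id: str) -> list:
        raise NotImplementedError


async def main() -> None:
    store = Store()
    with patch.object(
        store,
        "get_prev_events_for_room",
        new_callable=AsyncMock,
        return_value=["$last_event"],
    ):
        assert await store.get_prev_events_for_room("!room:test") == ["$last_event"]


asyncio.run(main())
```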
from typing import Optional -from unittest.mock import MagicMock, Mock, patch +from unittest.mock import AsyncMock, Mock, patch from twisted.test.proto_helpers import MemoryReactor @@ -29,7 +29,6 @@ from synapse.util import Clock import tests.unittest import tests.utils -from tests.test_utils import make_awaitable class SyncTestCase(tests.unittest.HomeserverTestCase): @@ -253,8 +252,8 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): mocked_get_prev_events = patch.object( self.hs.get_datastores().main, "get_prev_events_for_room", - new_callable=MagicMock, - return_value=make_awaitable([last_room_creation_event_id]), + new_callable=AsyncMock, + return_value=[last_room_creation_event_id], ) with mocked_get_prev_events: self.helper.join(room_id, eve, tok=eve_token) diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 5da1d95f0b..d776526bc1 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -15,7 +15,7 @@ import json from typing import Dict, List, Set -from unittest.mock import ANY, Mock, call +from unittest.mock import ANY, AsyncMock, Mock, call from netaddr import IPSet @@ -33,7 +33,6 @@ from synapse.util import Clock from tests import unittest from tests.server import ThreadedMemoryReactorClock -from tests.test_utils import make_awaitable from tests.unittest import override_config # Some local users to test with @@ -74,11 +73,11 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): # we mock out the keyring so as to skip the authentication check on the # federation API call. mock_keyring = Mock(spec=["verify_json_for_server"]) - mock_keyring.verify_json_for_server.return_value = make_awaitable(True) + mock_keyring.verify_json_for_server = AsyncMock(return_value=True) # we mock out the federation client too - self.mock_federation_client = Mock(spec=["put_json"]) - self.mock_federation_client.put_json.return_value = make_awaitable((200, "OK")) + self.mock_federation_client = AsyncMock(spec=["put_json"]) + self.mock_federation_client.put_json.return_value = (200, "OK") self.mock_federation_client.agent = MatrixFederationAgent( reactor, tls_client_options_factory=None, @@ -121,20 +120,18 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.datastore = hs.get_datastores().main - self.datastore.get_destination_retry_timings = Mock( - return_value=make_awaitable(None) + self.datastore.get_destination_retry_timings = AsyncMock(return_value=None) + + self.datastore.get_device_updates_by_remote = AsyncMock( # type: ignore[assignment] + return_value=(0, []) ) - self.datastore.get_device_updates_by_remote = Mock( # type: ignore[assignment] - return_value=make_awaitable((0, [])) + self.datastore.get_destination_last_successful_stream_ordering = AsyncMock( # type: ignore[assignment] + return_value=None ) - self.datastore.get_destination_last_successful_stream_ordering = Mock( # type: ignore[assignment] - return_value=make_awaitable(None) - ) - - self.datastore.get_received_txn_response = Mock( # type: ignore[assignment] - return_value=make_awaitable(None) + self.datastore.get_received_txn_response = AsyncMock( # type: ignore[assignment] + return_value=None ) self.room_members: List[UserID] = [] @@ -173,27 +170,25 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.datastore.get_users_in_room = Mock(side_effect=get_users_in_room) - self.datastore.get_user_directory_stream_pos = Mock( # type: ignore[assignment] - side_effect=( - # we deliberately return a non-None stream pos to avoid - # 
doing an initial_sync - lambda: make_awaitable(1) - ) + self.datastore.get_user_directory_stream_pos = AsyncMock( # type: ignore[assignment] + # we deliberately return a non-None stream pos to avoid + # doing an initial_sync + return_value=1 ) self.datastore.get_partial_current_state_deltas = Mock(return_value=(0, None)) # type: ignore[assignment] self.datastore.get_to_device_stream_token = Mock( # type: ignore[assignment] - side_effect=lambda: 0 + return_value=0 ) - self.datastore.get_new_device_msgs_for_remote = Mock( # type: ignore[assignment] - side_effect=lambda *args, **kargs: make_awaitable(([], 0)) + self.datastore.get_new_device_msgs_for_remote = AsyncMock( # type: ignore[assignment] + return_value=([], 0) ) - self.datastore.delete_device_msgs_for_remote = Mock( # type: ignore[assignment] - side_effect=lambda *args, **kargs: make_awaitable(None) + self.datastore.delete_device_msgs_for_remote = AsyncMock( # type: ignore[assignment] + return_value=None ) - self.datastore.set_received_txn_response = Mock( # type: ignore[assignment] - side_effect=lambda *args, **kwargs: make_awaitable(None) + self.datastore.set_received_txn_response = AsyncMock( # type: ignore[assignment] + return_value=None ) def test_started_typing_local(self) -> None: diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 430209705e..b5f15aa7d4 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Tuple -from unittest.mock import Mock, patch +from unittest.mock import AsyncMock, Mock, patch from urllib.parse import quote from twisted.test.proto_helpers import MemoryReactor @@ -30,7 +30,7 @@ from synapse.util import Clock from tests import unittest from tests.storage.test_user_directory import GetUserDirectoryTables -from tests.test_utils import event_injection, make_awaitable +from tests.test_utils import event_injection from tests.test_utils.event_injection import inject_member_event from tests.unittest import override_config @@ -471,7 +471,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): self.store.register_user(user_id=r_user_id, password_hash=None) ) - mock_remove_from_user_dir = Mock(return_value=make_awaitable(None)) + mock_remove_from_user_dir = AsyncMock(return_value=None) with patch.object( self.store, "remove_from_user_dir", mock_remove_from_user_dir ): diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 6a0b5fc0bd..0d17f2fe5b 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -14,8 +14,8 @@ import base64 import logging import os -from typing import Any, Awaitable, Callable, Generator, List, Optional, cast -from unittest.mock import Mock, patch +from typing import Generator, List, Optional, cast +from unittest.mock import AsyncMock, patch import treq from netaddr import IPSet @@ -41,7 +41,7 @@ from twisted.web.iweb import IPolicyForHTTPS, IResponse from synapse.config.homeserver import HomeServerConfig from synapse.crypto.context_factory import FederationPolicyForHTTPS from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent -from synapse.http.federation.srv_resolver import Server +from synapse.http.federation.srv_resolver import Server, SrvResolver from 
synapse.http.federation.well_known_resolver import ( WELL_KNOWN_MAX_SIZE, WellKnownResolver, @@ -68,21 +68,11 @@ from tests.utils import checked_cast, default_config logger = logging.getLogger(__name__) -# Once Async Mocks or lambdas are supported this can go away. -def generate_resolve_service( - result: List[Server], -) -> Callable[[Any], Awaitable[List[Server]]]: - async def resolve_service(_: Any) -> List[Server]: - return result - - return resolve_service - - class MatrixFederationAgentTests(unittest.TestCase): def setUp(self) -> None: self.reactor = ThreadedMemoryReactorClock() - self.mock_resolver = Mock() + self.mock_resolver = AsyncMock(spec=SrvResolver) config_dict = default_config("test", parse=False) config_dict["federation_custom_ca_list"] = [get_test_ca_cert_file()] @@ -636,7 +626,7 @@ class MatrixFederationAgentTests(unittest.TestCase): """ self.agent = self._make_agent() - self.mock_resolver.resolve_service.side_effect = generate_resolve_service([]) + self.mock_resolver.resolve_service.return_value = [] self.reactor.lookups["testserv1"] = "1.2.3.4" test_d = self._make_get_request(b"matrix-federation://testserv1/foo/bar") @@ -722,7 +712,7 @@ class MatrixFederationAgentTests(unittest.TestCase): """ self.agent = self._make_agent() - self.mock_resolver.resolve_service.side_effect = generate_resolve_service([]) + self.mock_resolver.resolve_service.return_value = [] self.reactor.lookups["testserv"] = "1.2.3.4" test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") @@ -776,7 +766,7 @@ class MatrixFederationAgentTests(unittest.TestCase): """Test the behaviour when the .well-known delegates elsewhere""" self.agent = self._make_agent() - self.mock_resolver.resolve_service.side_effect = generate_resolve_service([]) + self.mock_resolver.resolve_service.return_value = [] self.reactor.lookups["testserv"] = "1.2.3.4" self.reactor.lookups["target-server"] = "1::f" @@ -840,7 +830,7 @@ class MatrixFederationAgentTests(unittest.TestCase): """ self.agent = self._make_agent() - self.mock_resolver.resolve_service.side_effect = generate_resolve_service([]) + self.mock_resolver.resolve_service.return_value = [] self.reactor.lookups["testserv"] = "1.2.3.4" self.reactor.lookups["target-server"] = "1::f" @@ -930,7 +920,7 @@ class MatrixFederationAgentTests(unittest.TestCase): """ self.agent = self._make_agent() - self.mock_resolver.resolve_service.side_effect = generate_resolve_service([]) + self.mock_resolver.resolve_service.return_value = [] self.reactor.lookups["testserv"] = "1.2.3.4" test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") @@ -986,7 +976,7 @@ class MatrixFederationAgentTests(unittest.TestCase): # the config left to the default, which will not trust it (since the # presented cert is signed by a test CA) - self.mock_resolver.resolve_service.side_effect = generate_resolve_service([]) + self.mock_resolver.resolve_service.return_value = [] self.reactor.lookups["testserv"] = "1.2.3.4" config = default_config("test", parse=True) @@ -1037,9 +1027,9 @@ class MatrixFederationAgentTests(unittest.TestCase): """ self.agent = self._make_agent() - self.mock_resolver.resolve_service.side_effect = generate_resolve_service( - [Server(host=b"srvtarget", port=8443)] - ) + self.mock_resolver.resolve_service.return_value = [ + Server(host=b"srvtarget", port=8443) + ] self.reactor.lookups["srvtarget"] = "1.2.3.4" test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") @@ -1094,9 +1084,9 @@ class MatrixFederationAgentTests(unittest.TestCase): 
self.assertEqual(host, "1.2.3.4") self.assertEqual(port, 443) - self.mock_resolver.resolve_service.side_effect = generate_resolve_service( - [Server(host=b"srvtarget", port=8443)] - ) + self.mock_resolver.resolve_service.return_value = [ + Server(host=b"srvtarget", port=8443) + ] self._handle_well_known_connection( client_factory, @@ -1137,7 +1127,7 @@ class MatrixFederationAgentTests(unittest.TestCase): """test the behaviour when the server name has idna chars in""" self.agent = self._make_agent() - self.mock_resolver.resolve_service.side_effect = generate_resolve_service([]) + self.mock_resolver.resolve_service.return_value = [] # the resolver is always called with the IDNA hostname as a native string. self.reactor.lookups["xn--bcher-kva.com"] = "1.2.3.4" @@ -1201,9 +1191,9 @@ class MatrixFederationAgentTests(unittest.TestCase): """test the behaviour when the target of a SRV record has idna chars""" self.agent = self._make_agent() - self.mock_resolver.resolve_service.side_effect = generate_resolve_service( - [Server(host=b"xn--trget-3qa.com", port=8443)] # târget.com - ) + self.mock_resolver.resolve_service.return_value = [ + Server(host=b"xn--trget-3qa.com", port=8443) + ] # târget.com self.reactor.lookups["xn--trget-3qa.com"] = "1.2.3.4" test_d = self._make_get_request( @@ -1407,12 +1397,10 @@ class MatrixFederationAgentTests(unittest.TestCase): """Test that other SRV results are tried if the first one fails.""" self.agent = self._make_agent() - self.mock_resolver.resolve_service.side_effect = generate_resolve_service( - [ - Server(host=b"target.com", port=8443), - Server(host=b"target.com", port=8444), - ] - ) + self.mock_resolver.resolve_service.return_value = [ + Server(host=b"target.com", port=8443), + Server(host=b"target.com", port=8444), + ] self.reactor.lookups["target.com"] = "1.2.3.4" test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py index a324b4d31d..9b28cd474f 100644 --- a/tests/replication/test_federation_sender_shard.py +++ b/tests/replication/test_federation_sender_shard.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from netaddr import IPSet @@ -26,7 +26,6 @@ from synapse.types import UserID, create_requester from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.server import get_clock -from tests.test_utils import make_awaitable logger = logging.getLogger(__name__) @@ -62,7 +61,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): new event. """ mock_client = Mock(spec=["put_json"]) - mock_client.put_json.return_value = make_awaitable({}) + mock_client.put_json = AsyncMock(return_value={}) mock_client.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", @@ -93,7 +92,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): new events. 
""" mock_client1 = Mock(spec=["put_json"]) - mock_client1.put_json.return_value = make_awaitable({}) + mock_client1.put_json = AsyncMock(return_value={}) mock_client1.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", @@ -108,7 +107,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): ) mock_client2 = Mock(spec=["put_json"]) - mock_client2.put_json.return_value = make_awaitable({}) + mock_client2.put_json = AsyncMock(return_value={}) mock_client2.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", @@ -162,7 +161,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): new typing EDUs. """ mock_client1 = Mock(spec=["put_json"]) - mock_client1.put_json.return_value = make_awaitable({}) + mock_client1.put_json = AsyncMock(return_value={}) mock_client1.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", @@ -177,7 +176,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): ) mock_client2 = Mock(spec=["put_json"]) - mock_client2.put_json.return_value = make_awaitable({}) + mock_client2.put_json = AsyncMock(return_value={}) mock_client2.agent = self.matrix_federation_agent self.make_worker_hs( "synapse.app.generic_worker", diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index feb81844ae..339a41c7e1 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -18,7 +18,7 @@ import os import urllib.parse from binascii import unhexlify from typing import List, Optional -from unittest.mock import Mock, patch +from unittest.mock import AsyncMock, Mock, patch from parameterized import parameterized, parameterized_class @@ -45,7 +45,7 @@ from synapse.util import Clock from tests import unittest from tests.server import FakeSite, make_request -from tests.test_utils import SMALL_PNG, make_awaitable +from tests.test_utils import SMALL_PNG from tests.unittest import override_config @@ -419,8 +419,8 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): store = self.hs.get_datastores().main # Set monthly active users to the limit - store.get_monthly_active_count = Mock( - return_value=make_awaitable(self.hs.config.server.max_mau_value) + store.get_monthly_active_count = AsyncMock( + return_value=self.hs.config.server.max_mau_value ) # Check that the blocking of monthly active users is working as expected # The registration of a new user fails due to the limit @@ -1834,8 +1834,8 @@ class UserRestTestCase(unittest.HomeserverTestCase): ) # Set monthly active users to the limit - self.store.get_monthly_active_count = Mock( - return_value=make_awaitable(self.hs.config.server.max_mau_value) + self.store.get_monthly_active_count = AsyncMock( + return_value=self.hs.config.server.max_mau_value ) # Check that the blocking of monthly active users is working as expected # The registration of a new user fails due to the limit @@ -1871,8 +1871,8 @@ class UserRestTestCase(unittest.HomeserverTestCase): handler = self.hs.get_registration_handler() # Set monthly active users to the limit - self.store.get_monthly_active_count = Mock( - return_value=make_awaitable(self.hs.config.server.max_mau_value) + self.store.get_monthly_active_count = AsyncMock( + return_value=self.hs.config.server.max_mau_value ) # Check that the blocking of monthly active users is working as expected # The registration of a new user fails due to the limit diff --git a/tests/rest/client/test_account_data.py b/tests/rest/client/test_account_data.py index 
d5b0640e7a..481db9a687 100644 --- a/tests/rest/client/test_account_data.py +++ b/tests/rest/client/test_account_data.py @@ -11,13 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from unittest.mock import Mock +from unittest.mock import AsyncMock from synapse.rest import admin from synapse.rest.client import account_data, login, room from tests import unittest -from tests.test_utils import make_awaitable class AccountDataTestCase(unittest.HomeserverTestCase): @@ -32,7 +31,7 @@ class AccountDataTestCase(unittest.HomeserverTestCase): """Tests that the on_account_data_updated module callback is called correctly when a user's account data changes. """ - mocked_callback = Mock(return_value=make_awaitable(None)) + mocked_callback = AsyncMock(return_value=None) self.hs.get_account_data_handler()._on_account_data_updated_callbacks.append( mocked_callback ) diff --git a/tests/rest/client/test_presence.py b/tests/rest/client/test_presence.py index e12098102b..66b387cea3 100644 --- a/tests/rest/client/test_presence.py +++ b/tests/rest/client/test_presence.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from http import HTTPStatus -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from twisted.test.proto_helpers import MemoryReactor @@ -23,7 +23,6 @@ from synapse.types import UserID from synapse.util import Clock from tests import unittest -from tests.test_utils import make_awaitable class PresenceTestCase(unittest.HomeserverTestCase): @@ -36,7 +35,7 @@ class PresenceTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.presence_handler = Mock(spec=PresenceHandler) - self.presence_handler.set_state.return_value = make_awaitable(None) + self.presence_handler.set_state = AsyncMock(return_value=None) hs = self.setup_test_homeserver( "red", diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 9bfe913e45..d3f6191996 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -15,7 +15,7 @@ import urllib.parse from typing import Any, Callable, Dict, List, Optional, Tuple -from unittest.mock import patch +from unittest.mock import AsyncMock, patch from twisted.test.proto_helpers import MemoryReactor @@ -28,7 +28,6 @@ from synapse.util import Clock from tests import unittest from tests.server import FakeChannel -from tests.test_utils import make_awaitable from tests.test_utils.event_injection import inject_event from tests.unittest import override_config @@ -264,7 +263,8 @@ class RelationsTestCase(BaseRelationsTestCase): # Disable the validation to pretend this came over federation. with patch( "synapse.handlers.message.EventCreationHandler._validate_event_relation", - new=lambda self, event: make_awaitable(None), + new_callable=AsyncMock, + return_value=None, ): # Generate a various relations from a different room. self.get_success( @@ -1300,7 +1300,8 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): # not an event the Client-Server API will allow.. with patch( "synapse.handlers.message.EventCreationHandler._validate_event_relation", - new=lambda self, event: make_awaitable(None), + new_callable=AsyncMock, + return_value=None, ): # Create a sub-thread off the thread, which is not allowed. 
self._send_relation( diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 88e579dc39..53182459e4 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -20,7 +20,7 @@ import json from http import HTTPStatus from typing import Any, Dict, Iterable, List, Optional, Tuple, Union -from unittest.mock import Mock, call, patch +from unittest.mock import AsyncMock, Mock, call, patch from urllib import parse as urlparse from parameterized import param, parameterized @@ -52,7 +52,6 @@ from synapse.util.stringutils import random_string from tests import unittest from tests.http.server._base import make_request_with_cancellation_test from tests.storage.test_stream import PaginationTestCase -from tests.test_utils import make_awaitable from tests.test_utils.event_injection import create_event from tests.unittest import override_config @@ -70,8 +69,8 @@ class RoomBase(unittest.HomeserverTestCase): ) self.hs.get_federation_handler = Mock() # type: ignore[assignment] - self.hs.get_federation_handler.return_value.maybe_backfill = Mock( - return_value=make_awaitable(None) + self.hs.get_federation_handler.return_value.maybe_backfill = AsyncMock( + return_value=None ) async def _insert_client_ip(*args: Any, **kwargs: Any) -> None: @@ -2375,7 +2374,7 @@ class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase): ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - return self.setup_test_homeserver(federation_client=Mock()) + return self.setup_test_homeserver(federation_client=AsyncMock()) def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.register_user("user", "pass") @@ -2385,7 +2384,7 @@ class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase): def test_simple(self) -> None: "Simple test for searching rooms over federation" - self.federation_client.get_public_rooms.return_value = make_awaitable({}) # type: ignore[attr-defined] + self.federation_client.get_public_rooms.return_value = {} # type: ignore[attr-defined] search_filter = {PublicRoomsFilterFields.GENERIC_SEARCH_TERM: "foobar"} @@ -2413,7 +2412,7 @@ class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase): # with a 404, when using search filters. self.federation_client.get_public_rooms.side_effect = ( # type: ignore[attr-defined] HttpResponseException(HTTPStatus.NOT_FOUND, "Not Found", b""), - make_awaitable({}), + {}, ) search_filter = {PublicRoomsFilterFields.GENERIC_SEARCH_TERM: "foobar"} @@ -3413,17 +3412,17 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): # Mock a few functions to prevent the test from failing due to failing to talk to # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. - make_invite_mock = Mock(return_value=make_awaitable((Mock(event_id="abc"), 0))) + make_invite_mock = AsyncMock(return_value=(Mock(event_id="abc"), 0)) self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock # type: ignore[assignment] - self.hs.get_identity_handler().lookup_3pid = Mock( # type: ignore[assignment] - return_value=make_awaitable(None), + self.hs.get_identity_handler().lookup_3pid = AsyncMock( # type: ignore[assignment] + return_value=None, ) # Add a mock to the spamchecker callbacks for user_may_send_3pid_invite. Make it # allow everything for now. 
# `spec` argument is needed for this function mock to have `__qualname__`, which # is needed for `Measure` metrics buried in SpamChecker. - mock = Mock(return_value=make_awaitable(True), spec=lambda *x: None) + mock = AsyncMock(return_value=True, spec=lambda *x: None) self.hs.get_module_api_callbacks().spam_checker._user_may_send_3pid_invite_callbacks.append( mock ) @@ -3451,7 +3450,7 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): # Now change the return value of the callback to deny any invite and test that # we can't send the invite. - mock.return_value = make_awaitable(False) + mock.return_value = False channel = self.make_request( method="POST", path="/rooms/" + self.room_id + "/invite", @@ -3477,18 +3476,18 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): # Mock a few functions to prevent the test from failing due to failing to talk to # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. - make_invite_mock = Mock(return_value=make_awaitable((Mock(event_id="abc"), 0))) + make_invite_mock = AsyncMock(return_value=(Mock(event_id="abc"), 0)) self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock # type: ignore[assignment] - self.hs.get_identity_handler().lookup_3pid = Mock( # type: ignore[assignment] - return_value=make_awaitable(None), + self.hs.get_identity_handler().lookup_3pid = AsyncMock( # type: ignore[assignment] + return_value=None, ) # Add a mock to the spamchecker callbacks for user_may_send_3pid_invite. Make it # allow everything for now. # `spec` argument is needed for this function mock to have `__qualname__`, which # is needed for `Measure` metrics buried in SpamChecker. - mock = Mock( - return_value=make_awaitable(synapse.module_api.NOT_SPAM), + mock = AsyncMock( + return_value=synapse.module_api.NOT_SPAM, spec=lambda *x: None, ) self.hs.get_module_api_callbacks().spam_checker._user_may_send_3pid_invite_callbacks.append( @@ -3519,7 +3518,7 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): # Now change the return value of the callback to deny any invite and test that # we can't send the invite. We pick an arbitrary error code to be able to check # that the same code has been returned - mock.return_value = make_awaitable(Codes.CONSENT_NOT_GIVEN) + mock.return_value = Codes.CONSENT_NOT_GIVEN channel = self.make_request( method="POST", path="/rooms/" + self.room_id + "/invite", @@ -3538,7 +3537,7 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): make_invite_mock.assert_called_once() # Run variant with `Tuple[Codes, dict]`. - mock.return_value = make_awaitable((Codes.EXPIRED_ACCOUNT, {"field": "value"})) + mock.return_value = (Codes.EXPIRED_ACCOUNT, {"field": "value"}) channel = self.make_request( method="POST", path="/rooms/" + self.room_id + "/invite", diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index e5ba5a9706..da37fcb045 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -13,7 +13,7 @@ # limitations under the License. 
import threading from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from twisted.test.proto_helpers import MemoryReactor @@ -33,7 +33,6 @@ from synapse.util import Clock from synapse.util.frozenutils import unfreeze from tests import unittest -from tests.test_utils import make_awaitable if TYPE_CHECKING: from synapse.module_api import ModuleApi @@ -477,7 +476,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): def test_on_new_event(self) -> None: """Test that the on_new_event callback is called on new events""" - on_new_event = Mock(make_awaitable(None)) + on_new_event = AsyncMock(return_value=None) self.hs.get_module_api_callbacks().third_party_event_rules._on_new_event_callbacks.append( on_new_event ) @@ -580,7 +579,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): avatar_url = "mxc://matrix.org/oWQDvfewxmlRaRCkVbfetyEo" # Register a mock callback. - m = Mock(return_value=make_awaitable(None)) + m = AsyncMock(return_value=None) self.hs.get_module_api_callbacks().third_party_event_rules._on_profile_update_callbacks.append( m ) @@ -641,7 +640,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): avatar_url = "mxc://matrix.org/oWQDvfewxmlRaRCkVbfetyEo" # Register a mock callback. - m = Mock(return_value=make_awaitable(None)) + m = AsyncMock(return_value=None) self.hs.get_module_api_callbacks().third_party_event_rules._on_profile_update_callbacks.append( m ) @@ -682,7 +681,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): correctly when processing a user's deactivation. """ # Register a mocked callback. - deactivation_mock = Mock(return_value=make_awaitable(None)) + deactivation_mock = AsyncMock(return_value=None) third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._on_user_deactivation_status_changed_callbacks.append( deactivation_mock, @@ -690,7 +689,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # Also register a mocked callback for profile updates, to check that the # deactivation code calls it in a way that let modules know the user is being # deactivated. - profile_mock = Mock(return_value=make_awaitable(None)) + profile_mock = AsyncMock(return_value=None) self.hs.get_module_api_callbacks().third_party_event_rules._on_profile_update_callbacks.append( profile_mock, ) @@ -740,7 +739,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): well as a reactivation. """ # Register a mock callback. - m = Mock(return_value=make_awaitable(None)) + m = AsyncMock(return_value=None) third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._on_user_deactivation_status_changed_callbacks.append(m) @@ -794,7 +793,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): correctly when processing a user's deactivation. """ # Register a mocked callback. - deactivation_mock = Mock(return_value=make_awaitable(False)) + deactivation_mock = AsyncMock(return_value=False) third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._check_can_deactivate_user_callbacks.append( deactivation_mock, @@ -840,7 +839,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): correctly when processing a user's deactivation triggered by a server admin. """ # Register a mocked callback. 
- deactivation_mock = Mock(return_value=make_awaitable(False)) + deactivation_mock = AsyncMock(return_value=False) third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._check_can_deactivate_user_callbacks.append( deactivation_mock, @@ -879,7 +878,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): correctly when processing an admin's shutdown room request. """ # Register a mocked callback. - shutdown_mock = Mock(return_value=make_awaitable(False)) + shutdown_mock = AsyncMock(return_value=False) third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._check_can_shutdown_room_callbacks.append( shutdown_mock, @@ -915,7 +914,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): associating a 3PID to an account. """ # Register a mocked callback. - threepid_bind_mock = Mock(return_value=make_awaitable(None)) + threepid_bind_mock = AsyncMock(return_value=None) third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules third_party_rules._on_threepid_bind_callbacks.append(threepid_bind_mock) @@ -957,11 +956,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): just before associating and removing a 3PID to/from an account. """ # Pretend to be a Synapse module and register both callbacks as mocks. - on_add_user_third_party_identifier_callback_mock = Mock( - return_value=make_awaitable(None) - ) - on_remove_user_third_party_identifier_callback_mock = Mock( - return_value=make_awaitable(None) + on_add_user_third_party_identifier_callback_mock = AsyncMock(return_value=None) + on_remove_user_third_party_identifier_callback_mock = AsyncMock( + return_value=None ) self.hs.get_module_api().register_third_party_rules_callbacks( on_add_user_third_party_identifier=on_add_user_third_party_identifier_callback_mock, @@ -1021,8 +1018,8 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): when a user is deactivated and their third-party ID associations are deleted. """ # Pretend to be a Synapse module and register both callbacks as mocks. 
- on_remove_user_third_party_identifier_callback_mock = Mock( - return_value=make_awaitable(None) + on_remove_user_third_party_identifier_callback_mock = AsyncMock( + return_value=None ) self.hs.get_module_api().register_third_party_rules_callbacks( on_remove_user_third_party_identifier=on_remove_user_third_party_identifier_callback_mock, diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index d8dc56261a..951a3cbc43 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -14,7 +14,7 @@ from http import HTTPStatus from typing import Any, Generator, Tuple, cast -from unittest.mock import Mock, call +from unittest.mock import AsyncMock, Mock, call from twisted.internet import defer, reactor as _reactor @@ -24,7 +24,6 @@ from synapse.types import ISynapseReactor, JsonDict from synapse.util import Clock from tests import unittest -from tests.test_utils import make_awaitable from tests.utils import MockClock reactor = cast(ISynapseReactor, _reactor) @@ -53,7 +52,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase): def test_executes_given_function( self, ) -> Generator["defer.Deferred[Any]", object, None]: - cb = Mock(return_value=make_awaitable(self.mock_http_response)) + cb = AsyncMock(return_value=self.mock_http_response) res = yield self.cache.fetch_or_execute_request( self.mock_request, self.mock_requester, cb, "some_arg", keyword="arg" ) @@ -64,7 +63,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase): def test_deduplicates_based_on_key( self, ) -> Generator["defer.Deferred[Any]", object, None]: - cb = Mock(return_value=make_awaitable(self.mock_http_response)) + cb = AsyncMock(return_value=self.mock_http_response) for i in range(3): # invoke multiple times res = yield self.cache.fetch_or_execute_request( self.mock_request, @@ -168,7 +167,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase): @defer.inlineCallbacks def test_cleans_up(self) -> Generator["defer.Deferred[Any]", object, None]: - cb = Mock(return_value=make_awaitable(self.mock_http_response)) + cb = AsyncMock(return_value=self.mock_http_response) yield self.cache.fetch_or_execute_request( self.mock_request, self.mock_requester, cb, "an arg" ) diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index d2bfa53eda..47c53a5475 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
from typing import Tuple -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from twisted.test.proto_helpers import MemoryReactor @@ -29,7 +29,6 @@ from synapse.types import JsonDict from synapse.util import Clock from tests import unittest -from tests.test_utils import make_awaitable from tests.unittest import override_config from tests.utils import default_config @@ -69,24 +68,22 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): assert isinstance(rlsn, ResourceLimitsServerNotices) self._rlsn = rlsn - self._rlsn._store.user_last_seen_monthly_active = Mock( - return_value=make_awaitable(1000) - ) - self._rlsn._server_notices_manager.send_notice = Mock( # type: ignore[assignment] - return_value=make_awaitable(Mock()) + self._rlsn._store.user_last_seen_monthly_active = AsyncMock(return_value=1000) + self._rlsn._server_notices_manager.send_notice = AsyncMock( # type: ignore[assignment] + return_value=Mock() ) self._send_notice = self._rlsn._server_notices_manager.send_notice self.user_id = "@user_id:test" - self._rlsn._server_notices_manager.get_or_create_notice_room_for_user = Mock( - return_value=make_awaitable("!something:localhost") + self._rlsn._server_notices_manager.get_or_create_notice_room_for_user = ( + AsyncMock(return_value="!something:localhost") ) - self._rlsn._server_notices_manager.maybe_get_notice_room_for_user = Mock( - return_value=make_awaitable("!something:localhost") + self._rlsn._server_notices_manager.maybe_get_notice_room_for_user = AsyncMock( + return_value="!something:localhost" ) - self._rlsn._store.add_tag_to_room = Mock(return_value=make_awaitable(None)) # type: ignore[assignment] - self._rlsn._store.get_tags_for_room = Mock(return_value=make_awaitable({})) # type: ignore[assignment] + self._rlsn._store.add_tag_to_room = AsyncMock(return_value=None) # type: ignore[assignment] + self._rlsn._store.get_tags_for_room = AsyncMock(return_value={}) # type: ignore[assignment] @override_config({"hs_disabled": True}) def test_maybe_send_server_notice_disabled_hs(self) -> None: @@ -103,14 +100,14 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): def test_maybe_send_server_notice_to_user_remove_blocked_notice(self) -> None: """Test when user has blocked notice, but should have it removed""" - self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] - return_value=make_awaitable(None) + self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[assignment] + return_value=None ) mock_event = Mock( type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} ) - self._rlsn._store.get_events = Mock( # type: ignore[assignment] - return_value=make_awaitable({"123": mock_event}) + self._rlsn._store.get_events = AsyncMock( # type: ignore[assignment] + return_value={"123": mock_event} ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) # Would be better to check the content, but once == remove blocking event @@ -125,16 +122,16 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test when user has blocked notice, but notice ought to be there (NOOP) """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] - return_value=make_awaitable(None), + self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[assignment] + return_value=None, side_effect=ResourceLimitError(403, "foo"), ) mock_event = Mock( type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} ) - 
self._rlsn._store.get_events = Mock( # type: ignore[assignment] - return_value=make_awaitable({"123": mock_event}) + self._rlsn._store.get_events = AsyncMock( # type: ignore[assignment] + return_value={"123": mock_event} ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) @@ -145,8 +142,8 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test when user does not have blocked notice, but should have one """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] - return_value=make_awaitable(None), + self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[assignment] + return_value=None, side_effect=ResourceLimitError(403, "foo"), ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) @@ -158,8 +155,8 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test when user does not have blocked notice, nor should they (NOOP) """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] - return_value=make_awaitable(None) + self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[assignment] + return_value=None ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) @@ -171,12 +168,10 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): Test when user is not part of the MAU cohort - this should not ever happen - but ... """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] - return_value=make_awaitable(None) - ) - self._rlsn._store.user_last_seen_monthly_active = Mock( - return_value=make_awaitable(None) + self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[assignment] + return_value=None ) + self._rlsn._store.user_last_seen_monthly_active = AsyncMock(return_value=None) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) self._send_notice.assert_not_called() @@ -189,8 +184,8 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): Test that when server is over MAU limit and alerting is suppressed, then an alert message is not sent into the room """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] - return_value=make_awaitable(None), + self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[assignment] + return_value=None, side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER ), @@ -204,8 +199,8 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test that when a server is disabled, that MAU limit alerting is ignored. """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] - return_value=make_awaitable(None), + self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[assignment] + return_value=None, side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.HS_DISABLED ), @@ -223,22 +218,22 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): When the room is already in a blocked state, test that when alerting is suppressed that the room is returned to an unblocked state. 
""" - self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] - return_value=make_awaitable(None), + self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[assignment] + return_value=None, side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER ), ) - self._rlsn._is_room_currently_blocked = Mock( # type: ignore[assignment] - return_value=make_awaitable((True, [])) + self._rlsn._is_room_currently_blocked = AsyncMock( # type: ignore[assignment] + return_value=(True, []) ) mock_event = Mock( type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} ) - self._rlsn._store.get_events = Mock( # type: ignore[assignment] - return_value=make_awaitable({"123": mock_event}) + self._rlsn._store.get_events = AsyncMock( # type: ignore[assignment] + return_value={"123": mock_event} ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) @@ -284,11 +279,9 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase): self.user_id = "@user_id:test" def test_server_notice_only_sent_once(self) -> None: - self.store.get_monthly_active_count = Mock(return_value=make_awaitable(1000)) + self.store.get_monthly_active_count = AsyncMock(return_value=1000) - self.store.user_last_seen_monthly_active = Mock( - return_value=make_awaitable(1000) - ) + self.store.user_last_seen_monthly_active = AsyncMock(return_value=1000) # Call the function multiple times to ensure we only send the notice once self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) @@ -327,7 +320,7 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase): hasn't been reached (since it's the only user and the limit is 5), so users shouldn't receive a server notice. """ - m = Mock(return_value=make_awaitable(None)) + m = AsyncMock(return_value=None) self._rlsn._server_notices_manager.maybe_get_notice_room_for_user = m user_id = self.register_user("user", "password") diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 71302facd1..48f39df9fe 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -15,7 +15,7 @@ import json import os import tempfile from typing import List, cast -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock import yaml @@ -35,7 +35,6 @@ from synapse.types import DeviceListUpdates from synapse.util import Clock from tests import unittest -from tests.test_utils import make_awaitable class ApplicationServiceStoreTestCase(unittest.HomeserverTestCase): @@ -339,7 +338,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase): # we aren't testing store._base stuff here, so mock this out # (ignore needed because Mypy won't allow us to assign to a method otherwise) - self.store.get_events_as_list = Mock(return_value=make_awaitable(events)) # type: ignore[assignment] + self.store.get_events_as_list = AsyncMock(return_value=events) # type: ignore[assignment] self.get_success(self._insert_txn(self.as_list[1]["id"], 9, other_events)) self.get_success(self._insert_txn(service.id, 10, events)) diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index a4a823a252..2af7280ba3 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock import yaml @@ -32,7 +32,7 @@ from synapse.types import JsonDict from synapse.util import Clock from tests import unittest -from tests.test_utils import make_awaitable, simple_async_mock +from tests.test_utils import simple_async_mock from tests.unittest import override_config @@ -363,9 +363,9 @@ class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase): # Register the callbacks with more mocks self.hs.get_module_api().register_background_update_controller_callbacks( on_update=self._on_update, - min_batch_size=Mock(return_value=make_awaitable(self._default_batch_size)), - default_batch_size=Mock( - return_value=make_awaitable(self._default_batch_size), + min_batch_size=AsyncMock(return_value=self._default_batch_size), + default_batch_size=AsyncMock( + return_value=self._default_batch_size, ), ) diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 209d68b40b..12e24d4dbd 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -14,7 +14,7 @@ # limitations under the License. from typing import Any, Dict -from unittest.mock import Mock +from unittest.mock import AsyncMock from parameterized import parameterized @@ -30,7 +30,6 @@ from synapse.util import Clock from tests import unittest from tests.server import make_request -from tests.test_utils import make_awaitable from tests.unittest import override_config @@ -443,9 +442,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): lots_of_users = 100 user_id = "@user:server" - self.store.get_monthly_active_count = Mock( - return_value=make_awaitable(lots_of_users) - ) + self.store.get_monthly_active_count = AsyncMock(return_value=lots_of_users) self.get_success( self.store.insert_client_ip( user_id, "access_token", "ip", "user_agent", "device_id" diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 2827738379..0bf706ba08 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
from typing import Any, Dict, List -from unittest.mock import Mock +from unittest.mock import AsyncMock from twisted.test.proto_helpers import MemoryReactor @@ -21,7 +21,6 @@ from synapse.server import HomeServer from synapse.util import Clock from tests import unittest -from tests.test_utils import make_awaitable from tests.unittest import default_config, override_config FORTY_DAYS = 40 * 24 * 60 * 60 @@ -253,7 +252,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): ) self.get_success(d) - self.store.upsert_monthly_active_user = Mock(return_value=make_awaitable(None)) # type: ignore[assignment] + self.store.upsert_monthly_active_user = AsyncMock(return_value=None) # type: ignore[assignment] d = self.store.populate_monthly_active_users(user_id) self.get_success(d) @@ -261,24 +260,22 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): self.store.upsert_monthly_active_user.assert_not_called() def test_populate_monthly_users_should_update(self) -> None: - self.store.upsert_monthly_active_user = Mock(return_value=make_awaitable(None)) # type: ignore[assignment] + self.store.upsert_monthly_active_user = AsyncMock(return_value=None) # type: ignore[assignment] - self.store.is_trial_user = Mock(return_value=make_awaitable(False)) # type: ignore[assignment] + self.store.is_trial_user = AsyncMock(return_value=False) # type: ignore[assignment] - self.store.user_last_seen_monthly_active = Mock( - return_value=make_awaitable(None) - ) + self.store.user_last_seen_monthly_active = AsyncMock(return_value=None) d = self.store.populate_monthly_active_users("user_id") self.get_success(d) self.store.upsert_monthly_active_user.assert_called_once() def test_populate_monthly_users_should_not_update(self) -> None: - self.store.upsert_monthly_active_user = Mock(return_value=make_awaitable(None)) # type: ignore[assignment] + self.store.upsert_monthly_active_user = AsyncMock(return_value=None) # type: ignore[assignment] - self.store.is_trial_user = Mock(return_value=make_awaitable(False)) # type: ignore[assignment] - self.store.user_last_seen_monthly_active = Mock( - return_value=make_awaitable(self.hs.get_clock().time_msec()) + self.store.is_trial_user = AsyncMock(return_value=False) # type: ignore[assignment] + self.store.user_last_seen_monthly_active = AsyncMock( + return_value=self.hs.get_clock().time_msec() ) d = self.store.populate_monthly_active_users("user_id") @@ -359,7 +356,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): @override_config({"limit_usage_by_mau": False, "mau_stats_only": False}) def test_no_users_when_not_tracking(self) -> None: - self.store.upsert_monthly_active_user = Mock(return_value=make_awaitable(None)) # type: ignore[assignment] + self.store.upsert_monthly_active_user = AsyncMock(return_value=None) # type: ignore[assignment] self.get_success(self.store.populate_monthly_active_users("@user:sever")) diff --git a/tests/storage/util/test_partial_state_events_tracker.py b/tests/storage/util/test_partial_state_events_tracker.py index 0e3fc2a77f..29be8cdbd0 100644 --- a/tests/storage/util/test_partial_state_events_tracker.py +++ b/tests/storage/util/test_partial_state_events_tracker.py @@ -22,7 +22,6 @@ from synapse.storage.util.partial_state_events_tracker import ( PartialStateEventsTracker, ) -from tests.test_utils import make_awaitable from tests.unittest import TestCase @@ -124,16 +123,17 @@ class PartialStateEventsTrackerTestCase(TestCase): class PartialCurrentStateTrackerTestCase(TestCase): def setUp(self) -> None: self.mock_store = 
mock.Mock(spec_set=["is_partial_state_room"]) + self.mock_store.is_partial_state_room = mock.AsyncMock() self.tracker = PartialCurrentStateTracker(self.mock_store) def test_does_not_block_for_full_state_rooms(self) -> None: - self.mock_store.is_partial_state_room.return_value = make_awaitable(False) + self.mock_store.is_partial_state_room.return_value = False self.successResultOf(ensureDeferred(self.tracker.await_full_state("room_id"))) def test_blocks_for_partial_room_state(self) -> None: - self.mock_store.is_partial_state_room.return_value = make_awaitable(True) + self.mock_store.is_partial_state_room.return_value = True d = ensureDeferred(self.tracker.await_full_state("room_id")) @@ -156,7 +156,7 @@ class PartialCurrentStateTrackerTestCase(TestCase): self.successResultOf(ensureDeferred(self.tracker.await_full_state("room_id"))) def test_cancellation(self) -> None: - self.mock_store.is_partial_state_room.return_value = make_awaitable(True) + self.mock_store.is_partial_state_room.return_value = True d1 = ensureDeferred(self.tracker.await_full_state("room_id")) self.assertNoResult(d1) diff --git a/tests/test_federation.py b/tests/test_federation.py index 6d15ac7597..779f70467b 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -13,7 +13,7 @@ # limitations under the License. from typing import Collection, List, Optional, Union -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from twisted.test.proto_helpers import MemoryReactor @@ -31,7 +31,6 @@ from synapse.util import Clock from synapse.util.retryutils import NotRetryingDestination from tests import unittest -from tests.test_utils import make_awaitable class MessageAcceptTests(unittest.HomeserverTestCase): @@ -196,7 +195,7 @@ class MessageAcceptTests(unittest.HomeserverTestCase): # Register a mock on the store so that the incoming update doesn't fail because # we don't share a room with the user. store = self.hs.get_datastores().main - store.get_rooms_for_user = Mock(return_value=make_awaitable(["!someroom:test"])) + store.get_rooms_for_user = AsyncMock(return_value=["!someroom:test"]) # Manually inject a fake device list update. We need this update to include at # least one prev_id so that the user's device list will need to be retried. @@ -241,27 +240,24 @@ class MessageAcceptTests(unittest.HomeserverTestCase): # Register mock device list retrieval on the federation client. federation_client = self.hs.get_federation_client() - federation_client.query_user_devices = Mock( # type: ignore[assignment] - return_value=make_awaitable( - { + federation_client.query_user_devices = AsyncMock( # type: ignore[assignment] + return_value={ + "user_id": remote_user_id, + "stream_id": 1, + "devices": [], + "master_key": { "user_id": remote_user_id, - "stream_id": 1, - "devices": [], - "master_key": { - "user_id": remote_user_id, - "usage": ["master"], - "keys": {"ed25519:" + remote_master_key: remote_master_key}, + "usage": ["master"], + "keys": {"ed25519:" + remote_master_key: remote_master_key}, + }, + "self_signing_key": { + "user_id": remote_user_id, + "usage": ["self_signing"], + "keys": { + "ed25519:" + remote_self_signing_key: remote_self_signing_key }, - "self_signing_key": { - "user_id": remote_user_id, - "usage": ["self_signing"], - "keys": { - "ed25519:" - + remote_self_signing_key: remote_self_signing_key - }, - }, - } - ) + }, + } ) # Resync the device list. 
diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py index c8cc841d95..21e112a8b5 100644 --- a/tests/test_utils/__init__.py +++ b/tests/test_utils/__init__.py @@ -18,7 +18,6 @@ Utilities for running the unit tests import json import sys import warnings -from asyncio import Future from binascii import unhexlify from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple, TypeVar from unittest.mock import Mock @@ -57,17 +56,6 @@ def get_awaitable_result(awaitable: Awaitable[TV]) -> TV: raise Exception("awaitable has not yet completed") -def make_awaitable(result: TV) -> Awaitable[TV]: - """ - Makes an awaitable, suitable for mocking an `async` function. - This uses Futures as they can be awaited multiple times so can be returned - to multiple callers. - """ - future: Future[TV] = Future() - future.set_result(result) - return future - - def setup_awaitable_errors() -> Callable[[], None]: """ Convert warnings from a non-awaited coroutines into errors. From 5c9402b9fdaad3141b0a9d05614535705a14e65d Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Fri, 25 Aug 2023 12:25:34 +0100 Subject: [PATCH 373/562] Add warnings about MSC3861 on certain APIs. (#16168) --- changelog.d/16168.doc | 1 + docs/admin_api/account_validity.md | 2 ++ docs/admin_api/register_api.md | 2 ++ docs/admin_api/user_admin_api.md | 10 +++++++++- .../administration/admin_api/registration_tokens.md | 2 ++ 5 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16168.doc diff --git a/changelog.d/16168.doc b/changelog.d/16168.doc new file mode 100644 index 0000000000..7dadb047be --- /dev/null +++ b/changelog.d/16168.doc @@ -0,0 +1 @@ +Document which admin APIs are disabled when experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support is enabled. diff --git a/docs/admin_api/account_validity.md b/docs/admin_api/account_validity.md index 87d8f7150e..dfa69e515b 100644 --- a/docs/admin_api/account_validity.md +++ b/docs/admin_api/account_validity.md @@ -1,5 +1,7 @@ # Account validity API +**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582) + This API allows a server administrator to manage the validity of an account. To use it, you must enable the account validity feature (under `account_validity`) in Synapse's configuration. diff --git a/docs/admin_api/register_api.md b/docs/admin_api/register_api.md index dd2830f3a1..e9a235ada5 100644 --- a/docs/admin_api/register_api.md +++ b/docs/admin_api/register_api.md @@ -1,5 +1,7 @@ # Shared-Secret Registration +**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582) + This API allows for the creation of users in an administrative and non-interactive way. This is generally used for bootstrapping a Synapse instance with administrator accounts. diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 99abfea3a0..8032e05497 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -218,7 +218,7 @@ The following parameters should be set in the URL: - `name` - Is optional and filters to only return users with user ID localparts **or** displaynames that contain this value. - `guests` - string representing a bool - Is optional and if `false` will **exclude** guest users. - Defaults to `true` to include guest users. + Defaults to `true` to include guest users. This parameter is not supported when MSC3861 is enabled. 
[See #15582](https://github.com/matrix-org/synapse/pull/15582) - `admins` - Optional flag to filter admins. If `true`, only admins are queried. If `false`, admins are excluded from the query. When the flag is absent (the default), **both** admins and non-admins are included in the search results. - `deactivated` - string representing a bool - Is optional and if `true` will **include** deactivated users. @@ -390,6 +390,8 @@ The following actions are **NOT** performed. The list may be incomplete. ## Reset password +**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582) + Changes the password of another user. This will automatically log the user out of all their devices. The api is: @@ -413,6 +415,8 @@ The parameter `logout_devices` is optional and defaults to `true`. ## Get whether a user is a server administrator or not +**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582) + The api is: ``` @@ -430,6 +434,8 @@ A response body like the following is returned: ## Change whether a user is a server administrator or not +**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582) + Note that you cannot demote yourself. The api is: @@ -723,6 +729,8 @@ delete largest/smallest or newest/oldest files first. ## Login as a user +**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582) + Get an access token that can be used to authenticate as that user. Useful for when admins wish to do actions on behalf of a user. diff --git a/docs/usage/administration/admin_api/registration_tokens.md b/docs/usage/administration/admin_api/registration_tokens.md index c5130859d4..ba95bcf038 100644 --- a/docs/usage/administration/admin_api/registration_tokens.md +++ b/docs/usage/administration/admin_api/registration_tokens.md @@ -1,5 +1,7 @@ # Registration Tokens +**Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582) + This API allows you to manage tokens which can be used to authenticate registration requests, as proposed in [MSC3231](https://github.com/matrix-org/matrix-doc/blob/main/proposals/3231-token-authenticated-registration.md) From a8a46b13360b8bd07cbca48798791098ef6d6d3c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 25 Aug 2023 09:27:21 -0400 Subject: [PATCH 374/562] Replace simple_async_mock with AsyncMock (#16180) Python 3.8 has a native AsyncMock, use it instead of a custom implementation. 
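For illustration, a minimal sketch of the replacement pattern (the `get_count` stub below is hypothetical, not code from this patch):

```python
import asyncio
from unittest.mock import AsyncMock

# Before: helpers such as simple_async_mock / make_awaitable wrapped a value
# so that a plain Mock could be awaited.
# After (stdlib, Python 3.8+): AsyncMock is awaitable out of the box.
get_count = AsyncMock(return_value=42)

async def main() -> None:
    assert await get_count() == 42   # awaiting the mock yields `return_value`
    get_count.assert_awaited_once()  # AsyncMock also records awaits

asyncio.run(main())
```

A side benefit of the native mock is that it records awaits separately from calls (`assert_awaited_once`, `await_count`), which a plain `Mock` wrapping an awaitable does not.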
--- changelog.d/16180.misc | 1 + tests/api/test_auth.py | 97 +++++++++++---------- tests/appservice/test_appservice.py | 31 +++---- tests/appservice/test_scheduler.py | 43 ++++----- tests/events/test_presence_router.py | 5 +- tests/handlers/test_appservice.py | 8 +- tests/handlers/test_cas.py | 11 ++- tests/handlers/test_oauth_delegation.py | 42 ++++----- tests/handlers/test_oidc.py | 6 +- tests/handlers/test_saml.py | 13 ++- tests/module_api/test_api.py | 9 +- tests/push/test_bulk_push_rule_evaluator.py | 5 +- tests/rest/client/test_notifications.py | 5 +- tests/storage/test_background_update.py | 5 +- tests/test_utils/__init__.py | 19 +--- 15 files changed, 140 insertions(+), 160 deletions(-) create mode 100644 changelog.d/16180.misc diff --git a/changelog.d/16180.misc b/changelog.d/16180.misc new file mode 100644 index 0000000000..8d04954ab9 --- /dev/null +++ b/changelog.d/16180.misc @@ -0,0 +1 @@ +Use `AsyncMock` instead of custom code. diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index ce96574915..dcd01d5688 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock import pymacaroons @@ -35,7 +35,6 @@ from synapse.types import Requester, UserID from synapse.util import Clock from tests import unittest -from tests.test_utils import simple_async_mock from tests.unittest import override_config from tests.utils import mock_getRawHeaders @@ -60,16 +59,16 @@ class AuthTestCase(unittest.HomeserverTestCase): # this is overridden for the appservice tests self.store.get_app_service_by_token = Mock(return_value=None) - self.store.insert_client_ip = simple_async_mock(None) - self.store.is_support_user = simple_async_mock(False) + self.store.insert_client_ip = AsyncMock(return_value=None) + self.store.is_support_user = AsyncMock(return_value=False) def test_get_user_by_req_user_valid_token(self) -> None: user_info = TokenLookupResult( user_id=self.test_user, token_id=5, device_id="device" ) - self.store.get_user_by_access_token = simple_async_mock(user_info) - self.store.mark_access_token_as_used = simple_async_mock(None) - self.store.get_user_locked_status = simple_async_mock(False) + self.store.get_user_by_access_token = AsyncMock(return_value=user_info) + self.store.mark_access_token_as_used = AsyncMock(return_value=None) + self.store.get_user_locked_status = AsyncMock(return_value=False) request = Mock(args={}) request.args[b"access_token"] = [self.test_token] @@ -78,7 +77,7 @@ class AuthTestCase(unittest.HomeserverTestCase): self.assertEqual(requester.user.to_string(), self.test_user) def test_get_user_by_req_user_bad_token(self) -> None: - self.store.get_user_by_access_token = simple_async_mock(None) + self.store.get_user_by_access_token = AsyncMock(return_value=None) request = Mock(args={}) request.args[b"access_token"] = [self.test_token] @@ -91,7 +90,7 @@ class AuthTestCase(unittest.HomeserverTestCase): def test_get_user_by_req_user_missing_token(self) -> None: user_info = TokenLookupResult(user_id=self.test_user, token_id=5) - self.store.get_user_by_access_token = simple_async_mock(user_info) + self.store.get_user_by_access_token = AsyncMock(return_value=user_info) request = Mock(args={}) request.requestHeaders.getRawHeaders = mock_getRawHeaders() @@ -106,7 +105,7 @@ class AuthTestCase(unittest.HomeserverTestCase): token="foobar", url="a_url", sender=self.test_user, 
ip_range_whitelist=None ) self.store.get_app_service_by_token = Mock(return_value=app_service) - self.store.get_user_by_access_token = simple_async_mock(None) + self.store.get_user_by_access_token = AsyncMock(return_value=None) request = Mock(args={}) request.getClientAddress.return_value.host = "127.0.0.1" @@ -125,7 +124,7 @@ class AuthTestCase(unittest.HomeserverTestCase): ip_range_whitelist=IPSet(["192.168/16"]), ) self.store.get_app_service_by_token = Mock(return_value=app_service) - self.store.get_user_by_access_token = simple_async_mock(None) + self.store.get_user_by_access_token = AsyncMock(return_value=None) request = Mock(args={}) request.getClientAddress.return_value.host = "192.168.10.10" @@ -144,7 +143,7 @@ class AuthTestCase(unittest.HomeserverTestCase): ip_range_whitelist=IPSet(["192.168/16"]), ) self.store.get_app_service_by_token = Mock(return_value=app_service) - self.store.get_user_by_access_token = simple_async_mock(None) + self.store.get_user_by_access_token = AsyncMock(return_value=None) request = Mock(args={}) request.getClientAddress.return_value.host = "131.111.8.42" @@ -158,7 +157,7 @@ class AuthTestCase(unittest.HomeserverTestCase): def test_get_user_by_req_appservice_bad_token(self) -> None: self.store.get_app_service_by_token = Mock(return_value=None) - self.store.get_user_by_access_token = simple_async_mock(None) + self.store.get_user_by_access_token = AsyncMock(return_value=None) request = Mock(args={}) request.args[b"access_token"] = [self.test_token] @@ -172,7 +171,7 @@ class AuthTestCase(unittest.HomeserverTestCase): def test_get_user_by_req_appservice_missing_token(self) -> None: app_service = Mock(token="foobar", url="a_url", sender=self.test_user) self.store.get_app_service_by_token = Mock(return_value=app_service) - self.store.get_user_by_access_token = simple_async_mock(None) + self.store.get_user_by_access_token = AsyncMock(return_value=None) request = Mock(args={}) request.requestHeaders.getRawHeaders = mock_getRawHeaders() @@ -190,8 +189,8 @@ class AuthTestCase(unittest.HomeserverTestCase): app_service.is_interested_in_user = Mock(return_value=True) self.store.get_app_service_by_token = Mock(return_value=app_service) # This just needs to return a truth-y value. - self.store.get_user_by_id = simple_async_mock({"is_guest": False}) - self.store.get_user_by_access_token = simple_async_mock(None) + self.store.get_user_by_id = AsyncMock(return_value={"is_guest": False}) + self.store.get_user_by_access_token = AsyncMock(return_value=None) request = Mock(args={}) request.getClientAddress.return_value.host = "127.0.0.1" @@ -210,7 +209,7 @@ class AuthTestCase(unittest.HomeserverTestCase): ) app_service.is_interested_in_user = Mock(return_value=False) self.store.get_app_service_by_token = Mock(return_value=app_service) - self.store.get_user_by_access_token = simple_async_mock(None) + self.store.get_user_by_access_token = AsyncMock(return_value=None) request = Mock(args={}) request.getClientAddress.return_value.host = "127.0.0.1" @@ -234,10 +233,10 @@ class AuthTestCase(unittest.HomeserverTestCase): app_service.is_interested_in_user = Mock(return_value=True) self.store.get_app_service_by_token = Mock(return_value=app_service) # This just needs to return a truth-y value. 
- self.store.get_user_by_id = simple_async_mock({"is_guest": False}) - self.store.get_user_by_access_token = simple_async_mock(None) + self.store.get_user_by_id = AsyncMock(return_value={"is_guest": False}) + self.store.get_user_by_access_token = AsyncMock(return_value=None) # This also needs to just return a truth-y value - self.store.get_device = simple_async_mock({"hidden": False}) + self.store.get_device = AsyncMock(return_value={"hidden": False}) request = Mock(args={}) request.getClientAddress.return_value.host = "127.0.0.1" @@ -266,10 +265,10 @@ class AuthTestCase(unittest.HomeserverTestCase): app_service.is_interested_in_user = Mock(return_value=True) self.store.get_app_service_by_token = Mock(return_value=app_service) # This just needs to return a truth-y value. - self.store.get_user_by_id = simple_async_mock({"is_guest": False}) - self.store.get_user_by_access_token = simple_async_mock(None) + self.store.get_user_by_id = AsyncMock(return_value={"is_guest": False}) + self.store.get_user_by_access_token = AsyncMock(return_value=None) # This also needs to just return a falsey value - self.store.get_device = simple_async_mock(None) + self.store.get_device = AsyncMock(return_value=None) request = Mock(args={}) request.getClientAddress.return_value.host = "127.0.0.1" @@ -283,8 +282,8 @@ class AuthTestCase(unittest.HomeserverTestCase): self.assertEqual(failure.value.errcode, Codes.EXCLUSIVE) def test_get_user_by_req__puppeted_token__not_tracking_puppeted_mau(self) -> None: - self.store.get_user_by_access_token = simple_async_mock( - TokenLookupResult( + self.store.get_user_by_access_token = AsyncMock( + return_value=TokenLookupResult( user_id="@baldrick:matrix.org", device_id="device", token_id=5, @@ -292,9 +291,9 @@ class AuthTestCase(unittest.HomeserverTestCase): token_used=True, ) ) - self.store.insert_client_ip = simple_async_mock(None) - self.store.mark_access_token_as_used = simple_async_mock(None) - self.store.get_user_locked_status = simple_async_mock(False) + self.store.insert_client_ip = AsyncMock(return_value=None) + self.store.mark_access_token_as_used = AsyncMock(return_value=None) + self.store.get_user_locked_status = AsyncMock(return_value=False) request = Mock(args={}) request.getClientAddress.return_value.host = "127.0.0.1" request.args[b"access_token"] = [self.test_token] @@ -304,8 +303,8 @@ class AuthTestCase(unittest.HomeserverTestCase): def test_get_user_by_req__puppeted_token__tracking_puppeted_mau(self) -> None: self.auth._track_puppeted_user_ips = True - self.store.get_user_by_access_token = simple_async_mock( - TokenLookupResult( + self.store.get_user_by_access_token = AsyncMock( + return_value=TokenLookupResult( user_id="@baldrick:matrix.org", device_id="device", token_id=5, @@ -313,9 +312,9 @@ class AuthTestCase(unittest.HomeserverTestCase): token_used=True, ) ) - self.store.get_user_locked_status = simple_async_mock(False) - self.store.insert_client_ip = simple_async_mock(None) - self.store.mark_access_token_as_used = simple_async_mock(None) + self.store.get_user_locked_status = AsyncMock(return_value=False) + self.store.insert_client_ip = AsyncMock(return_value=None) + self.store.mark_access_token_as_used = AsyncMock(return_value=None) request = Mock(args={}) request.getClientAddress.return_value.host = "127.0.0.1" request.args[b"access_token"] = [self.test_token] @@ -324,7 +323,7 @@ class AuthTestCase(unittest.HomeserverTestCase): self.assertEqual(self.store.insert_client_ip.call_count, 2) def test_get_user_from_macaroon(self) -> None: - 
self.store.get_user_by_access_token = simple_async_mock(None) + self.store.get_user_by_access_token = AsyncMock(return_value=None) user_id = "@baldrick:matrix.org" macaroon = pymacaroons.Macaroon( @@ -342,8 +341,8 @@ class AuthTestCase(unittest.HomeserverTestCase): ) def test_get_guest_user_from_macaroon(self) -> None: - self.store.get_user_by_id = simple_async_mock({"is_guest": True}) - self.store.get_user_by_access_token = simple_async_mock(None) + self.store.get_user_by_id = AsyncMock(return_value={"is_guest": True}) + self.store.get_user_by_access_token = AsyncMock(return_value=None) user_id = "@baldrick:matrix.org" macaroon = pymacaroons.Macaroon( @@ -373,7 +372,7 @@ class AuthTestCase(unittest.HomeserverTestCase): self.auth_blocking._limit_usage_by_mau = True - self.store.get_monthly_active_count = simple_async_mock(lots_of_users) + self.store.get_monthly_active_count = AsyncMock(return_value=lots_of_users) e = self.get_failure( self.auth_blocking.check_auth_blocking(), ResourceLimitError @@ -383,25 +382,27 @@ class AuthTestCase(unittest.HomeserverTestCase): self.assertEqual(e.value.code, 403) # Ensure does not throw an error - self.store.get_monthly_active_count = simple_async_mock(small_number_of_users) + self.store.get_monthly_active_count = AsyncMock( + return_value=small_number_of_users + ) self.get_success(self.auth_blocking.check_auth_blocking()) def test_blocking_mau__depending_on_user_type(self) -> None: self.auth_blocking._max_mau_value = 50 self.auth_blocking._limit_usage_by_mau = True - self.store.get_monthly_active_count = simple_async_mock(100) + self.store.get_monthly_active_count = AsyncMock(return_value=100) # Support users allowed self.get_success( self.auth_blocking.check_auth_blocking(user_type=UserTypes.SUPPORT) ) - self.store.get_monthly_active_count = simple_async_mock(100) + self.store.get_monthly_active_count = AsyncMock(return_value=100) # Bots not allowed self.get_failure( self.auth_blocking.check_auth_blocking(user_type=UserTypes.BOT), ResourceLimitError, ) - self.store.get_monthly_active_count = simple_async_mock(100) + self.store.get_monthly_active_count = AsyncMock(return_value=100) # Real users not allowed self.get_failure(self.auth_blocking.check_auth_blocking(), ResourceLimitError) @@ -412,9 +413,9 @@ class AuthTestCase(unittest.HomeserverTestCase): self.auth_blocking._limit_usage_by_mau = True self.auth_blocking._track_appservice_user_ips = False - self.store.get_monthly_active_count = simple_async_mock(100) - self.store.user_last_seen_monthly_active = simple_async_mock() - self.store.is_trial_user = simple_async_mock() + self.store.get_monthly_active_count = AsyncMock(return_value=100) + self.store.user_last_seen_monthly_active = AsyncMock(return_value=None) + self.store.is_trial_user = AsyncMock(return_value=False) appservice = ApplicationService( "abcd", @@ -443,9 +444,9 @@ class AuthTestCase(unittest.HomeserverTestCase): self.auth_blocking._limit_usage_by_mau = True self.auth_blocking._track_appservice_user_ips = True - self.store.get_monthly_active_count = simple_async_mock(100) - self.store.user_last_seen_monthly_active = simple_async_mock() - self.store.is_trial_user = simple_async_mock() + self.store.get_monthly_active_count = AsyncMock(return_value=100) + self.store.user_last_seen_monthly_active = AsyncMock(return_value=None) + self.store.is_trial_user = AsyncMock(return_value=False) appservice = ApplicationService( "abcd", @@ -473,7 +474,7 @@ class AuthTestCase(unittest.HomeserverTestCase): def test_reserved_threepid(self) -> None: 
self.auth_blocking._limit_usage_by_mau = True self.auth_blocking._max_mau_value = 1 - self.store.get_monthly_active_count = simple_async_mock(2) + self.store.get_monthly_active_count = AsyncMock(return_value=2) threepid = {"medium": "email", "address": "reserved@server.com"} unknown_threepid = {"medium": "email", "address": "unreserved@server.com"} self.auth_blocking._mau_limits_reserved_threepids = [threepid] diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index 66753c60c4..6ac5fc1ae7 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -13,14 +13,13 @@ # limitations under the License. import re from typing import Any, Generator -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from twisted.internet import defer from synapse.appservice import ApplicationService, Namespace from tests import unittest -from tests.test_utils import simple_async_mock def _regex(regex: str, exclusive: bool = True) -> Namespace: @@ -43,8 +42,8 @@ class ApplicationServiceTestCase(unittest.TestCase): ) self.store = Mock() - self.store.get_aliases_for_room = simple_async_mock([]) - self.store.get_local_users_in_room = simple_async_mock([]) + self.store.get_aliases_for_room = AsyncMock(return_value=[]) + self.store.get_local_users_in_room = AsyncMock(return_value=[]) @defer.inlineCallbacks def test_regex_user_id_prefix_match( @@ -127,10 +126,10 @@ class ApplicationServiceTestCase(unittest.TestCase): self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) - self.store.get_aliases_for_room = simple_async_mock( - ["#irc_foobar:matrix.org", "#athing:matrix.org"] + self.store.get_aliases_for_room = AsyncMock( + return_value=["#irc_foobar:matrix.org", "#athing:matrix.org"] ) - self.store.get_local_users_in_room = simple_async_mock([]) + self.store.get_local_users_in_room = AsyncMock(return_value=[]) self.assertTrue( ( yield self.service.is_interested_in_event( @@ -182,10 +181,10 @@ class ApplicationServiceTestCase(unittest.TestCase): self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) - self.store.get_aliases_for_room = simple_async_mock( - ["#xmpp_foobar:matrix.org", "#athing:matrix.org"] + self.store.get_aliases_for_room = AsyncMock( + return_value=["#xmpp_foobar:matrix.org", "#athing:matrix.org"] ) - self.store.get_local_users_in_room = simple_async_mock([]) + self.store.get_local_users_in_room = AsyncMock(return_value=[]) self.assertFalse( ( yield defer.ensureDeferred( @@ -205,8 +204,10 @@ class ApplicationServiceTestCase(unittest.TestCase): ) self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@irc_foobar:matrix.org" - self.store.get_aliases_for_room = simple_async_mock(["#irc_barfoo:matrix.org"]) - self.store.get_local_users_in_room = simple_async_mock([]) + self.store.get_aliases_for_room = AsyncMock( + return_value=["#irc_barfoo:matrix.org"] + ) + self.store.get_local_users_in_room = AsyncMock(return_value=[]) self.assertTrue( ( yield self.service.is_interested_in_event( @@ -235,10 +236,10 @@ class ApplicationServiceTestCase(unittest.TestCase): def test_member_list_match(self) -> Generator["defer.Deferred[Any]", object, None]: self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) # Note that @irc_fo:here is the AS user. 
- self.store.get_local_users_in_room = simple_async_mock( - ["@alice:here", "@irc_fo:here", "@bob:here"] + self.store.get_local_users_in_room = AsyncMock( + return_value=["@alice:here", "@irc_fo:here", "@bob:here"] ) - self.store.get_aliases_for_room = simple_async_mock([]) + self.store.get_aliases_for_room = AsyncMock(return_value=[]) self.event.sender = "@xmpp_foobar:matrix.org" self.assertTrue( diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index e2a3bad065..445919417e 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Sequence, Tuple, cast -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from typing_extensions import TypeAlias @@ -37,7 +37,6 @@ from synapse.types import DeviceListUpdates, JsonDict from synapse.util import Clock from tests import unittest -from tests.test_utils import simple_async_mock from ..utils import MockClock @@ -62,10 +61,12 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase): txn = Mock(id=txn_id, service=service, events=events) # mock methods - self.store.get_appservice_state = simple_async_mock(ApplicationServiceState.UP) - txn.send = simple_async_mock(True) - txn.complete = simple_async_mock(True) - self.store.create_appservice_txn = simple_async_mock(txn) + self.store.get_appservice_state = AsyncMock( + return_value=ApplicationServiceState.UP + ) + txn.send = AsyncMock(return_value=True) + txn.complete = AsyncMock(return_value=True) + self.store.create_appservice_txn = AsyncMock(return_value=txn) # actual call self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) @@ -89,10 +90,10 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase): events = [Mock(), Mock()] txn = Mock(id="idhere", service=service, events=events) - self.store.get_appservice_state = simple_async_mock( - ApplicationServiceState.DOWN + self.store.get_appservice_state = AsyncMock( + return_value=ApplicationServiceState.DOWN ) - self.store.create_appservice_txn = simple_async_mock(txn) + self.store.create_appservice_txn = AsyncMock(return_value=txn) # actual call self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) @@ -118,10 +119,12 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase): txn = Mock(id=txn_id, service=service, events=events) # mock methods - self.store.get_appservice_state = simple_async_mock(ApplicationServiceState.UP) - self.store.set_appservice_state = simple_async_mock(True) - txn.send = simple_async_mock(False) # fails to send - self.store.create_appservice_txn = simple_async_mock(txn) + self.store.get_appservice_state = AsyncMock( + return_value=ApplicationServiceState.UP + ) + self.store.set_appservice_state = AsyncMock(return_value=True) + txn.send = AsyncMock(return_value=False) # fails to send + self.store.create_appservice_txn = AsyncMock(return_value=txn) # actual call self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) @@ -150,7 +153,7 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase): self.as_api = Mock() self.store = Mock() self.service = Mock() - self.callback = simple_async_mock() + self.callback = AsyncMock() self.recoverer = _Recoverer( clock=cast(Clock, self.clock), as_api=self.as_api, @@ -174,8 +177,8 @@ class 
ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase): self.recoverer.recover() # shouldn't have called anything prior to waiting for exp backoff self.assertEqual(0, self.store.get_oldest_unsent_txn.call_count) - txn.send = simple_async_mock(True) - txn.complete = simple_async_mock(None) + txn.send = AsyncMock(return_value=True) + txn.complete = AsyncMock(return_value=None) # wait for exp backoff self.clock.advance_time(2) self.assertEqual(1, txn.send.call_count) @@ -202,8 +205,8 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase): self.recoverer.recover() self.assertEqual(0, self.store.get_oldest_unsent_txn.call_count) - txn.send = simple_async_mock(False) - txn.complete = simple_async_mock(None) + txn.send = AsyncMock(return_value=False) + txn.complete = AsyncMock(return_value=None) self.clock.advance_time(2) self.assertEqual(1, txn.send.call_count) self.assertEqual(0, txn.complete.call_count) @@ -216,7 +219,7 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase): self.assertEqual(3, txn.send.call_count) self.assertEqual(0, txn.complete.call_count) self.assertEqual(0, self.callback.call_count) - txn.send = simple_async_mock(True) # successfully send the txn + txn.send = AsyncMock(return_value=True) # successfully send the txn pop_txn = True # returns the txn the first time, then no more. self.clock.advance_time(16) self.assertEqual(1, txn.send.call_count) # new mock reset call count @@ -244,7 +247,7 @@ class ApplicationServiceSchedulerQueuerTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: "MemoryReactor", clock: Clock, hs: HomeServer) -> None: self.scheduler = ApplicationServiceScheduler(hs) self.txn_ctrl = Mock() - self.txn_ctrl.send = simple_async_mock() + self.txn_ctrl.send = AsyncMock() # Replace instantiated _TransactionController instances with our Mock self.scheduler.txn_ctrl = self.txn_ctrl diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py index 6fb1f1bd6e..0fcfe38efa 100644 --- a/tests/events/test_presence_router.py +++ b/tests/events/test_presence_router.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, Iterable, List, Optional, Set, Tuple, Union -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock import attr @@ -30,7 +30,6 @@ from synapse.types import JsonDict, StreamToken, create_requester from synapse.util import Clock from tests.handlers.test_sync import generate_sync_config -from tests.test_utils import simple_async_mock from tests.unittest import ( FederatingHomeserverTestCase, HomeserverTestCase, @@ -157,7 +156,7 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: # Mock out the calls over federation. 
self.fed_transport_client = Mock(spec=["send_transaction"]) - self.fed_transport_client.send_transaction = simple_async_mock({}) + self.fed_transport_client.send_transaction = AsyncMock(return_value={}) hs = self.setup_test_homeserver( federation_transport_client=self.fed_transport_client, diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 5e2ae82cd4..4bd0facd65 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -36,7 +36,7 @@ from synapse.util import Clock from synapse.util.stringutils import random_string from tests import unittest -from tests.test_utils import event_injection, simple_async_mock +from tests.test_utils import event_injection from tests.unittest import override_config from tests.utils import MockClock @@ -399,7 +399,7 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase): self.hs = hs # Mock the ApplicationServiceScheduler's _TransactionController's send method so that # we can track any outgoing ephemeral events - self.send_mock = simple_async_mock() + self.send_mock = AsyncMock() hs.get_application_service_handler().scheduler.txn_ctrl.send = self.send_mock # type: ignore[assignment] # Mock out application services, and allow defining our own in tests @@ -897,7 +897,7 @@ class ApplicationServicesHandlerDeviceListsTestCase(unittest.HomeserverTestCase) # Mock ApplicationServiceApi's put_json, so we can verify the raw JSON that # will be sent over the wire - self.put_json = simple_async_mock() + self.put_json = AsyncMock() hs.get_application_service_api().put_json = self.put_json # type: ignore[assignment] # Mock out application services, and allow defining our own in tests @@ -1003,7 +1003,7 @@ class ApplicationServicesHandlerOtkCountsTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # Mock the ApplicationServiceScheduler's _TransactionController's send method so that # we can track what's going out - self.send_mock = simple_async_mock() + self.send_mock = AsyncMock() hs.get_application_service_handler().scheduler.txn_ctrl.send = self.send_mock # type: ignore[assignment] # We assign to a method. # Define an application service for the tests diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py index 63aad0d10c..2cb24add20 100644 --- a/tests/handlers/test_cas.py +++ b/tests/handlers/test_cas.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from twisted.test.proto_helpers import MemoryReactor @@ -20,7 +20,6 @@ from synapse.handlers.cas import CasResponse from synapse.server import HomeServer from synapse.util import Clock -from tests.test_utils import simple_async_mock from tests.unittest import HomeserverTestCase, override_config # These are a few constants that are used as config parameters in the tests. 
@@ -61,7 +60,7 @@ class CasHandlerTestCase(HomeserverTestCase): # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] cas_response = CasResponse("test_user", {}) request = _mock_request() @@ -89,7 +88,7 @@ class CasHandlerTestCase(HomeserverTestCase): # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] # Map a user via SSO. cas_response = CasResponse("test_user", {}) @@ -129,7 +128,7 @@ class CasHandlerTestCase(HomeserverTestCase): # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] cas_response = CasResponse("föö", {}) request = _mock_request() @@ -160,7 +159,7 @@ class CasHandlerTestCase(HomeserverTestCase): # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] # The response doesn't have the proper userGroup or department. cas_response = CasResponse("test_user", {}) diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index b891e84690..9152694653 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -39,7 +39,7 @@ from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock -from tests.test_utils import FakeResponse, get_awaitable_result, simple_async_mock +from tests.test_utils import FakeResponse, get_awaitable_result from tests.unittest import HomeserverTestCase, skip_unless from tests.utils import mock_getRawHeaders @@ -147,7 +147,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def test_inactive_token(self) -> None: """The handler should return a 403 where the token is inactive.""" - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse.json( code=200, payload={"active": False}, @@ -166,7 +166,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def test_active_no_scope(self) -> None: """The handler should return a 403 where no scope is given.""" - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse.json( code=200, payload={"active": True}, @@ -185,7 +185,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def test_active_user_no_subject(self) -> None: """The handler should return a 500 when no subject is present.""" - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse.json( code=200, payload={"active": True, "scope": " ".join([MATRIX_USER_SCOPE])}, @@ -204,7 +204,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def test_active_no_user_scope(self) -> None: """The handler should return a 500 when no subject is present.""" - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse.json( code=200, payload={ @@ -227,7 +227,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def test_active_admin_not_user(self) -> None: 
"""The handler should raise when the scope has admin right but not user.""" - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse.json( code=200, payload={ @@ -251,7 +251,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def test_active_admin(self) -> None: """The handler should return a requester with admin rights.""" - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse.json( code=200, payload={ @@ -281,7 +281,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def test_active_admin_highest_privilege(self) -> None: """The handler should resolve to the most permissive scope.""" - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse.json( code=200, payload={ @@ -313,7 +313,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def test_active_user(self) -> None: """The handler should return a requester with normal user rights.""" - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse.json( code=200, payload={ @@ -344,7 +344,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): """The handler should return a requester with normal user rights and an user ID matching the one specified in query param `user_id`""" - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse.json( code=200, payload={ @@ -378,7 +378,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def test_active_user_with_device(self) -> None: """The handler should return a requester with normal user rights and a device ID.""" - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse.json( code=200, payload={ @@ -408,7 +408,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def test_multiple_devices(self) -> None: """The handler should raise an error if multiple devices are found in the scope.""" - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse.json( code=200, payload={ @@ -433,7 +433,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def test_active_guest_not_allowed(self) -> None: """The handler should return an insufficient scope error.""" - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse.json( code=200, payload={ @@ -463,7 +463,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def test_active_guest_allowed(self) -> None: """The handler should return a requester with guest user rights and a device ID.""" - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse.json( code=200, payload={ @@ -499,19 +499,19 @@ class MSC3861OAuthDelegation(HomeserverTestCase): request.requestHeaders.getRawHeaders = mock_getRawHeaders() # The introspection endpoint is returning an error. - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse(code=500, body=b"Internal Server Error") ) error = self.get_failure(self.auth.get_user_by_req(request), SynapseError) self.assertEqual(error.value.code, 503) # The introspection endpoint request fails. 
- self.http_client.request = simple_async_mock(raises=Exception()) + self.http_client.request = AsyncMock(side_effect=Exception()) error = self.get_failure(self.auth.get_user_by_req(request), SynapseError) self.assertEqual(error.value.code, 503) # The introspection endpoint does not return a JSON object. - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse.json( code=200, payload=["this is an array", "not an object"] ) @@ -520,7 +520,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): self.assertEqual(error.value.code, 503) # The introspection endpoint does not return valid JSON. - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse(code=200, body=b"this is not valid JSON") ) error = self.get_failure(self.auth.get_user_by_req(request), SynapseError) @@ -528,7 +528,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def test_introspection_token_cache(self) -> None: access_token = "open_sesame" - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse.json( code=200, payload={"active": "true", "scope": "guest", "jti": access_token}, @@ -559,7 +559,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): # test that if a cached token is expired, a fresh token will be pulled from authorizing server - first add a # token with a soon-to-expire `exp` field to the cache - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse.json( code=200, payload={ @@ -640,7 +640,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def test_cross_signing(self) -> None: """Try uploading device keys with OAuth delegation enabled.""" - self.http_client.request = simple_async_mock( + self.http_client.request = AsyncMock( return_value=FakeResponse.json( code=200, payload={ diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index 0a8bae54fb..9b2c7812cc 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -13,7 +13,7 @@ # limitations under the License. import os from typing import Any, Awaitable, ContextManager, Dict, Optional, Tuple -from unittest.mock import ANY, Mock, patch +from unittest.mock import ANY, AsyncMock, Mock, patch from urllib.parse import parse_qs, urlparse import pymacaroons @@ -28,7 +28,7 @@ from synapse.util import Clock from synapse.util.macaroons import get_value_from_macaroon from synapse.util.stringutils import random_string -from tests.test_utils import FakeResponse, get_awaitable_result, simple_async_mock +from tests.test_utils import FakeResponse, get_awaitable_result from tests.test_utils.oidc import FakeAuthorizationGrant, FakeOidcServer from tests.unittest import HomeserverTestCase, override_config @@ -164,7 +164,7 @@ class OidcHandlerTestCase(HomeserverTestCase): auth_handler = hs.get_auth_handler() # Mock the complete SSO login method. - self.complete_sso_login = simple_async_mock() + self.complete_sso_login = AsyncMock() auth_handler.complete_sso_login = self.complete_sso_login # type: ignore[assignment] return hs diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py index b5c772a7ae..6e666d7bed 100644 --- a/tests/handlers/test_saml.py +++ b/tests/handlers/test_saml.py @@ -13,7 +13,7 @@ # limitations under the License. 
from typing import Any, Dict, Optional, Set, Tuple -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock import attr @@ -25,7 +25,6 @@ from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock -from tests.test_utils import simple_async_mock from tests.unittest import HomeserverTestCase, override_config # Check if we have the dependencies to run the tests. @@ -134,7 +133,7 @@ class SamlHandlerTestCase(HomeserverTestCase): # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] # send a mocked-up SAML response to the callback saml_response = FakeAuthnResponse({"uid": "test_user", "username": "test_user"}) @@ -164,7 +163,7 @@ class SamlHandlerTestCase(HomeserverTestCase): # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] # Map a user via SSO. saml_response = FakeAuthnResponse( @@ -206,7 +205,7 @@ class SamlHandlerTestCase(HomeserverTestCase): # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] # mock out the error renderer too sso_handler = self.hs.get_sso_handler() @@ -227,7 +226,7 @@ class SamlHandlerTestCase(HomeserverTestCase): # stub out the auth handler and error renderer auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] sso_handler = self.hs.get_sso_handler() sso_handler.render_error = Mock(return_value=None) # type: ignore[assignment] @@ -312,7 +311,7 @@ class SamlHandlerTestCase(HomeserverTestCase): # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] # The response doesn't have the proper userGroup or department. saml_response = FakeAuthnResponse({"uid": "test_user", "username": "test_user"}) diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index fe631d7ecb..9ce9326190 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, Optional -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from twisted.internet import defer from twisted.test.proto_helpers import MemoryReactor @@ -33,7 +33,6 @@ from synapse.util import Clock from tests.events.test_presence_router import send_presence_update, sync_presence from tests.replication._base import BaseMultiWorkerStreamTestCase -from tests.test_utils import simple_async_mock from tests.test_utils.event_injection import inject_member_event from tests.unittest import HomeserverTestCase, override_config @@ -70,7 +69,7 @@ class ModuleApiTestCase(BaseModuleApiTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: # Mock out the calls over federation. 
self.fed_transport_client = Mock(spec=["send_transaction"]) - self.fed_transport_client.send_transaction = simple_async_mock({}) + self.fed_transport_client.send_transaction = AsyncMock(return_value={}) return self.setup_test_homeserver( federation_transport_client=self.fed_transport_client, @@ -579,9 +578,7 @@ class ModuleApiTestCase(BaseModuleApiTestCase): """Test that the module API can join a remote room.""" # Necessary to fake a remote join. fake_stream_id = 1 - mocked_remote_join = simple_async_mock( - return_value=("fake-event-id", fake_stream_id) - ) + mocked_remote_join = AsyncMock(return_value=("fake-event-id", fake_stream_id)) self.hs.get_room_member_handler()._remote_join = mocked_remote_join # type: ignore[assignment] fake_remote_host = f"{self.module_api.server_name}-remote" diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index 937e6ebb7d..a3880ac171 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -13,7 +13,7 @@ # limitations under the License. from typing import Any, Optional -from unittest.mock import patch +from unittest.mock import AsyncMock, patch from parameterized import parameterized @@ -28,7 +28,6 @@ from synapse.server import HomeServer from synapse.types import JsonDict, create_requester from synapse.util import Clock -from tests.test_utils import simple_async_mock from tests.unittest import HomeserverTestCase, override_config @@ -191,7 +190,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): # Mock the method which calculates push rules -- we do this instead of # e.g. checking the results in the database because we want to ensure # that code isn't even running. - bulk_evaluator._action_for_event_by_user = simple_async_mock() # type: ignore[assignment] + bulk_evaluator._action_for_event_by_user = AsyncMock() # type: ignore[assignment] # Ensure no actions are generated! self.get_success(bulk_evaluator.action_for_events_by_user([(event, context)])) diff --git a/tests/rest/client/test_notifications.py b/tests/rest/client/test_notifications.py index 700f6587a0..41ceb3db51 100644 --- a/tests/rest/client/test_notifications.py +++ b/tests/rest/client/test_notifications.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from unittest.mock import Mock +from unittest.mock import AsyncMock, Mock from twisted.test.proto_helpers import MemoryReactor @@ -20,7 +20,6 @@ from synapse.rest.client import login, notifications, receipts, room from synapse.server import HomeServer from synapse.util import Clock -from tests.test_utils import simple_async_mock from tests.unittest import HomeserverTestCase @@ -45,7 +44,7 @@ class HTTPPusherTests(HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: # Mock out the calls over federation. 
fed_transport_client = Mock(spec=["send_transaction"]) - fed_transport_client.send_transaction = simple_async_mock({}) + fed_transport_client.send_transaction = AsyncMock(return_value={}) return self.setup_test_homeserver( federation_transport_client=fed_transport_client, diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 2af7280ba3..52beb4e89d 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -32,7 +32,6 @@ from synapse.types import JsonDict from synapse.util import Clock from tests import unittest -from tests.test_utils import simple_async_mock from tests.unittest import override_config @@ -348,8 +347,8 @@ class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase): # Mock out the AsyncContextManager class MockCM: - __aenter__ = simple_async_mock(return_value=None) - __aexit__ = simple_async_mock(return_value=None) + __aenter__ = AsyncMock(return_value=None) + __aexit__ = AsyncMock(return_value=None) self._update_ctx_manager = MockCM diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py index 21e112a8b5..fa731426cd 100644 --- a/tests/test_utils/__init__.py +++ b/tests/test_utils/__init__.py @@ -19,8 +19,7 @@ import json import sys import warnings from binascii import unhexlify -from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple, TypeVar -from unittest.mock import Mock +from typing import TYPE_CHECKING, Awaitable, Callable, Tuple, TypeVar import attr import zope.interface @@ -62,10 +61,6 @@ def setup_awaitable_errors() -> Callable[[], None]: """ warnings.simplefilter("error", RuntimeWarning) - # unraisablehook was added in Python 3.8. - if not hasattr(sys, "unraisablehook"): - return lambda: None - # State shared between unraisablehook and check_for_unraisable_exceptions. unraisable_exceptions = [] orig_unraisablehook = sys.unraisablehook @@ -88,18 +83,6 @@ def setup_awaitable_errors() -> Callable[[], None]: return cleanup -def simple_async_mock( - return_value: Optional[TV] = None, raises: Optional[Exception] = None -) -> Mock: - # AsyncMock is not available in python3.5, this mimics part of its behaviour - async def cb(*args: Any, **kwargs: Any) -> Optional[TV]: - if raises: - raise raises - return return_value - - return Mock(side_effect=cb) - - # Type ignore: it does not fully implement IResponse, but is good enough for tests @zope.interface.implementer(IResponse) @attr.s(slots=True, frozen=True, auto_attribs=True) From fcf7a5759efd9bd81838baf298e80e79218f3bf0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 25 Aug 2023 12:11:40 -0400 Subject: [PATCH 375/562] Send proper JSON POST data to /publicRooms (#16185) The include_all_networks was previously sent in the JSON body as string "true" and "false" instead of boolean true and false. --- changelog.d/16185.bugfix | 1 + synapse/federation/transport/client.py | 16 ++++++---------- 2 files changed, 7 insertions(+), 10 deletions(-) create mode 100644 changelog.d/16185.bugfix diff --git a/changelog.d/16185.bugfix b/changelog.d/16185.bugfix new file mode 100644 index 0000000000..e62c9c7a0d --- /dev/null +++ b/changelog.d/16185.bugfix @@ -0,0 +1 @@ +Fix a spec compliance issue where requests to the `/publicRooms` federation API would specify `include_all_networks` as a string. 
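To make the fix concrete before the code diff, a minimal sketch of the wire-level change to the MSC2197 POST body for `/_matrix/federation/v1/publicRooms` (the `limit` value and the surrounding payload are illustrative, not taken from the patch):

```python
import json

# Body of POST /_matrix/federation/v1/publicRooms (MSC2197 search filtering).
before = {"include_all_networks": "false", "limit": 20}  # string: not spec-compliant
after = {"include_all_networks": False, "limit": 20}     # boolean: what the spec expects

print(json.dumps(before))  # {"include_all_networks": "false", "limit": 20}
print(json.dumps(after))   # {"include_all_networks": false, "limit": 20}
```

The string form remains correct for the GET fallback, where `include_all_networks` travels as a query-string parameter; in that branch the patch below also unwraps `third_party_instance_id`, `limit` and `since` from single-element containers into plain strings.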
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 0b17f713ea..5ce3f345cb 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -475,13 +475,11 @@ class TransportLayerClient: See synapse.federation.federation_client.FederationClient.get_public_rooms for more information. """ + path = _create_v1_path("/publicRooms") + if search_filter: # this uses MSC2197 (Search Filtering over Federation) - path = _create_v1_path("/publicRooms") - - data: Dict[str, Any] = { - "include_all_networks": "true" if include_all_networks else "false" - } + data: Dict[str, Any] = {"include_all_networks": include_all_networks} if third_party_instance_id: data["third_party_instance_id"] = third_party_instance_id if limit: @@ -505,17 +503,15 @@ class TransportLayerClient: ) raise else: - path = _create_v1_path("/publicRooms") - args: Dict[str, Union[str, Iterable[str]]] = { "include_all_networks": "true" if include_all_networks else "false" } if third_party_instance_id: - args["third_party_instance_id"] = (third_party_instance_id,) + args["third_party_instance_id"] = third_party_instance_id if limit: - args["limit"] = [str(limit)] + args["limit"] = str(limit) if since_token: - args["since"] = [since_token] + args["since"] = since_token try: response = await self.client.get_json( From 82699428e392f63c269e8b5d4d4c4d0afc11684b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 25 Aug 2023 14:10:31 -0400 Subject: [PATCH 376/562] Validate input to POST /key/v2/query endpoint. (#16183) To avoid 500 internal server errors with garbage input. --- changelog.d/16183.misc | 1 + synapse/rest/key/v2/remote_key_resource.py | 39 ++++++++++++++++------ 2 files changed, 30 insertions(+), 10 deletions(-) create mode 100644 changelog.d/16183.misc diff --git a/changelog.d/16183.misc b/changelog.d/16183.misc new file mode 100644 index 0000000000..305d5baa6e --- /dev/null +++ b/changelog.d/16183.misc @@ -0,0 +1 @@ +Improve error reporting of invalid data passed to `/_matrix/key/v2/query`. diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 981fd1f58a..0aaa838d04 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -16,6 +16,7 @@ import logging import re from typing import TYPE_CHECKING, Dict, Mapping, Optional, Set, Tuple +from pydantic import Extra, StrictInt, StrictStr from signedjson.sign import sign_json from twisted.web.server import Request @@ -24,9 +25,10 @@ from synapse.crypto.keyring import ServerKeyFetcher from synapse.http.server import HttpServer from synapse.http.servlet import ( RestServlet, + parse_and_validate_json_object_from_request, parse_integer, - parse_json_object_from_request, ) +from synapse.rest.models import RequestBodyModel from synapse.storage.keys import FetchKeyResultForRemote from synapse.types import JsonDict from synapse.util import json_decoder @@ -38,6 +40,13 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +class _KeyQueryCriteriaDataModel(RequestBodyModel): + class Config: + extra = Extra.allow + + minimum_valid_until_ts: Optional[StrictInt] + + class RemoteKey(RestServlet): """HTTP resource for retrieving the TLS certificate and NACL signature verification keys for a collection of servers. 
Checks that the reported @@ -96,6 +105,9 @@ class RemoteKey(RestServlet): CATEGORY = "Federation requests" + class PostBody(RequestBodyModel): + server_keys: Dict[StrictStr, Dict[StrictStr, _KeyQueryCriteriaDataModel]] + def __init__(self, hs: "HomeServer"): self.fetcher = ServerKeyFetcher(hs) self.store = hs.get_datastores().main @@ -137,24 +149,29 @@ class RemoteKey(RestServlet): ) minimum_valid_until_ts = parse_integer(request, "minimum_valid_until_ts") - arguments = {} - if minimum_valid_until_ts is not None: - arguments["minimum_valid_until_ts"] = minimum_valid_until_ts - query = {server: {key_id: arguments}} + query = { + server: { + key_id: _KeyQueryCriteriaDataModel( + minimum_valid_until_ts=minimum_valid_until_ts + ) + } + } else: query = {server: {}} return 200, await self.query_keys(query, query_remote_on_cache_miss=True) async def on_POST(self, request: Request) -> Tuple[int, JsonDict]: - content = parse_json_object_from_request(request) + content = parse_and_validate_json_object_from_request(request, self.PostBody) - query = content["server_keys"] + query = content.server_keys return 200, await self.query_keys(query, query_remote_on_cache_miss=True) async def query_keys( - self, query: JsonDict, query_remote_on_cache_miss: bool = False + self, + query: Dict[str, Dict[str, _KeyQueryCriteriaDataModel]], + query_remote_on_cache_miss: bool = False, ) -> JsonDict: logger.info("Handling query for keys %r", query) @@ -196,8 +213,10 @@ class RemoteKey(RestServlet): else: ts_added_ms = key_result.added_ts ts_valid_until_ms = key_result.valid_until_ts - req_key = query.get(server_name, {}).get(key_id, {}) - req_valid_until = req_key.get("minimum_valid_until_ts") + req_key = query.get(server_name, {}).get( + key_id, _KeyQueryCriteriaDataModel(minimum_valid_until_ts=None) + ) + req_valid_until = req_key.minimum_valid_until_ts if req_valid_until is not None: if ts_valid_until_ms < req_valid_until: logger.debug( From ed6de4b2d40e07f7076b7bcb9507aa3e360fb19b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 25 Aug 2023 14:10:47 -0400 Subject: [PATCH 377/562] service-identity, incremental, and setuptools-rust now have type hints. (#16186) --- changelog.d/16186.misc | 1 + mypy.ini | 9 --------- 2 files changed, 1 insertion(+), 9 deletions(-) create mode 100644 changelog.d/16186.misc diff --git a/changelog.d/16186.misc b/changelog.d/16186.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16186.misc @@ -0,0 +1 @@ +Improve type hints. 
diff --git a/mypy.ini b/mypy.ini index 311a951aa8..fb5f44c939 100644 --- a/mypy.ini +++ b/mypy.ini @@ -87,18 +87,9 @@ ignore_missing_imports = True [mypy-saml2.*] ignore_missing_imports = True -[mypy-service_identity.*] -ignore_missing_imports = True - [mypy-srvlookup.*] ignore_missing_imports = True # https://github.com/twisted/treq/pull/366 [mypy-treq.*] ignore_missing_imports = True - -[mypy-incremental.*] -ignore_missing_imports = True - -[mypy-setuptools_rust.*] -ignore_missing_imports = True From 84f441f88f51d3f94e1616e1e5507df0dadb6de8 Mon Sep 17 00:00:00 2001 From: V02460 Date: Fri, 25 Aug 2023 21:05:10 +0200 Subject: [PATCH 378/562] Prepare unit tests for Python 3.12 (#16099) --- .ci/scripts/calculate_jobs.py | 3 +- changelog.d/16099.misc | 1 + poetry.lock | 22 +++++++-------- synapse/logging/_terse_json.py | 1 + tests/handlers/test_device.py | 24 ++++++++-------- tests/rest/client/test_login.py | 5 ++-- tests/rest/client/test_register.py | 8 +++--- tests/rest/client/test_relations.py | 40 ++++++++++++++------------- tests/storage/test_client_ips.py | 30 ++++++++++---------- tests/storage/test_devices.py | 18 ++++++------ tests/storage/test_end_to_end_keys.py | 10 +++++-- tests/storage/test_room.py | 12 ++++---- tests/test_terms_auth.py | 4 ++- 13 files changed, 94 insertions(+), 84 deletions(-) create mode 100644 changelog.d/16099.misc diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index 50e11e6504..661887e209 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -47,10 +47,9 @@ if not IS_PR: "database": "sqlite", "extras": "all", } - for version in ("3.9", "3.10", "3.11") + for version in ("3.9", "3.10", "3.11", "3.12.0-rc.1") ) - trial_postgres_tests = [ { "python-version": "3.8", diff --git a/changelog.d/16099.misc b/changelog.d/16099.misc new file mode 100644 index 0000000000..d0e2811366 --- /dev/null +++ b/changelog.d/16099.misc @@ -0,0 +1 @@ +Prepare unit tests for Python 3.12. diff --git a/poetry.lock b/poetry.lock index e62c10da9f..796890c3d8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. 
[[package]] name = "alabaster" @@ -544,13 +544,13 @@ files = [ [[package]] name = "elementpath" -version = "4.1.0" +version = "4.1.5" description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and lxml" optional = true python-versions = ">=3.7" files = [ - {file = "elementpath-4.1.0-py3-none-any.whl", hash = "sha256:2b1b524223d70fd6dd63a36b9bc32e4919c96a272c2d1454094c4d85086bc6f8"}, - {file = "elementpath-4.1.0.tar.gz", hash = "sha256:dbd7eba3cf0b3b4934f627ba24851a3e0798ef2bc9104555a4cd831f2e6e8e14"}, + {file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"}, + {file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"}, ] [package.extras] @@ -3207,22 +3207,22 @@ files = [ [[package]] name = "xmlschema" -version = "2.2.2" +version = "2.4.0" description = "An XML Schema validator and decoder" optional = true python-versions = ">=3.7" files = [ - {file = "xmlschema-2.2.2-py3-none-any.whl", hash = "sha256:557f3632b54b6ff10576736bba62e43db84eb60f6465a83818576cd9ffcc1799"}, - {file = "xmlschema-2.2.2.tar.gz", hash = "sha256:0caa96668807b4b51c42a0fe2b6610752bc59f069615df3e34dcfffb962973fd"}, + {file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"}, + {file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"}, ] [package.dependencies] -elementpath = ">=4.0.0,<5.0.0" +elementpath = ">=4.1.5,<5.0.0" [package.extras] -codegen = ["elementpath (>=4.0.0,<5.0.0)", "jinja2"] -dev = ["Sphinx", "coverage", "elementpath (>=4.0.0,<5.0.0)", "flake8", "jinja2", "lxml", "lxml-stubs", "memory-profiler", "mypy", "sphinx-rtd-theme", "tox"] -docs = ["Sphinx", "elementpath (>=4.0.0,<5.0.0)", "jinja2", "sphinx-rtd-theme"] +codegen = ["elementpath (>=4.1.5,<5.0.0)", "jinja2"] +dev = ["Sphinx", "coverage", "elementpath (>=4.1.5,<5.0.0)", "flake8", "jinja2", "lxml", "lxml-stubs", "memory-profiler", "mypy", "sphinx-rtd-theme", "tox"] +docs = ["Sphinx", "elementpath (>=4.1.5,<5.0.0)", "jinja2", "sphinx-rtd-theme"] [[package]] name = "zipp" diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py index b78d6e17c9..98c6038ff2 100644 --- a/synapse/logging/_terse_json.py +++ b/synapse/logging/_terse_json.py @@ -44,6 +44,7 @@ _IGNORED_LOG_RECORD_ATTRIBUTES = { "processName", "relativeCreated", "stack_info", + "taskName", "thread", "threadName", } diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index dca539d203..55a4f95ef3 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -122,50 +122,50 @@ class DeviceTestCase(unittest.HomeserverTestCase): self.assertEqual(3, len(res)) device_map = {d["device_id"]: d for d in res} - self.assertDictContainsSubset( + self.assertLessEqual( { "user_id": user1, "device_id": "xyz", "display_name": "display 0", "last_seen_ip": None, "last_seen_ts": None, - }, - device_map["xyz"], + }.items(), + device_map["xyz"].items(), ) - self.assertDictContainsSubset( + self.assertLessEqual( { "user_id": user1, "device_id": "fco", "display_name": "display 1", "last_seen_ip": "ip1", "last_seen_ts": 1000000, - }, - device_map["fco"], + }.items(), + device_map["fco"].items(), ) - self.assertDictContainsSubset( + self.assertLessEqual( { "user_id": user1, "device_id": "abc", "display_name": "display 2", "last_seen_ip": "ip3", "last_seen_ts": 3000000, - }, - 
device_map["abc"], + }.items(), + device_map["abc"].items(), ) def test_get_device(self) -> None: self._record_users() res = self.get_success(self.handler.get_device(user1, "abc")) - self.assertDictContainsSubset( + self.assertLessEqual( { "user_id": user1, "device_id": "abc", "display_name": "display 2", "last_seen_ip": "ip3", "last_seen_ts": 3000000, - }, - res, + }.items(), + res.items(), ) def test_delete_device(self) -> None: diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index 62c32cae5e..a2a6589564 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -581,8 +581,9 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): body, ) self.assertEqual(channel.code, 403, channel.result) - self.assertDictContainsSubset( - {"errcode": Codes.LIMIT_EXCEEDED, "extra": "value"}, channel.json_body + self.assertLessEqual( + {"errcode": Codes.LIMIT_EXCEEDED, "extra": "value"}.items(), + channel.json_body.items(), ) diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index b228dba861..c33393dc28 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -75,7 +75,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200, msg=channel.result) det_data = {"user_id": user_id, "home_server": self.hs.hostname} - self.assertDictContainsSubset(det_data, channel.json_body) + self.assertLessEqual(det_data.items(), channel.json_body.items()) def test_POST_appservice_registration_no_type(self) -> None: as_token = "i_am_an_app_service" @@ -136,7 +136,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "device_id": device_id, } self.assertEqual(channel.code, 200, msg=channel.result) - self.assertDictContainsSubset(det_data, channel.json_body) + self.assertLessEqual(det_data.items(), channel.json_body.items()) @override_config({"enable_registration": False}) def test_POST_disabled_registration(self) -> None: @@ -157,7 +157,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): det_data = {"home_server": self.hs.hostname, "device_id": "guest_device"} self.assertEqual(channel.code, 200, msg=channel.result) - self.assertDictContainsSubset(det_data, channel.json_body) + self.assertLessEqual(det_data.items(), channel.json_body.items()) def test_POST_disabled_guest_registration(self) -> None: self.hs.config.registration.allow_guest_access = False @@ -267,7 +267,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): "device_id": device_id, } self.assertEqual(channel.code, 200, msg=channel.result) - self.assertDictContainsSubset(det_data, channel.json_body) + self.assertLessEqual(det_data.items(), channel.json_body.items()) # Check the `completed` counter has been incremented and pending is 0 res = self.get_success( diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index d3f6191996..61773fb28c 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -570,7 +570,7 @@ class RelationsTestCase(BaseRelationsTestCase): ) self.assertEqual(200, channel.code, channel.json_body) event_result = channel.json_body - self.assertDictContainsSubset(original_body, event_result["content"]) + self.assertLessEqual(original_body.items(), event_result["content"].items()) # also check /context, which returns the *edited* event channel = self.make_request( @@ -587,14 +587,14 @@ class RelationsTestCase(BaseRelationsTestCase): (context_result, 
"/context"), ): # The reference metadata should still be intact. - self.assertDictContainsSubset( + self.assertLessEqual( { "m.relates_to": { "event_id": self.parent_id, "rel_type": "m.reference", } - }, - result_event_dict["content"], + }.items(), + result_event_dict["content"].items(), desc, ) @@ -1372,9 +1372,11 @@ class BundledAggregationsTestCase(BaseRelationsTestCase): latest_event_in_thread = thread_summary["latest_event"] # The latest event in the thread should have the edit appear under the # bundled aggregations. - self.assertDictContainsSubset( - {"event_id": edit_event_id, "sender": "@alice:test"}, - latest_event_in_thread["unsigned"]["m.relations"][RelationTypes.REPLACE], + self.assertLessEqual( + {"event_id": edit_event_id, "sender": "@alice:test"}.items(), + latest_event_in_thread["unsigned"]["m.relations"][ + RelationTypes.REPLACE + ].items(), ) def test_aggregation_get_event_for_annotation(self) -> None: @@ -1637,9 +1639,9 @@ class RelationRedactionTestCase(BaseRelationsTestCase): ################################################## self.assertEqual(self._get_related_events(), list(reversed(thread_replies))) relations = self._get_bundled_aggregations() - self.assertDictContainsSubset( - {"count": 3, "current_user_participated": True}, - relations[RelationTypes.THREAD], + self.assertLessEqual( + {"count": 3, "current_user_participated": True}.items(), + relations[RelationTypes.THREAD].items(), ) # The latest event is the last sent event. self.assertEqual( @@ -1658,9 +1660,9 @@ class RelationRedactionTestCase(BaseRelationsTestCase): # The thread should still exist, but the latest event should be updated. self.assertEqual(self._get_related_events(), list(reversed(thread_replies))) relations = self._get_bundled_aggregations() - self.assertDictContainsSubset( - {"count": 2, "current_user_participated": True}, - relations[RelationTypes.THREAD], + self.assertLessEqual( + {"count": 2, "current_user_participated": True}.items(), + relations[RelationTypes.THREAD].items(), ) # And the latest event is the last unredacted event. self.assertEqual( @@ -1677,9 +1679,9 @@ class RelationRedactionTestCase(BaseRelationsTestCase): # Nothing should have changed (except the thread count). self.assertEqual(self._get_related_events(), thread_replies) relations = self._get_bundled_aggregations() - self.assertDictContainsSubset( - {"count": 1, "current_user_participated": True}, - relations[RelationTypes.THREAD], + self.assertLessEqual( + {"count": 1, "current_user_participated": True}.items(), + relations[RelationTypes.THREAD].items(), ) # And the latest event is the last unredacted event. 
self.assertEqual( @@ -1774,12 +1776,12 @@ class RelationRedactionTestCase(BaseRelationsTestCase): event_ids = self._get_related_events() relations = self._get_bundled_aggregations() self.assertEqual(len(event_ids), 1) - self.assertDictContainsSubset( + self.assertLessEqual( { "count": 1, "current_user_participated": True, - }, - relations[RelationTypes.THREAD], + }.items(), + relations[RelationTypes.THREAD].items(), ) self.assertEqual( relations[RelationTypes.THREAD]["latest_event"]["event_id"], diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 12e24d4dbd..6b9692c486 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -65,15 +65,15 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): ) r = result[(user_id, device_id)] - self.assertDictContainsSubset( + self.assertLessEqual( { "user_id": user_id, "device_id": device_id, "ip": "ip", "user_agent": "user_agent", "last_seen": 12345678000, - }, - r, + }.items(), + r.items(), ) def test_insert_new_client_ip_none_device_id(self) -> None: @@ -526,15 +526,15 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): ) r = result[(user_id, device_id)] - self.assertDictContainsSubset( + self.assertLessEqual( { "user_id": user_id, "device_id": device_id, "ip": None, "user_agent": None, "last_seen": None, - }, - r, + }.items(), + r.items(), ) # Register the background update to run again. @@ -561,15 +561,15 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): ) r = result[(user_id, device_id)] - self.assertDictContainsSubset( + self.assertLessEqual( { "user_id": user_id, "device_id": device_id, "ip": "ip", "user_agent": "user_agent", "last_seen": 0, - }, - r, + }.items(), + r.items(), ) def test_old_user_ips_pruned(self) -> None: @@ -640,15 +640,15 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): ) r = result2[(user_id, device_id)] - self.assertDictContainsSubset( + self.assertLessEqual( { "user_id": user_id, "device_id": device_id, "ip": "ip", "user_agent": "user_agent", "last_seen": 0, - }, - r, + }.items(), + r.items(), ) def test_invalid_user_agents_are_ignored(self) -> None: @@ -777,13 +777,13 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase): self.store.get_last_client_ip_by_device(self.user_id, device_id) ) r = result[(self.user_id, device_id)] - self.assertDictContainsSubset( + self.assertLessEqual( { "user_id": self.user_id, "device_id": device_id, "ip": expected_ip, "user_agent": "Mozzila pizza", "last_seen": 123456100, - }, - r, + }.items(), + r.items(), ) diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py index f03807c8f9..58ab41cf26 100644 --- a/tests/storage/test_devices.py +++ b/tests/storage/test_devices.py @@ -58,13 +58,13 @@ class DeviceStoreTestCase(HomeserverTestCase): res = self.get_success(self.store.get_device("user_id", "device_id")) assert res is not None - self.assertDictContainsSubset( + self.assertLessEqual( { "user_id": "user_id", "device_id": "device_id", "display_name": "display_name", - }, - res, + }.items(), + res.items(), ) def test_get_devices_by_user(self) -> None: @@ -80,21 +80,21 @@ class DeviceStoreTestCase(HomeserverTestCase): res = self.get_success(self.store.get_devices_by_user("user_id")) self.assertEqual(2, len(res.keys())) - self.assertDictContainsSubset( + self.assertLessEqual( { "user_id": "user_id", "device_id": "device1", "display_name": "display_name 1", - }, - res["device1"], + }.items(), + res["device1"].items(), ) - self.assertDictContainsSubset( + 
self.assertLessEqual( { "user_id": "user_id", "device_id": "device2", "display_name": "display_name 2", - }, - res["device2"], + }.items(), + res["device2"].items(), ) def test_count_devices_by_users(self) -> None: diff --git a/tests/storage/test_end_to_end_keys.py b/tests/storage/test_end_to_end_keys.py index 5fde3b9c78..2033377b52 100644 --- a/tests/storage/test_end_to_end_keys.py +++ b/tests/storage/test_end_to_end_keys.py @@ -38,7 +38,7 @@ class EndToEndKeyStoreTestCase(HomeserverTestCase): self.assertIn("user", res) self.assertIn("device", res["user"]) dev = res["user"]["device"] - self.assertDictContainsSubset(json, dev) + self.assertLessEqual(json.items(), dev.items()) def test_reupload_key(self) -> None: now = 1470174257070 @@ -71,8 +71,12 @@ class EndToEndKeyStoreTestCase(HomeserverTestCase): self.assertIn("user", res) self.assertIn("device", res["user"]) dev = res["user"]["device"] - self.assertDictContainsSubset( - {"key": "value", "unsigned": {"device_display_name": "display_name"}}, dev + self.assertLessEqual( + { + "key": "value", + "unsigned": {"device_display_name": "display_name"}, + }.items(), + dev.items(), ) def test_multiple_devices(self) -> None: diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py index 71ec74eadc..1e27f2c275 100644 --- a/tests/storage/test_room.py +++ b/tests/storage/test_room.py @@ -44,13 +44,13 @@ class RoomStoreTestCase(HomeserverTestCase): def test_get_room(self) -> None: res = self.get_success(self.store.get_room(self.room.to_string())) assert res is not None - self.assertDictContainsSubset( + self.assertLessEqual( { "room_id": self.room.to_string(), "creator": self.u_creator.to_string(), "is_public": True, - }, - res, + }.items(), + res.items(), ) def test_get_room_unknown_room(self) -> None: @@ -59,13 +59,13 @@ class RoomStoreTestCase(HomeserverTestCase): def test_get_room_with_stats(self) -> None: res = self.get_success(self.store.get_room_with_stats(self.room.to_string())) assert res is not None - self.assertDictContainsSubset( + self.assertLessEqual( { "room_id": self.room.to_string(), "creator": self.u_creator.to_string(), "public": True, - }, - res, + }.items(), + res.items(), ) def test_get_room_with_stats_unknown_room(self) -> None: diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py index 52424aa087..64a49488c6 100644 --- a/tests/test_terms_auth.py +++ b/tests/test_terms_auth.py @@ -85,7 +85,9 @@ class TermsTestCase(unittest.HomeserverTestCase): } } self.assertIsInstance(channel.json_body["params"], dict) - self.assertDictContainsSubset(channel.json_body["params"], expected_params) + self.assertLessEqual( + channel.json_body["params"].items(), expected_params.items() + ) # We have to complete the dummy auth stage before completing the terms stage request_data = { From e54c1d4ed32b1f6538463f9839c8b8036ad998a9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Aug 2023 07:53:57 -0400 Subject: [PATCH 379/562] Bump types-psycopg2 from 2.9.21.10 to 2.9.21.11 (#16200) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 796890c3d8..c7ade12199 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3002,13 +3002,13 @@ files = [ [[package]] name = "types-psycopg2" -version = "2.9.21.10" +version = "2.9.21.11" description = "Typing stubs for psycopg2" optional = false python-versions = "*" files = [ - {file = 
"types-psycopg2-2.9.21.10.tar.gz", hash = "sha256:c2600892312ae1c34e12f145749795d93dc4eac3ef7dbf8a9c1bfd45385e80d7"}, - {file = "types_psycopg2-2.9.21.10-py3-none-any.whl", hash = "sha256:918224a0731a3650832e46633e720703b5beef7693a064e777d9748654fcf5e5"}, + {file = "types-psycopg2-2.9.21.11.tar.gz", hash = "sha256:d5077eacf90e61db8c0b8eea2fdc9d4a97d7aaa16865fb4bd7034a7571520b4d"}, + {file = "types_psycopg2-2.9.21.11-py3-none-any.whl", hash = "sha256:7a323d7744bc8a882fb5a6f63448e903fc70d3dc0d6da9ec1f9c6c4dc10a7102"}, ] [[package]] From 743860e6a67960136c47d5fa42cc13bfbda7d475 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Aug 2023 07:54:13 -0400 Subject: [PATCH 380/562] Bump types-pyyaml from 6.0.12.10 to 6.0.12.11 (#16199) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index c7ade12199..edc9647721 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3027,13 +3027,13 @@ cryptography = ">=35.0.0" [[package]] name = "types-pyyaml" -version = "6.0.12.10" +version = "6.0.12.11" description = "Typing stubs for PyYAML" optional = false python-versions = "*" files = [ - {file = "types-PyYAML-6.0.12.10.tar.gz", hash = "sha256:ebab3d0700b946553724ae6ca636ea932c1b0868701d4af121630e78d695fc97"}, - {file = "types_PyYAML-6.0.12.10-py3-none-any.whl", hash = "sha256:662fa444963eff9b68120d70cda1af5a5f2aa57900003c2006d7626450eaae5f"}, + {file = "types-PyYAML-6.0.12.11.tar.gz", hash = "sha256:7d340b19ca28cddfdba438ee638cd4084bde213e501a3978738543e27094775b"}, + {file = "types_PyYAML-6.0.12.11-py3-none-any.whl", hash = "sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d"}, ] [[package]] From c0bbad8a96004576353ddff3dc939f066beca750 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Aug 2023 07:59:27 -0400 Subject: [PATCH 381/562] Bump psycopg2 from 2.9.6 to 2.9.7 (#16196) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/poetry.lock b/poetry.lock index edc9647721..9a1fe66b7b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1744,24 +1744,22 @@ twisted = ["twisted"] [[package]] name = "psycopg2" -version = "2.9.6" +version = "2.9.7" description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true python-versions = ">=3.6" files = [ - {file = "psycopg2-2.9.6-cp310-cp310-win32.whl", hash = "sha256:f7a7a5ee78ba7dc74265ba69e010ae89dae635eea0e97b055fb641a01a31d2b1"}, - {file = "psycopg2-2.9.6-cp310-cp310-win_amd64.whl", hash = "sha256:f75001a1cbbe523e00b0ef896a5a1ada2da93ccd752b7636db5a99bc57c44494"}, - {file = "psycopg2-2.9.6-cp311-cp311-win32.whl", hash = "sha256:53f4ad0a3988f983e9b49a5d9765d663bbe84f508ed655affdb810af9d0972ad"}, - {file = "psycopg2-2.9.6-cp311-cp311-win_amd64.whl", hash = "sha256:b81fcb9ecfc584f661b71c889edeae70bae30d3ef74fa0ca388ecda50b1222b7"}, - {file = "psycopg2-2.9.6-cp36-cp36m-win32.whl", hash = "sha256:11aca705ec888e4f4cea97289a0bf0f22a067a32614f6ef64fcf7b8bfbc53744"}, - {file = "psycopg2-2.9.6-cp36-cp36m-win_amd64.whl", hash = "sha256:36c941a767341d11549c0fbdbb2bf5be2eda4caf87f65dfcd7d146828bd27f39"}, - {file = "psycopg2-2.9.6-cp37-cp37m-win32.whl", hash = "sha256:869776630c04f335d4124f120b7fb377fe44b0a7645ab3c34b4ba42516951889"}, - {file = 
"psycopg2-2.9.6-cp37-cp37m-win_amd64.whl", hash = "sha256:a8ad4a47f42aa6aec8d061fdae21eaed8d864d4bb0f0cade5ad32ca16fcd6258"}, - {file = "psycopg2-2.9.6-cp38-cp38-win32.whl", hash = "sha256:2362ee4d07ac85ff0ad93e22c693d0f37ff63e28f0615a16b6635a645f4b9214"}, - {file = "psycopg2-2.9.6-cp38-cp38-win_amd64.whl", hash = "sha256:d24ead3716a7d093b90b27b3d73459fe8cd90fd7065cf43b3c40966221d8c394"}, - {file = "psycopg2-2.9.6-cp39-cp39-win32.whl", hash = "sha256:1861a53a6a0fd248e42ea37c957d36950da00266378746588eab4f4b5649e95f"}, - {file = "psycopg2-2.9.6-cp39-cp39-win_amd64.whl", hash = "sha256:ded2faa2e6dfb430af7713d87ab4abbfc764d8d7fb73eafe96a24155f906ebf5"}, - {file = "psycopg2-2.9.6.tar.gz", hash = "sha256:f15158418fd826831b28585e2ab48ed8df2d0d98f502a2b4fe619e7d5ca29011"}, + {file = "psycopg2-2.9.7-cp310-cp310-win32.whl", hash = "sha256:1a6a2d609bce44f78af4556bea0c62a5e7f05c23e5ea9c599e07678995609084"}, + {file = "psycopg2-2.9.7-cp310-cp310-win_amd64.whl", hash = "sha256:b22ed9c66da2589a664e0f1ca2465c29b75aaab36fa209d4fb916025fb9119e5"}, + {file = "psycopg2-2.9.7-cp311-cp311-win32.whl", hash = "sha256:44d93a0109dfdf22fe399b419bcd7fa589d86895d3931b01fb321d74dadc68f1"}, + {file = "psycopg2-2.9.7-cp311-cp311-win_amd64.whl", hash = "sha256:91e81a8333a0037babfc9fe6d11e997a9d4dac0f38c43074886b0d9dead94fe9"}, + {file = "psycopg2-2.9.7-cp37-cp37m-win32.whl", hash = "sha256:d1210fcf99aae6f728812d1d2240afc1dc44b9e6cba526a06fb8134f969957c2"}, + {file = "psycopg2-2.9.7-cp37-cp37m-win_amd64.whl", hash = "sha256:e9b04cbef584310a1ac0f0d55bb623ca3244c87c51187645432e342de9ae81a8"}, + {file = "psycopg2-2.9.7-cp38-cp38-win32.whl", hash = "sha256:d5c5297e2fbc8068d4255f1e606bfc9291f06f91ec31b2a0d4c536210ac5c0a2"}, + {file = "psycopg2-2.9.7-cp38-cp38-win_amd64.whl", hash = "sha256:8275abf628c6dc7ec834ea63f6f3846bf33518907a2b9b693d41fd063767a866"}, + {file = "psycopg2-2.9.7-cp39-cp39-win32.whl", hash = "sha256:c7949770cafbd2f12cecc97dea410c514368908a103acf519f2a346134caa4d5"}, + {file = "psycopg2-2.9.7-cp39-cp39-win_amd64.whl", hash = "sha256:b6bd7d9d3a7a63faae6edf365f0ed0e9b0a1aaf1da3ca146e6b043fb3eb5d723"}, + {file = "psycopg2-2.9.7.tar.gz", hash = "sha256:f00cc35bd7119f1fed17b85bd1007855194dde2cbd8de01ab8ebb17487440ad8"}, ] [[package]] From 1511a5553906b9df18ab2a08b11eed3c9ab1c5ba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Aug 2023 08:01:23 -0400 Subject: [PATCH 382/562] Bump regex from 1.9.3 to 1.9.4 (#16195) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ad88335f31..be9c3bdfb0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -291,9 +291,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.3" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" +checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" dependencies = [ "aho-corasick", "memchr", @@ -303,9 +303,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" +checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" dependencies = [ "aho-corasick", "memchr", @@ -314,9 +314,9 @@ dependencies = [ 
[[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "ryu" From 4379d3ef639ce4082e5b82940e970cad0ac60517 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 28 Aug 2023 09:04:15 -0400 Subject: [PATCH 383/562] Bump setuptools-rust from 1.6.0 to 1.7.0. (#16201) --- changelog.d/16201.misc | 1 + poetry.lock | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) create mode 100644 changelog.d/16201.misc diff --git a/changelog.d/16201.misc b/changelog.d/16201.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16201.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/poetry.lock b/poetry.lock index 9a1fe66b7b..ef12c1cc9e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. [[package]] name = "alabaster" @@ -2465,18 +2465,19 @@ testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs ( [[package]] name = "setuptools-rust" -version = "1.6.0" +version = "1.7.0" description = "Setuptools Rust extension plugin" optional = false python-versions = ">=3.7" files = [ - {file = "setuptools-rust-1.6.0.tar.gz", hash = "sha256:c86e734deac330597998bfbc08da45187e6b27837e23bd91eadb320732392262"}, - {file = "setuptools_rust-1.6.0-py3-none-any.whl", hash = "sha256:e28ae09fb7167c44ab34434eb49279307d611547cb56cb9789955cdb54a1aed9"}, + {file = "setuptools-rust-1.7.0.tar.gz", hash = "sha256:c7100999948235a38ae7e555fe199aa66c253dc384b125f5d85473bf81eae3a3"}, + {file = "setuptools_rust-1.7.0-py3-none-any.whl", hash = "sha256:071099885949132a2180d16abf907b60837e74b4085047ba7e9c0f5b365310c1"}, ] [package.dependencies] semantic-version = ">=2.8.2,<3" setuptools = ">=62.4" +tomli = {version = ">=1.2.1", markers = "python_version < \"3.11\""} typing-extensions = ">=3.7.4.3" [[package]] From 224c2bbcfa9f762a282d595b212a3a009eb61dfc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Aug 2023 15:38:45 +0200 Subject: [PATCH 384/562] Bump serde from 1.0.184 to 1.0.188 (#16194) Bumps [serde](https://github.com/serde-rs/serde) from 1.0.184 to 1.0.188. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.184...v1.0.188) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index be9c3bdfb0..4d60f8dcb6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -332,18 +332,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.184" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c911f4b04d7385c9035407a4eff5903bf4fe270fa046fda448b69e797f4fff0" +checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.184" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1df27f5b29406ada06609b2e2f77fb34f6dbb104a457a671cc31dbed237e09e" +checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", From 501da8ecd8f056fb953fbccb43fc60ba9edb91d5 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Mon, 28 Aug 2023 16:03:51 +0200 Subject: [PATCH 385/562] Task scheduler: add replication notify for new task to launch ASAP (#16184) --- changelog.d/16184.misc | 1 + synapse/replication/tcp/commands.py | 12 ++++ synapse/replication/tcp/handler.py | 18 ++++++ synapse/util/task_scheduler.py | 92 ++++++++++++++--------------- tests/util/test_task_scheduler.py | 58 ++++++++++++------ 5 files changed, 114 insertions(+), 67 deletions(-) create mode 100644 changelog.d/16184.misc diff --git a/changelog.d/16184.misc b/changelog.d/16184.misc new file mode 100644 index 0000000000..3c0baddfe1 --- /dev/null +++ b/changelog.d/16184.misc @@ -0,0 +1 @@ +Task scheduler: add replication notify for new task to launch ASAP. diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index 10f5c98ff8..58a871c6d9 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -452,6 +452,17 @@ class LockReleasedCommand(Command): return json_encoder.encode([self.instance_name, self.lock_name, self.lock_key]) +class NewActiveTaskCommand(_SimpleCommand): + """Sent to inform the instance handling background tasks that a new active task is available to run. + + Format:: + + NEW_ACTIVE_TASK "<task_id>" + """ + + NAME = "NEW_ACTIVE_TASK" + + _COMMANDS: Tuple[Type[Command], ...] = ( ServerCommand, RdataCommand, @@ -466,6 +477,7 @@ _COMMANDS: Tuple[Type[Command], ...] = ( RemoteServerUpCommand, ClearUserSyncsCommand, LockReleasedCommand, + NewActiveTaskCommand, ) # Map of command name to command type. diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 38adcbe1d0..92c5a55acc 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -40,6 +40,7 @@ from synapse.replication.tcp.commands import ( Command, FederationAckCommand, LockReleasedCommand, + NewActiveTaskCommand, PositionCommand, RdataCommand, RemoteServerUpCommand, @@ -238,6 +239,10 @@ class ReplicationCommandHandler: if self._is_master: self._server_notices_sender = hs.get_server_notices_sender() + self._task_scheduler = None + if hs.config.worker.run_background_tasks: + self._task_scheduler = hs.get_task_scheduler() + if hs.config.redis.redis_enabled: # If we're using Redis, it's the background worker that should # receive USER_IP commands and store the relevant client IPs.
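Taken together with the `task_scheduler.py` changes below, the command above and the handler hook in the next hunk give the following end-to-end flow when a task is scheduled to run immediately on an instance that does not handle background tasks (an illustrative sketch of the calls involved, not code from the diff; "some_action" is a hypothetical action name used only for illustration):

    # Worker without background tasks                  Background worker
    # --------------------------------                 -----------------
    # await task_scheduler.schedule_task("some_action")
    #   -> task stored with status ACTIVE
    # replication_client.send_new_active_task(task.id)
    #   ---- NEW_ACTIVE_TASK "<task_id>" over replication ---->
    #                                                  on_NEW_ACTIVE_TASK(cmd)
    #                                                  task = await get_task(cmd.data)
    #                                                  await _launch_task(task)

Previously such a task would have sat as ACTIVE until the next run of the reconciliation loop picked it up; the new command lets the background worker launch it right away.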
@@ -663,6 +668,15 @@ class ReplicationCommandHandler: cmd.instance_name, cmd.lock_name, cmd.lock_key ) + async def on_NEW_ACTIVE_TASK( + self, conn: IReplicationConnection, cmd: NewActiveTaskCommand + ) -> None: + """Called when we get a new NEW_ACTIVE_TASK command.""" + if self._task_scheduler: + task = await self._task_scheduler.get_task(cmd.data) + if task: + await self._task_scheduler._launch_task(task) + def new_connection(self, connection: IReplicationConnection) -> None: """Called when we have a new connection.""" self._connections.append(connection) @@ -776,6 +790,10 @@ class ReplicationCommandHandler: if instance_name == self._instance_name: self.send_command(LockReleasedCommand(instance_name, lock_name, lock_key)) + def send_new_active_task(self, task_id: str) -> None: + """Called when a new task has been scheduled for immediate launch and is ACTIVE.""" + self.send_command(NewActiveTaskCommand(task_id)) + UpdateToken = TypeVar("UpdateToken") UpdateRow = TypeVar("UpdateRow") diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py index 4aea64b338..9e89aeb748 100644 --- a/synapse/util/task_scheduler.py +++ b/synapse/util/task_scheduler.py @@ -57,14 +57,13 @@ class TaskScheduler: the code launching the task. You can also specify the `result` (and/or an `error`) when returning from the function. - The reconciliation loop runs every 5 mns, so this is not a precise scheduler. When wanting - to launch now, the launch will still not happen before the next loop run. - - Tasks will be run on the worker specified with `run_background_tasks_on` config, - or the main one by default. + The reconciliation loop runs every minute, so this is not a precise scheduler. There is a limit of 10 concurrent tasks, so tasks may be delayed if the pool is already full. In this regard, please take great care that scheduled tasks can actually finish. For now there is no mechanism to stop a running task if it is stuck. + + Tasks will be run on the worker specified with `run_background_tasks_on` config, + or the main one by default. """ # Precision of the scheduler, evaluation of tasks to run will only happen @@ -85,7 +84,7 @@ class TaskScheduler: self._actions: Dict[ str, Callable[ - [ScheduledTask, bool], + [ScheduledTask], Awaitable[Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]], ], ] = {} @@ -98,11 +97,13 @@ class TaskScheduler: "handle_scheduled_tasks", self._handle_scheduled_tasks, ) + else: + self.replication_client = hs.get_replication_command_handler() def register_action( self, function: Callable[ - [ScheduledTask, bool], + [ScheduledTask], Awaitable[Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]], ], action_name: str, @@ -115,10 +116,9 @@ class TaskScheduler: calling `schedule_task` but rather in an `__init__` method. Args: - function: The function to be executed for this action. The parameters - passed to the function when launched are the `ScheduledTask` being run, - and a `first_launch` boolean to signal if it's a resumed task or the first - launch of it. The function should return a tuple of new `status`, `result` + function: The function to be executed for this action. The parameter + passed to the function when launched is the `ScheduledTask` being run. + The function should return a tuple of new `status`, `result` and `error` as specified in `ScheduledTask`. 
action_name: The name of the action to be associated with the function """ @@ -171,6 +171,12 @@ class TaskScheduler: ) await self._store.insert_scheduled_task(task) + if status == TaskStatus.ACTIVE: + if self._run_background_tasks: + await self._launch_task(task) + else: + self.replication_client.send_new_active_task(task.id) + return task.id async def update_task( @@ -265,21 +271,13 @@ class TaskScheduler: Args: id: id of the task to delete """ - if self.task_is_running(id): - raise Exception(f"Task {id} is currently running and can't be deleted") + task = await self.get_task(id) + if task is None: + raise Exception(f"Task {id} does not exist") + if task.status == TaskStatus.ACTIVE: + raise Exception(f"Task {id} is currently ACTIVE and can't be deleted") await self._store.delete_scheduled_task(id) - def task_is_running(self, id: str) -> bool: - """Check if a task is currently running. - - Can only be called from the worker handling the task scheduling. - - Args: - id: id of the task to check - """ - assert self._run_background_tasks - return id in self._running_tasks - async def _handle_scheduled_tasks(self) -> None: """Main loop taking care of launching tasks and cleaning up old ones.""" await self._launch_scheduled_tasks() @@ -288,29 +286,11 @@ class TaskScheduler: async def _launch_scheduled_tasks(self) -> None: """Retrieve and launch scheduled tasks that should be running at that time.""" for task in await self.get_tasks(statuses=[TaskStatus.ACTIVE]): - if not self.task_is_running(task.id): - if ( - len(self._running_tasks) - < TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS - ): - await self._launch_task(task, first_launch=False) - else: - if ( - self._clock.time_msec() - > task.timestamp + TaskScheduler.LAST_UPDATE_BEFORE_WARNING_MS - ): - logger.warn( - f"Task {task.id} (action {task.action}) has seen no update for more than 24h and may be stuck" - ) + await self._launch_task(task) for task in await self.get_tasks( statuses=[TaskStatus.SCHEDULED], max_timestamp=self._clock.time_msec() ): - if ( - not self.task_is_running(task.id) - and len(self._running_tasks) - < TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS - ): - await self._launch_task(task, first_launch=True) + await self._launch_task(task) running_tasks_gauge.set(len(self._running_tasks)) @@ -320,27 +300,27 @@ class TaskScheduler: statuses=[TaskStatus.FAILED, TaskStatus.COMPLETE] ): # FAILED and COMPLETE tasks should never be running - assert not self.task_is_running(task.id) + assert task.id not in self._running_tasks if ( self._clock.time_msec() > task.timestamp + TaskScheduler.KEEP_TASKS_FOR_MS ): await self._store.delete_scheduled_task(task.id) - async def _launch_task(self, task: ScheduledTask, first_launch: bool) -> None: + async def _launch_task(self, task: ScheduledTask) -> None: """Launch a scheduled task now. 
Args: task: the task to launch - first_launch: `True` if it's the first time is launched, `False` otherwise """ - assert task.action in self._actions + assert self._run_background_tasks + assert task.action in self._actions function = self._actions[task.action] async def wrapper() -> None: try: - (status, result, error) = await function(task, first_launch) + (status, result, error) = await function(task) except Exception: f = Failure() logger.error( @@ -360,6 +340,20 @@ class TaskScheduler: ) self._running_tasks.remove(task.id) + if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS: + return + + if ( + self._clock.time_msec() + > task.timestamp + TaskScheduler.LAST_UPDATE_BEFORE_WARNING_MS + ): + logger.warn( + f"Task {task.id} (action {task.action}) has seen no update for more than 24h and may be stuck" + ) + + if task.id in self._running_tasks: + return + self._running_tasks.add(task.id) await self.update_task(task.id, status=TaskStatus.ACTIVE) description = f"{task.id}-{task.action}" diff --git a/tests/util/test_task_scheduler.py b/tests/util/test_task_scheduler.py index 3a97559bf0..8665aeb50c 100644 --- a/tests/util/test_task_scheduler.py +++ b/tests/util/test_task_scheduler.py @@ -22,10 +22,11 @@ from synapse.types import JsonMapping, ScheduledTask, TaskStatus from synapse.util import Clock from synapse.util.task_scheduler import TaskScheduler -from tests import unittest +from tests.replication._base import BaseMultiWorkerStreamTestCase +from tests.unittest import HomeserverTestCase, override_config -class TestTaskScheduler(unittest.HomeserverTestCase): +class TestTaskScheduler(HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.task_scheduler = hs.get_task_scheduler() self.task_scheduler.register_action(self._test_task, "_test_task") @@ -34,7 +35,7 @@ class TestTaskScheduler(unittest.HomeserverTestCase): self.task_scheduler.register_action(self._resumable_task, "_resumable_task") async def _test_task( - self, task: ScheduledTask, first_launch: bool + self, task: ScheduledTask ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: # This test task will copy the parameters to the result result = None @@ -77,7 +78,7 @@ class TestTaskScheduler(unittest.HomeserverTestCase): self.assertIsNone(task) async def _sleeping_task( - self, task: ScheduledTask, first_launch: bool + self, task: ScheduledTask ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: # Sleep for a second await deferLater(self.reactor, 1, lambda: None) @@ -85,24 +86,18 @@ class TestTaskScheduler(unittest.HomeserverTestCase): def test_schedule_lot_of_tasks(self) -> None: """Schedule more than `TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS` tasks and check the behavior.""" - timestamp = self.clock.time_msec() + 30 * 1000 task_ids = [] for i in range(TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS + 1): task_ids.append( self.get_success( self.task_scheduler.schedule_task( "_sleeping_task", - timestamp=timestamp, params={"val": i}, ) ) ) - # The timestamp being 30s after now the task should been executed - # after the first scheduling loop is run - self.reactor.advance((TaskScheduler.SCHEDULE_INTERVAL_MS / 1000)) - - # This is to give the time to the sleeping tasks to finish + # This is to give the time to the active tasks to finish self.reactor.advance(1) # Check that only MAX_CONCURRENT_RUNNING_TASKS tasks has run and that one @@ -120,10 +115,11 @@ class TestTaskScheduler(unittest.HomeserverTestCase): ) scheduled_tasks = [ - t for t in tasks 
if t is not None and t.status == TaskStatus.ACTIVE ] self.assertEquals(len(scheduled_tasks), 1) + # We need to wait for the next run of the scheduler loop self.reactor.advance((TaskScheduler.SCHEDULE_INTERVAL_MS / 1000)) self.reactor.advance(1) @@ -138,7 +134,7 @@ class TestTaskScheduler(unittest.HomeserverTestCase): ) async def _raising_task( - self, task: ScheduledTask, first_launch: bool + self, task: ScheduledTask ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: raise Exception("raising") @@ -146,15 +142,13 @@ class TestTaskScheduler(unittest.HomeserverTestCase): """Schedule a task raising an exception and check it runs to failure and report exception content.""" task_id = self.get_success(self.task_scheduler.schedule_task("_raising_task")) - self.reactor.advance((TaskScheduler.SCHEDULE_INTERVAL_MS / 1000)) - task = self.get_success(self.task_scheduler.get_task(task_id)) assert task is not None self.assertEqual(task.status, TaskStatus.FAILED) self.assertEqual(task.error, "raising") async def _resumable_task( - self, task: ScheduledTask, first_launch: bool + self, task: ScheduledTask ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: if task.result and "in_progress" in task.result: return TaskStatus.COMPLETE, {"success": True}, None @@ -169,8 +163,6 @@ class TestTaskScheduler(unittest.HomeserverTestCase): """Schedule a resumable task and check that it gets properly resumed and complete after simulating a synapse restart.""" task_id = self.get_success(self.task_scheduler.schedule_task("_resumable_task")) - self.reactor.advance((TaskScheduler.SCHEDULE_INTERVAL_MS / 1000)) - task = self.get_success(self.task_scheduler.get_task(task_id)) assert task is not None self.assertEqual(task.status, TaskStatus.ACTIVE) @@ -184,3 +176,33 @@ class TestTaskScheduler(unittest.HomeserverTestCase): self.assertEqual(task.status, TaskStatus.COMPLETE) assert task.result is not None self.assertTrue(task.result.get("success")) + + +class TestTaskSchedulerWithBackgroundWorker(BaseMultiWorkerStreamTestCase): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.task_scheduler = hs.get_task_scheduler() + self.task_scheduler.register_action(self._test_task, "_test_task") + + async def _test_task( + self, task: ScheduledTask + ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + return (TaskStatus.COMPLETE, None, None) + + @override_config({"run_background_tasks_on": "worker1"}) + def test_schedule_task(self) -> None: + """Check that a task scheduled to run now is launched right away on the background worker.""" + bg_worker_hs = self.make_worker_hs( + "synapse.app.generic_worker", + extra_config={"worker_name": "worker1"}, + ) + bg_worker_hs.get_task_scheduler().register_action(self._test_task, "_test_task") + + task_id = self.get_success( + self.task_scheduler.schedule_task( + "_test_task", + ) + ) + + task = self.get_success(self.task_scheduler.get_task(task_id)) + assert task is not None + self.assertEqual(task.status, TaskStatus.COMPLETE) From 1bf143699c0ac8dd53111bfca4628f126d65210d Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 28 Aug 2023 11:03:23 -0400 Subject: [PATCH 386/562] Combine logic about not overriding BUSY presence. (#16170) Simplify some of the presence code by reducing duplicated code between worker & non-worker modes. The main change is to push some of the logic from `user_syncing` into `set_state`. 
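In sketch form, the shared code path both modes now funnel through looks roughly like this (a condensed, illustrative rendering of the `set_state` hunk in the diff below; validation, the presence-enabled check, and worker proxying are omitted):

    async def set_state(self, target_user, state, force_notify=False, is_sync=False):
        presence = state["presence"]
        prev_state = await self.current_state_for_user(target_user.to_string())
        now = self.clock.time_msec()

        # A sync never downgrades a previously set BUSY presence.
        if prev_state.state == PresenceState.BUSY and is_sync:
            presence = PresenceState.BUSY

        new_fields = {"state": presence}
        if presence in (PresenceState.ONLINE, PresenceState.BUSY):
            new_fields["last_active_ts"] = now
        if is_sync:
            # Syncs bump the last sync time but never touch the status message.
            new_fields["last_user_sync_ts"] = now
        else:
            new_fields["status_msg"] = state.get("status_msg")

        await self._update_states(
            [prev_state.copy_and_replace(**new_fields)], force_notify=force_notify
        )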
Concretely, this is done by passing a new `is_sync` flag to `set_state`, indicating whether the user is setting the presence via a `/sync`. If it is `true`, some additional logic is performed: * Don't override `busy` presence. * Update the `last_user_sync_ts`. * Never update the status message. --- changelog.d/16170.misc | 1 + synapse/handlers/presence.py | 155 +++++++++++---------- synapse/replication/http/presence.py | 10 +- tests/handlers/test_presence.py | 37 +++++-- 4 files changed, 99 insertions(+), 104 deletions(-) create mode 100644 changelog.d/16170.misc diff --git a/changelog.d/16170.misc b/changelog.d/16170.misc new file mode 100644 index 0000000000..c950b54367 --- /dev/null +++ b/changelog.d/16170.misc @@ -0,0 +1 @@ +Simplify presence code when using workers. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index e8e9db4b91..c395dcdb43 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -151,15 +151,13 @@ class BasePresenceHandler(abc.ABC): self._federation_queue = PresenceFederationQueue(hs, self) - self._busy_presence_enabled = hs.config.experimental.msc3026_enabled - self.VALID_PRESENCE: Tuple[str, ...] = ( PresenceState.ONLINE, PresenceState.UNAVAILABLE, PresenceState.OFFLINE, ) - if self._busy_presence_enabled: + if hs.config.experimental.msc3026_enabled: self.VALID_PRESENCE += (PresenceState.BUSY,) active_presence = self.store.take_presence_startup_info() @@ -255,17 +253,19 @@ class BasePresenceHandler(abc.ABC): self, target_user: UserID, state: JsonDict, - ignore_status_msg: bool = False, force_notify: bool = False, + is_sync: bool = False, ) -> None: """Set the presence state of the user. Args: target_user: The ID of the user to set the presence state of. state: The presence state as a JSON dictionary. - ignore_status_msg: True to ignore the "status_msg" field of the `state` dict. - If False, the user's current status will be updated. force_notify: Whether to force notification of the update to clients. + is_sync: True if this update was from a sync, which results in + *not* overriding a previously set BUSY status, updating the + user's last_user_sync_ts, and ignoring the "status_msg" field of + the `state` dict. """ @abc.abstractmethod @@ -491,23 +491,18 @@ class WorkerPresenceHandler(BasePresenceHandler): if not affect_presence or not self._presence_enabled: return _NullContextManager() - prev_state = await self.current_state_for_user(user_id) - if prev_state.state != PresenceState.BUSY: - # We set state here but pass ignore_status_msg = True as we don't want to - # cause the status message to be cleared. - # Note that this causes last_active_ts to be incremented which is not - # what the spec wants: see comment in the BasePresenceHandler version - # of this function. - await self.set_state( - UserID.from_string(user_id), - {"presence": presence_state}, - ignore_status_msg=True, - ) + # Note that this causes last_active_ts to be incremented which is not + # what the spec wants. 
+ await self.set_state( + UserID.from_string(user_id), + state={"presence": presence_state}, + is_sync=True, + ) curr_sync = self._user_to_num_current_syncs.get(user_id, 0) self._user_to_num_current_syncs[user_id] = curr_sync + 1 - # If we went from no in flight sync to some, notify replication + # If this is the first in-flight sync, notify replication if self._user_to_num_current_syncs[user_id] == 1: self.mark_as_coming_online(user_id) @@ -518,7 +513,7 @@ class WorkerPresenceHandler(BasePresenceHandler): if user_id in self._user_to_num_current_syncs: self._user_to_num_current_syncs[user_id] -= 1 - # If we went from one in flight sync to non, notify replication + # If there are no more in-flight syncs, notify replication if self._user_to_num_current_syncs[user_id] == 0: self.mark_as_going_offline(user_id) @@ -598,17 +593,19 @@ class WorkerPresenceHandler(BasePresenceHandler): self, target_user: UserID, state: JsonDict, - ignore_status_msg: bool = False, force_notify: bool = False, + is_sync: bool = False, ) -> None: """Set the presence state of the user. Args: target_user: The ID of the user to set the presence state of. state: The presence state as a JSON dictionary. - ignore_status_msg: True to ignore the "status_msg" field of the `state` dict. - If False, the user's current status will be updated. force_notify: Whether to force notification of the update to clients. + is_sync: True if this update was from a sync, which results in + *not* overriding a previously set BUSY status, updating the + user's last_user_sync_ts, and ignoring the "status_msg" field of + the `state` dict. """ presence = state["presence"] @@ -626,8 +623,8 @@ class WorkerPresenceHandler(BasePresenceHandler): instance_name=self._presence_writer_instance, user_id=user_id, state=state, - ignore_status_msg=ignore_status_msg, force_notify=force_notify, + is_sync=is_sync, ) async def bump_presence_active_time(self, user: UserID) -> None: @@ -992,45 +989,13 @@ class PresenceHandler(BasePresenceHandler): curr_sync = self.user_to_num_current_syncs.get(user_id, 0) self.user_to_num_current_syncs[user_id] = curr_sync + 1 - prev_state = await self.current_state_for_user(user_id) - - # If they're busy then they don't stop being busy just by syncing, - # so just update the last sync time. - if prev_state.state != PresenceState.BUSY: - # XXX: We set_state separately here and just update the last_active_ts above - # This keeps the logic as similar as possible between the worker and single - # process modes. Using set_state will actually cause last_active_ts to be - # updated always, which is not what the spec calls for, but synapse has done - # this for... forever, I think. - await self.set_state( - UserID.from_string(user_id), - {"presence": presence_state}, - ignore_status_msg=True, - ) - # Retrieve the new state for the logic below. This should come from the - # in-memory cache. - prev_state = await self.current_state_for_user(user_id) - - # To keep the single process behaviour consistent with worker mode, run the - # same logic as `update_external_syncs_row`, even though it looks weird. 
- if prev_state.state == PresenceState.OFFLINE: - await self._update_states( - [ - prev_state.copy_and_replace( - state=PresenceState.ONLINE, - last_active_ts=self.clock.time_msec(), - last_user_sync_ts=self.clock.time_msec(), - ) - ] - ) - # otherwise, set the new presence state & update the last sync time, - # but don't update last_active_ts as this isn't an indication that - # they've been active (even though it's probably been updated by - # set_state above) - else: - await self._update_states( - [prev_state.copy_and_replace(last_user_sync_ts=self.clock.time_msec())] - ) + # Note that this causes last_active_ts to be incremented which is not + # what the spec wants. + await self.set_state( + UserID.from_string(user_id), + state={"presence": presence_state}, + is_sync=True, + ) async def _end() -> None: try: @@ -1080,32 +1045,27 @@ class PresenceHandler(BasePresenceHandler): process_id, set() ) - updates = [] + # USER_SYNC is sent when a user starts or stops syncing on a remote + # process. (But only for the initial and last device.) + # + # When a user *starts* syncing it also calls set_state(...) which + # will update the state, last_active_ts, and last_user_sync_ts. + # Simply ensure the user is tracked as syncing in this case. + # + # When a user *stops* syncing, update the last_user_sync_ts and mark + # them as no longer syncing. Note this doesn't quite match the + # monolith behaviour, which updates last_user_sync_ts at the end of + # every sync, not just the last in-flight sync. if is_syncing and user_id not in process_presence: - if prev_state.state == PresenceState.OFFLINE: - updates.append( - prev_state.copy_and_replace( - state=PresenceState.ONLINE, - last_active_ts=sync_time_msec, - last_user_sync_ts=sync_time_msec, - ) - ) - else: - updates.append( - prev_state.copy_and_replace(last_user_sync_ts=sync_time_msec) - ) process_presence.add(user_id) - elif user_id in process_presence: - updates.append( - prev_state.copy_and_replace(last_user_sync_ts=sync_time_msec) + elif not is_syncing and user_id in process_presence: + new_state = prev_state.copy_and_replace( + last_user_sync_ts=sync_time_msec ) + await self._update_states([new_state]) - if not is_syncing: process_presence.discard(user_id) - if updates: - await self._update_states(updates) - self.external_process_last_updated_ms[process_id] = self.clock.time_msec() async def update_external_syncs_clear(self, process_id: str) -> None: @@ -1204,17 +1164,19 @@ class PresenceHandler(BasePresenceHandler): self, target_user: UserID, state: JsonDict, - ignore_status_msg: bool = False, force_notify: bool = False, + is_sync: bool = False, ) -> None: """Set the presence state of the user. Args: target_user: The ID of the user to set the presence state of. state: The presence state as a JSON dictionary. - ignore_status_msg: True to ignore the "status_msg" field of the `state` dict. - If False, the user's current status will be updated. force_notify: Whether to force notification of the update to clients. + is_sync: True if this update was from a sync, which results in + *not* overriding a previously set BUSY status, updating the + user's last_user_sync_ts, and ignoring the "status_msg" field of + the `state` dict. """ status_msg = state.get("status_msg", None) presence = state["presence"] @@ -1227,18 +1189,27 @@ class PresenceHandler(BasePresenceHandler): return user_id = target_user.to_string() + now = self.clock.time_msec() prev_state = await self.current_state_for_user(user_id) + # Syncs do not override a previous presence of busy. 
+ # + # TODO: This is a hack for lack of multi-device support. Unfortunately + # removing this requires coordination with clients. + if prev_state.state == PresenceState.BUSY and is_sync: + presence = PresenceState.BUSY + new_fields = {"state": presence} - if not ignore_status_msg: - new_fields["status_msg"] = status_msg + if presence == PresenceState.ONLINE or presence == PresenceState.BUSY: + new_fields["last_active_ts"] = now - if presence == PresenceState.ONLINE or ( - presence == PresenceState.BUSY and self._busy_presence_enabled - ): - new_fields["last_active_ts"] = self.clock.time_msec() + if is_sync: + new_fields["last_user_sync_ts"] = now + else: + # Syncs do not override the status message. + new_fields["status_msg"] = status_msg await self._update_states( [prev_state.copy_and_replace(**new_fields)], force_notify=force_notify diff --git a/synapse/replication/http/presence.py b/synapse/replication/http/presence.py index db16aac9c2..a24fb9310b 100644 --- a/synapse/replication/http/presence.py +++ b/synapse/replication/http/presence.py @@ -73,8 +73,8 @@ class ReplicationPresenceSetState(ReplicationEndpoint): { "state": { ... }, - "ignore_status_msg": false, - "force_notify": false + "force_notify": false, + "is_sync": false } 200 OK @@ -96,13 +96,13 @@ class ReplicationPresenceSetState(ReplicationEndpoint): async def _serialize_payload( # type: ignore[override] user_id: str, state: JsonDict, - ignore_status_msg: bool = False, force_notify: bool = False, + is_sync: bool = False, ) -> JsonDict: return { "state": state, - "ignore_status_msg": ignore_status_msg, "force_notify": force_notify, + "is_sync": is_sync, } async def _handle_request( # type: ignore[override] @@ -111,8 +111,8 @@ class ReplicationPresenceSetState(ReplicationEndpoint): await self._presence_handler.set_state( UserID.from_string(user_id), content["state"], - content["ignore_status_msg"], content["force_notify"], + content.get("is_sync", False), ) return (200, {}) diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 1aebcc16ad..a3fdcf7f93 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -641,13 +641,20 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): """Test that if an external process doesn't update the records for a while we time out their syncing users presence. """ - process_id = "1" - # Notify handler that a user is now syncing. + # Create a worker and use it to handle /sync traffic instead. + # This is used to test that presence changes get replicated from workers + # to the main process correctly. + worker_to_sync_against = self.make_worker_hs( + "synapse.app.generic_worker", {"worker_name": "synchrotron"} + ) + worker_presence_handler = worker_to_sync_against.get_presence_handler() + self.get_success( - self.presence_handler.update_external_syncs_row( - process_id, self.user_id, True, self.clock.time_msec() - ) + worker_presence_handler.user_syncing( + self.user_id, True, PresenceState.ONLINE + ), + by=0.1, ) # Check that if we wait a while without telling the handler the user has @@ -820,7 +827,7 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): # This is used to test that presence changes get replicated from workers # to the main process correctly. 
worker_to_sync_against = self.make_worker_hs( - "synapse.app.generic_worker", {"worker_name": "presence_writer"} + "synapse.app.generic_worker", {"worker_name": "synchrotron"} ) # Set presence to BUSY @@ -832,7 +839,8 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): self.get_success( worker_to_sync_against.get_presence_handler().user_syncing( self.user_id, True, PresenceState.ONLINE - ) + ), + by=0.1, ) # Check against the main process that the user's presence did not change. @@ -840,6 +848,21 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): # we should still be busy self.assertEqual(state.state, PresenceState.BUSY) + # Advance such that the device would be discarded if it was not busy, + # then pump so the _handle_timeouts function gets called. + self.reactor.advance(IDLE_TIMER / 1000) + self.reactor.pump([5]) + + # The account should still be busy. + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) + self.assertEqual(state.state, PresenceState.BUSY) + + # Ensure that a /presence call can set the user *off* busy. + self._set_presencestate_with_status_msg(PresenceState.ONLINE, status_msg) + + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) + self.assertEqual(state.state, PresenceState.ONLINE) + def _set_presencestate_with_status_msg( self, state: str, status_msg: Optional[str] ) -> None: From 40901af5e096cb10ab69141875b071b4ea4ed1e0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 28 Aug 2023 13:08:49 -0400 Subject: [PATCH 387/562] Pass the device ID around in the presence handler (#16171) Refactoring to pass the device ID (in addition to the user ID) through the presence handler (specifically the `user_syncing`, `set_state`, and `bump_presence_active_time` methods and their replication versions). --- changelog.d/16171.misc | 1 + synapse/handlers/events.py | 1 + synapse/handlers/message.py | 9 ++++-- synapse/handlers/presence.py | 46 +++++++++++++++++++++++----- synapse/replication/http/presence.py | 11 ++++--- synapse/rest/client/presence.py | 2 +- synapse/rest/client/read_marker.py | 4 ++- synapse/rest/client/receipts.py | 4 ++- synapse/rest/client/room.py | 4 ++- synapse/rest/client/sync.py | 1 + tests/handlers/test_presence.py | 38 ++++++++++++++++------- 11 files changed, 91 insertions(+), 30 deletions(-) create mode 100644 changelog.d/16171.misc diff --git a/changelog.d/16171.misc b/changelog.d/16171.misc new file mode 100644 index 0000000000..4d709cb56e --- /dev/null +++ b/changelog.d/16171.misc @@ -0,0 +1 @@ +Track per-device information in the presence code. diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 33359f6ed7..d12803bf0f 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -67,6 +67,7 @@ class EventStreamHandler: context = await presence_handler.user_syncing( requester.user.to_string(), + requester.device_id, affect_presence=affect_presence, presence_state=PresenceState.ONLINE, ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 3184bfb047..4a15c76a7b 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1921,7 +1921,10 @@ class EventCreationHandler: # We don't want to block sending messages on any presence code. This # matters as sometimes presence code can take a while. 
run_as_background_process( - "bump_presence_active_time", self._bump_active_time, requester.user + "bump_presence_active_time", + self._bump_active_time, + requester.user, + requester.device_id, ) async def _notify() -> None: @@ -1958,10 +1961,10 @@ class EventCreationHandler: logger.info("maybe_kick_guest_users %r", current_state) await self.hs.get_room_member_handler().kick_guest_users(current_state) - async def _bump_active_time(self, user: UserID) -> None: + async def _bump_active_time(self, user: UserID, device_id: Optional[str]) -> None: try: presence = self.hs.get_presence_handler() - await presence.bump_presence_active_time(user) + await presence.bump_presence_active_time(user, device_id) except Exception: logger.exception("Error bumping presence active time") diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index c395dcdb43..50c68c86ce 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -165,7 +165,11 @@ class BasePresenceHandler(abc.ABC): @abc.abstractmethod async def user_syncing( - self, user_id: str, affect_presence: bool, presence_state: str + self, + user_id: str, + device_id: Optional[str], + affect_presence: bool, + presence_state: str, ) -> ContextManager[None]: """Returns a context manager that should surround any stream requests from the user. @@ -176,6 +180,7 @@ class BasePresenceHandler(abc.ABC): Args: user_id: the user that is starting a sync + device_id: the user's device that is starting a sync affect_presence: If false this function will be a no-op. Useful for streams that are not associated with an actual client that is being used by a user. @@ -252,6 +257,7 @@ class BasePresenceHandler(abc.ABC): async def set_state( self, target_user: UserID, + device_id: Optional[str], state: JsonDict, force_notify: bool = False, is_sync: bool = False, @@ -260,6 +266,7 @@ class BasePresenceHandler(abc.ABC): Args: target_user: The ID of the user to set the presence state of. + device_id: the device that the user is setting the presence state of. state: The presence state as a JSON dictionary. force_notify: Whether to force notification of the update to clients. is_sync: True if this update was from a sync, which results in @@ -269,7 +276,9 @@ class BasePresenceHandler(abc.ABC): """ @abc.abstractmethod - async def bump_presence_active_time(self, user: UserID) -> None: + async def bump_presence_active_time( + self, user: UserID, device_id: Optional[str] + ) -> None: """We've seen the user do something that indicates they're interacting with the app. """ @@ -381,7 +390,9 @@ class BasePresenceHandler(abc.ABC): # We set force_notify=True here so that this presence update is guaranteed to # increment the presence stream ID (which resending the current user's presence # otherwise would not do). - await self.set_state(UserID.from_string(user_id), state, force_notify=True) + await self.set_state( + UserID.from_string(user_id), None, state, force_notify=True + ) async def is_visible(self, observed_user: UserID, observer_user: UserID) -> bool: raise NotImplementedError( @@ -481,7 +492,11 @@ class WorkerPresenceHandler(BasePresenceHandler): self.send_user_sync(user_id, False, last_sync_ms) async def user_syncing( - self, user_id: str, affect_presence: bool, presence_state: str + self, + user_id: str, + device_id: Optional[str], + affect_presence: bool, + presence_state: str, ) -> ContextManager[None]: """Record that a user is syncing. @@ -495,6 +510,7 @@ class WorkerPresenceHandler(BasePresenceHandler): # what the spec wants. 
await self.set_state( UserID.from_string(user_id), + device_id, state={"presence": presence_state}, is_sync=True, ) @@ -592,6 +608,7 @@ class WorkerPresenceHandler(BasePresenceHandler): async def set_state( self, target_user: UserID, + device_id: Optional[str], state: JsonDict, force_notify: bool = False, is_sync: bool = False, @@ -600,6 +617,7 @@ class WorkerPresenceHandler(BasePresenceHandler): Args: target_user: The ID of the user to set the presence state of. + device_id: the device that the user is setting the presence state of. state: The presence state as a JSON dictionary. force_notify: Whether to force notification of the update to clients. is_sync: True if this update was from a sync, which results in @@ -622,12 +640,15 @@ class WorkerPresenceHandler(BasePresenceHandler): await self._set_state_client( instance_name=self._presence_writer_instance, user_id=user_id, + device_id=device_id, state=state, force_notify=force_notify, is_sync=is_sync, ) - async def bump_presence_active_time(self, user: UserID) -> None: + async def bump_presence_active_time( + self, user: UserID, device_id: Optional[str] + ) -> None: """We've seen the user do something that indicates they're interacting with the app. """ @@ -638,7 +659,9 @@ class WorkerPresenceHandler(BasePresenceHandler): # Proxy request to instance that writes presence user_id = user.to_string() await self._bump_active_client( - instance_name=self._presence_writer_instance, user_id=user_id + instance_name=self._presence_writer_instance, + user_id=user_id, + device_id=device_id, ) @@ -943,7 +966,9 @@ class PresenceHandler(BasePresenceHandler): return await self._update_states(changes) - async def bump_presence_active_time(self, user: UserID) -> None: + async def bump_presence_active_time( + self, user: UserID, device_id: Optional[str] + ) -> None: """We've seen the user do something that indicates they're interacting with the app. """ @@ -966,6 +991,7 @@ class PresenceHandler(BasePresenceHandler): async def user_syncing( self, user_id: str, + device_id: Optional[str], affect_presence: bool = True, presence_state: str = PresenceState.ONLINE, ) -> ContextManager[None]: @@ -977,7 +1003,8 @@ class PresenceHandler(BasePresenceHandler): when users disconnect/reconnect. Args: - user_id + user_id: the user that is starting a sync + device_id: the user's device that is starting a sync affect_presence: If false this function will be a no-op. Useful for streams that are not associated with an actual client that is being used by a user. @@ -993,6 +1020,7 @@ class PresenceHandler(BasePresenceHandler): # what the spec wants. await self.set_state( UserID.from_string(user_id), + device_id, state={"presence": presence_state}, is_sync=True, ) @@ -1163,6 +1191,7 @@ class PresenceHandler(BasePresenceHandler): async def set_state( self, target_user: UserID, + device_id: Optional[str], state: JsonDict, force_notify: bool = False, is_sync: bool = False, @@ -1171,6 +1200,7 @@ class PresenceHandler(BasePresenceHandler): Args: target_user: The ID of the user to set the presence state of. + device_id: the device that the user is setting the presence state of. state: The presence state as a JSON dictionary. force_notify: Whether to force notification of the update to clients. 
is_sync: True if this update was from a sync, which results in diff --git a/synapse/replication/http/presence.py b/synapse/replication/http/presence.py index a24fb9310b..6c9e79fb07 100644 --- a/synapse/replication/http/presence.py +++ b/synapse/replication/http/presence.py @@ -13,7 +13,7 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING, Optional, Tuple from twisted.web.server import Request @@ -51,14 +51,14 @@ class ReplicationBumpPresenceActiveTime(ReplicationEndpoint): self._presence_handler = hs.get_presence_handler() @staticmethod - async def _serialize_payload(user_id: str) -> JsonDict: # type: ignore[override] - return {} + async def _serialize_payload(user_id: str, device_id: Optional[str]) -> JsonDict: # type: ignore[override] + return {"device_id": device_id} async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict, user_id: str ) -> Tuple[int, JsonDict]: await self._presence_handler.bump_presence_active_time( - UserID.from_string(user_id) + UserID.from_string(user_id), content.get("device_id") ) return (200, {}) @@ -95,11 +95,13 @@ class ReplicationPresenceSetState(ReplicationEndpoint): @staticmethod async def _serialize_payload( # type: ignore[override] user_id: str, + device_id: Optional[str], state: JsonDict, force_notify: bool = False, is_sync: bool = False, ) -> JsonDict: return { + "device_id": device_id, "state": state, "force_notify": force_notify, "is_sync": is_sync, @@ -110,6 +112,7 @@ class ReplicationPresenceSetState(ReplicationEndpoint): ) -> Tuple[int, JsonDict]: await self._presence_handler.set_state( UserID.from_string(user_id), + content.get("device_id"), content["state"], content["force_notify"], content.get("is_sync", False), diff --git a/synapse/rest/client/presence.py b/synapse/rest/client/presence.py index 8e193330f8..d578faa969 100644 --- a/synapse/rest/client/presence.py +++ b/synapse/rest/client/presence.py @@ -97,7 +97,7 @@ class PresenceStatusRestServlet(RestServlet): raise SynapseError(400, "Unable to parse state") if self._use_presence: - await self.presence_handler.set_state(user, state) + await self.presence_handler.set_state(user, requester.device_id, state) return 200, {} diff --git a/synapse/rest/client/read_marker.py b/synapse/rest/client/read_marker.py index 4f96e51eeb..1707e51972 100644 --- a/synapse/rest/client/read_marker.py +++ b/synapse/rest/client/read_marker.py @@ -52,7 +52,9 @@ class ReadMarkerRestServlet(RestServlet): ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) - await self.presence_handler.bump_presence_active_time(requester.user) + await self.presence_handler.bump_presence_active_time( + requester.user, requester.device_id + ) body = parse_json_object_from_request(request) diff --git a/synapse/rest/client/receipts.py b/synapse/rest/client/receipts.py index 316e7b9982..869a374459 100644 --- a/synapse/rest/client/receipts.py +++ b/synapse/rest/client/receipts.py @@ -94,7 +94,9 @@ class ReceiptRestServlet(RestServlet): Codes.INVALID_PARAM, ) - await self.presence_handler.bump_presence_active_time(requester.user) + await self.presence_handler.bump_presence_active_time( + requester.user, requester.device_id + ) if receipt_type == ReceiptTypes.FULLY_READ: await self.read_marker_handler.received_client_read_marker( diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index dc498001e4..553938ce9d 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ 
-1229,7 +1229,9 @@ class RoomTypingRestServlet(RestServlet): content = parse_json_object_from_request(request) - await self.presence_handler.bump_presence_active_time(requester.user) + await self.presence_handler.bump_presence_active_time( + requester.user, requester.device_id + ) # Limit timeout to stop people from setting silly typing timeouts. timeout = min(content.get("timeout", 30000), 120000) diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index d7854ed4fd..42bdd3bb10 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -205,6 +205,7 @@ class SyncRestServlet(RestServlet): context = await self.presence_handler.user_syncing( user.to_string(), + requester.device_id, affect_presence=affect_presence, presence_state=set_presence, ) diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index a3fdcf7f93..a987267308 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -524,6 +524,7 @@ class PresenceHandlerInitTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_id = f"@test:{self.hs.config.server.server_name}" + self.device_id = "dev-1" # Move the reactor to the initial time. self.reactor.advance(1000) @@ -608,7 +609,10 @@ class PresenceHandlerInitTestCase(unittest.HomeserverTestCase): self.reactor.advance(SYNC_ONLINE_TIMEOUT / 1000 / 2) self.get_success( presence_handler.user_syncing( - self.user_id, sync_state != PresenceState.OFFLINE, sync_state + self.user_id, + self.device_id, + sync_state != PresenceState.OFFLINE, + sync_state, ) ) @@ -632,6 +636,7 @@ class PresenceHandlerInitTestCase(unittest.HomeserverTestCase): class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): user_id = "@test:server" user_id_obj = UserID.from_string(user_id) + device_id = "dev-1" def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.presence_handler = hs.get_presence_handler() @@ -652,7 +657,7 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): self.get_success( worker_presence_handler.user_syncing( - self.user_id, True, PresenceState.ONLINE + self.user_id, self.device_id, True, PresenceState.ONLINE ), by=0.1, ) @@ -708,7 +713,7 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): # Mark user as offline self.get_success( self.presence_handler.set_state( - self.user_id_obj, {"presence": PresenceState.OFFLINE} + self.user_id_obj, self.device_id, {"presence": PresenceState.OFFLINE} ) ) @@ -740,7 +745,7 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): # Mark user as online again self.get_success( self.presence_handler.set_state( - self.user_id_obj, {"presence": PresenceState.ONLINE} + self.user_id_obj, self.device_id, {"presence": PresenceState.ONLINE} ) ) @@ -769,7 +774,7 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): self.get_success( self.presence_handler.user_syncing( - self.user_id, False, PresenceState.ONLINE + self.user_id, self.device_id, False, PresenceState.ONLINE ) ) @@ -786,7 +791,9 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): self._set_presencestate_with_status_msg(PresenceState.UNAVAILABLE, status_msg) self.get_success( - self.presence_handler.user_syncing(self.user_id, True, PresenceState.ONLINE) + self.presence_handler.user_syncing( + self.user_id, self.device_id, True, PresenceState.ONLINE + ) ) state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) @@ -800,7 +807,9 @@ class 
PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase):
         self._set_presencestate_with_status_msg(PresenceState.UNAVAILABLE, status_msg)

         self.get_success(
-            self.presence_handler.user_syncing(self.user_id, True, PresenceState.ONLINE)
+            self.presence_handler.user_syncing(
+                self.user_id, self.device_id, True, PresenceState.ONLINE
+            )
         )

         state = self.get_success(self.presence_handler.get_state(self.user_id_obj))
@@ -838,7 +847,7 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase):
         # /presence/*.
         self.get_success(
             worker_to_sync_against.get_presence_handler().user_syncing(
-                self.user_id, True, PresenceState.ONLINE
+                self.user_id, self.device_id, True, PresenceState.ONLINE
             ),
             by=0.1,
         )
@@ -875,6 +884,7 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase):
         self.get_success(
             self.presence_handler.set_state(
                 self.user_id_obj,
+                self.device_id,
                 {"presence": state, "status_msg": status_msg},
             )
         )
@@ -1116,7 +1126,9 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase):
         # Mark test2 as online, test will be offline with a last_active of 0
         self.get_success(
             self.presence_handler.set_state(
-                UserID.from_string("@test2:server"), {"presence": PresenceState.ONLINE}
+                UserID.from_string("@test2:server"),
+                "dev-1",
+                {"presence": PresenceState.ONLINE},
             )
         )
         self.reactor.pump([0])  # Wait for presence updates to be handled
@@ -1163,7 +1175,9 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase):
         # Mark test as online
         self.get_success(
             self.presence_handler.set_state(
-                UserID.from_string("@test:server"), {"presence": PresenceState.ONLINE}
+                UserID.from_string("@test:server"),
+                "dev-1",
+                {"presence": PresenceState.ONLINE},
             )
         )

@@ -1171,7 +1185,9 @@
         # Note we don't join them to the room yet
         self.get_success(
             self.presence_handler.set_state(
-                UserID.from_string("@test2:server"), {"presence": PresenceState.ONLINE}
+                UserID.from_string("@test2:server"),
+                "dev-1",
+                {"presence": PresenceState.ONLINE},
             )
         )

From 692ee2af190a82f2484427d0be773a0ff5282be1 Mon Sep 17 00:00:00 2001
From: Chen Zhang
Date: Tue, 29 Aug 2023 02:37:09 -0700
Subject: [PATCH 388/562] Fix inaccurate error message while trying to ban or
 unban a user with the same or higher PL (#16205)

---
 changelog.d/16205.bugfix | 1 +
 synapse/event_auth.py    | 8 +++++++-
 2 files changed, 8 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/16205.bugfix

diff --git a/changelog.d/16205.bugfix b/changelog.d/16205.bugfix
new file mode 100644
index 0000000000..97ac92a148
--- /dev/null
+++ b/changelog.d/16205.bugfix
@@ -0,0 +1 @@
+Fix inaccurate error message while attempting to ban or unban a user with the same or higher PL by splitting the conditional statements. Contributed by @leviosacz.
\ No newline at end of file diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 3a260a492b..531bb74f07 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -669,12 +669,18 @@ def _is_membership_change_allowed( errcode=Codes.INSUFFICIENT_POWER, ) elif Membership.BAN == membership: - if user_level < ban_level or user_level <= target_level: + if user_level < ban_level: raise UnstableSpecAuthError( 403, "You don't have permission to ban", errcode=Codes.INSUFFICIENT_POWER, ) + elif user_level <= target_level: + raise UnstableSpecAuthError( + 403, + "You don't have permission to ban this user", + errcode=Codes.INSUFFICIENT_POWER, + ) elif room_version.knock_join_rule and Membership.KNOCK == membership: if join_rule != JoinRules.KNOCK and ( not room_version.knock_restricted_join_rule From 2d72367367ab39fb1762d2fac28990f00b1bdb52 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 29 Aug 2023 08:34:53 -0400 Subject: [PATCH 389/562] Update black & fix the targeted Python versions. (#16187) Black should target Python 3.8 to 3.11. --- changelog.d/16187.misc | 1 + poetry.lock | 51 ++++++++++++++++++++---------------------- pyproject.toml | 9 +++++--- 3 files changed, 31 insertions(+), 30 deletions(-) create mode 100644 changelog.d/16187.misc diff --git a/changelog.d/16187.misc b/changelog.d/16187.misc new file mode 100644 index 0000000000..989147274a --- /dev/null +++ b/changelog.d/16187.misc @@ -0,0 +1 @@ +Bump black version to 23.7.0. diff --git a/poetry.lock b/poetry.lock index ef12c1cc9e..70b443069c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -148,36 +148,33 @@ lxml = ["lxml"] [[package]] name = "black" -version = "23.3.0" +version = "23.7.0" description = "The uncompromising code formatter." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "black-23.3.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:0945e13506be58bf7db93ee5853243eb368ace1c08a24c65ce108986eac65915"}, - {file = "black-23.3.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:67de8d0c209eb5b330cce2469503de11bca4085880d62f1628bd9972cc3366b9"}, - {file = "black-23.3.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:7c3eb7cea23904399866c55826b31c1f55bbcd3890ce22ff70466b907b6775c2"}, - {file = "black-23.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32daa9783106c28815d05b724238e30718f34155653d4d6e125dc7daec8e260c"}, - {file = "black-23.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:35d1381d7a22cc5b2be2f72c7dfdae4072a3336060635718cc7e1ede24221d6c"}, - {file = "black-23.3.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:a8a968125d0a6a404842fa1bf0b349a568634f856aa08ffaff40ae0dfa52e7c6"}, - {file = "black-23.3.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c7ab5790333c448903c4b721b59c0d80b11fe5e9803d8703e84dcb8da56fec1b"}, - {file = "black-23.3.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:a6f6886c9869d4daae2d1715ce34a19bbc4b95006d20ed785ca00fa03cba312d"}, - {file = "black-23.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f3c333ea1dd6771b2d3777482429864f8e258899f6ff05826c3a4fcc5ce3f70"}, - {file = "black-23.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:11c410f71b876f961d1de77b9699ad19f939094c3a677323f43d7a29855fe326"}, - {file = "black-23.3.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:1d06691f1eb8de91cd1b322f21e3bfc9efe0c7ca1f0e1eb1db44ea367dff656b"}, - {file = "black-23.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:50cb33cac881766a5cd9913e10ff75b1e8eb71babf4c7104f2e9c52da1fb7de2"}, - {file = "black-23.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e114420bf26b90d4b9daa597351337762b63039752bdf72bf361364c1aa05925"}, - {file = "black-23.3.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:48f9d345675bb7fbc3dd85821b12487e1b9a75242028adad0333ce36ed2a6d27"}, - {file = "black-23.3.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:714290490c18fb0126baa0fca0a54ee795f7502b44177e1ce7624ba1c00f2331"}, - {file = "black-23.3.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:064101748afa12ad2291c2b91c960be28b817c0c7eaa35bec09cc63aa56493c5"}, - {file = "black-23.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:562bd3a70495facf56814293149e51aa1be9931567474993c7942ff7d3533961"}, - {file = "black-23.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:e198cf27888ad6f4ff331ca1c48ffc038848ea9f031a3b40ba36aced7e22f2c8"}, - {file = "black-23.3.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:3238f2aacf827d18d26db07524e44741233ae09a584273aa059066d644ca7b30"}, - {file = "black-23.3.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:f0bd2f4a58d6666500542b26354978218a9babcdc972722f4bf90779524515f3"}, - {file = "black-23.3.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:92c543f6854c28a3c7f39f4d9b7694f9a6eb9d3c5e2ece488c327b6e7ea9b266"}, - {file = "black-23.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a150542a204124ed00683f0db1f5cf1c2aaaa9cc3495b7a3b5976fb136090ab"}, - {file = "black-23.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:6b39abdfb402002b8a7d030ccc85cf5afff64ee90fa4c5aebc531e3ad0175ddb"}, - {file = "black-23.3.0-py3-none-any.whl", hash = "sha256:ec751418022185b0c1bb7d7736e6933d40bbb14c14a0abcf9123d1b159f98dd4"}, - {file = "black-23.3.0.tar.gz", hash = "sha256:1c7b8d606e728a41ea1ccbd7264677e494e87cf630e399262ced92d4a8dac940"}, + {file = "black-23.7.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:5c4bc552ab52f6c1c506ccae05681fab58c3f72d59ae6e6639e8885e94fe2587"}, + {file = "black-23.7.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:552513d5cd5694590d7ef6f46e1767a4df9af168d449ff767b13b084c020e63f"}, + {file = "black-23.7.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:86cee259349b4448adb4ef9b204bb4467aae74a386bce85d56ba4f5dc0da27be"}, + {file = "black-23.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:501387a9edcb75d7ae8a4412bb8749900386eaef258f1aefab18adddea1936bc"}, + {file = "black-23.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:fb074d8b213749fa1d077d630db0d5f8cc3b2ae63587ad4116e8a436e9bbe995"}, + {file = "black-23.7.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:b5b0ee6d96b345a8b420100b7d71ebfdd19fab5e8301aff48ec270042cd40ac2"}, + {file = "black-23.7.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:893695a76b140881531062d48476ebe4a48f5d1e9388177e175d76234ca247cd"}, + {file = "black-23.7.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:c333286dc3ddca6fdff74670b911cccedacb4ef0a60b34e491b8a67c833b343a"}, + {file = "black-23.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:831d8f54c3a8c8cf55f64d0422ee875eecac26f5f649fb6c1df65316b67c8926"}, + {file = "black-23.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:7f3bf2dec7d541b4619b8ce526bda74a6b0bffc480a163fed32eb8b3c9aed8ad"}, + {file = "black-23.7.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:f9062af71c59c004cd519e2fb8f5d25d39e46d3af011b41ab43b9c74e27e236f"}, + {file = 
"black-23.7.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:01ede61aac8c154b55f35301fac3e730baf0c9cf8120f65a9cd61a81cfb4a0c3"}, + {file = "black-23.7.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:327a8c2550ddc573b51e2c352adb88143464bb9d92c10416feb86b0f5aee5ff6"}, + {file = "black-23.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d1c6022b86f83b632d06f2b02774134def5d4d4f1dac8bef16d90cda18ba28a"}, + {file = "black-23.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:27eb7a0c71604d5de083757fbdb245b1a4fae60e9596514c6ec497eb63f95320"}, + {file = "black-23.7.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:8417dbd2f57b5701492cd46edcecc4f9208dc75529bcf76c514864e48da867d9"}, + {file = "black-23.7.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:47e56d83aad53ca140da0af87678fb38e44fd6bc0af71eebab2d1f59b1acf1d3"}, + {file = "black-23.7.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:25cc308838fe71f7065df53aedd20327969d05671bac95b38fdf37ebe70ac087"}, + {file = "black-23.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:642496b675095d423f9b8448243336f8ec71c9d4d57ec17bf795b67f08132a91"}, + {file = "black-23.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:ad0014efc7acf0bd745792bd0d8857413652979200ab924fbf239062adc12491"}, + {file = "black-23.7.0-py3-none-any.whl", hash = "sha256:9fd59d418c60c0348505f2ddf9609c1e1de8e7493eab96198fc89d9f865e7a96"}, + {file = "black-23.7.0.tar.gz", hash = "sha256:022a582720b0d9480ed82576c920a8c1dde97cc38ff11d8d8859b3bd6ca9eedb"}, ] [package.dependencies] diff --git a/pyproject.toml b/pyproject.toml index c2421d7257..499dd9532d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ showcontent = true [tool.black] -target-version = ['py37', 'py38', 'py39', 'py310'] +target-version = ['py38', 'py39', 'py310', 'py311'] # black ignores everything in .gitignore by default, see # https://black.readthedocs.io/en/stable/usage_and_configuration/file_collection_and_discovery.html#gitignore # Use `extend-exclude` if you want to exclude something in addition to this. @@ -306,9 +306,12 @@ all = [ ] [tool.poetry.dev-dependencies] -# We pin black so that our tests don't start failing on new releases. +# We pin development dependencies in poetry.lock so that our tests don't start +# failing on new releases. Keeping lower bounds loose here means that dependabot +# can bump versions without having to update the content-hash in the lockfile. +# This helps prevents merge conflicts when running a batch of dependabot updates. isort = ">=5.10.1" -black = ">=22.3.0" +black = ">=22.7.0" ruff = "0.0.277" # Typechecking From 63b51ef3fbb548cdc7899720f8c40bb65756f655 Mon Sep 17 00:00:00 2001 From: Evilham Date: Tue, 29 Aug 2023 15:33:58 +0200 Subject: [PATCH 390/562] Support IPv6-only SMTP servers (#16155) Use Twisted HostnameEndpoint to connect to SMTP servers (instead of connectTCP/connectSSL) which properly supports IPv6-only servers. --- changelog.d/16155.bugfix | 1 + synapse/handlers/send_email.py | 28 +++++-------- tests/handlers/test_send_email.py | 69 ++++++++++++++++++++++++++----- tests/server.py | 54 +++++++++++++++++++++++- tests/unittest.py | 2 +- 5 files changed, 125 insertions(+), 29 deletions(-) create mode 100644 changelog.d/16155.bugfix diff --git a/changelog.d/16155.bugfix b/changelog.d/16155.bugfix new file mode 100644 index 0000000000..8b2dc04006 --- /dev/null +++ b/changelog.d/16155.bugfix @@ -0,0 +1 @@ +Fix IPv6-related bugs on SMTP settings, adding groundwork to fix similar issues. 
Contributed by @evilham and @telmich (ungleich.ch). diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py index 804cc6e81e..05e21509de 100644 --- a/synapse/handlers/send_email.py +++ b/synapse/handlers/send_email.py @@ -23,9 +23,11 @@ from pkg_resources import parse_version import twisted from twisted.internet.defer import Deferred -from twisted.internet.interfaces import IOpenSSLContextFactory +from twisted.internet.endpoints import HostnameEndpoint +from twisted.internet.interfaces import IOpenSSLContextFactory, IProtocolFactory from twisted.internet.ssl import optionsForClientTLS from twisted.mail.smtp import ESMTPSender, ESMTPSenderFactory +from twisted.protocols.tls import TLSMemoryBIOFactory from synapse.logging.context import make_deferred_yieldable from synapse.types import ISynapseReactor @@ -97,6 +99,7 @@ async def _sendmail( **kwargs, ) + factory: IProtocolFactory if _is_old_twisted: # before twisted 21.2, we have to override the ESMTPSender protocol to disable # TLS @@ -110,22 +113,13 @@ async def _sendmail( factory = build_sender_factory(hostname=smtphost if enable_tls else None) if force_tls: - reactor.connectSSL( - smtphost, - smtpport, - factory, - optionsForClientTLS(smtphost), - timeout=30, - bindAddress=None, - ) - else: - reactor.connectTCP( - smtphost, - smtpport, - factory, - timeout=30, - bindAddress=None, - ) + factory = TLSMemoryBIOFactory(optionsForClientTLS(smtphost), True, factory) + + endpoint = HostnameEndpoint( + reactor, smtphost, smtpport, timeout=30, bindAddress=None + ) + + await make_deferred_yieldable(endpoint.connect(factory)) await make_deferred_yieldable(d) diff --git a/tests/handlers/test_send_email.py b/tests/handlers/test_send_email.py index 8b6e4a40b6..a066745d70 100644 --- a/tests/handlers/test_send_email.py +++ b/tests/handlers/test_send_email.py @@ -13,19 +13,40 @@ # limitations under the License. -from typing import Callable, List, Tuple +from typing import Callable, List, Tuple, Type, Union +from unittest.mock import patch from zope.interface import implementer from twisted.internet import defer -from twisted.internet.address import IPv4Address +from twisted.internet._sslverify import ClientTLSOptions +from twisted.internet.address import IPv4Address, IPv6Address from twisted.internet.defer import ensureDeferred +from twisted.internet.interfaces import IProtocolFactory +from twisted.internet.ssl import ContextFactory from twisted.mail import interfaces, smtp from tests.server import FakeTransport from tests.unittest import HomeserverTestCase, override_config +def TestingESMTPTLSClientFactory( + contextFactory: ContextFactory, + _connectWrapped: bool, + wrappedProtocol: IProtocolFactory, +) -> IProtocolFactory: + """We use this to pass through in testing without using TLS, but + saving the context information to check that it would have happened. + + Note that this is what the MemoryReactor does on connectSSL. + It only saves the contextFactory, but starts the connection with the + underlying Factory. 
+    See: L{twisted.internet.testing.MemoryReactor.connectSSL}"""
+
+    wrappedProtocol._testingContextFactory = contextFactory  # type: ignore[attr-defined]
+    return wrappedProtocol
+
+
 @implementer(interfaces.IMessageDelivery)
 class _DummyMessageDelivery:
     def __init__(self) -> None:
@@ -75,7 +96,13 @@ class _DummyMessage:
         pass


-class SendEmailHandlerTestCase(HomeserverTestCase):
+class SendEmailHandlerTestCaseIPv4(HomeserverTestCase):
+    ip_class: Union[Type[IPv4Address], Type[IPv6Address]] = IPv4Address
+
+    def setUp(self) -> None:
+        super().setUp()
+        self.reactor.lookups["localhost"] = "127.0.0.1"
+
     def test_send_email(self) -> None:
         """Happy-path test that we can send email to a non-TLS server."""
         h = self.hs.get_send_email_handler()
@@ -89,7 +116,7 @@ class SendEmailHandlerTestCase(HomeserverTestCase):
         (host, port, client_factory, _timeout, _bindAddress) = self.reactor.tcpClients[
             0
         ]
-        self.assertEqual(host, "localhost")
+        self.assertEqual(host, self.reactor.lookups["localhost"])
         self.assertEqual(port, 25)

         # wire it up to an SMTP server
@@ -105,7 +132,9 @@ class SendEmailHandlerTestCase(HomeserverTestCase):
             FakeTransport(
                 client_protocol,
                 self.reactor,
-                peer_address=IPv4Address("TCP", "127.0.0.1", 1234),
+                peer_address=self.ip_class(
+                    "TCP", self.reactor.lookups["localhost"], 1234
+                ),
             )
         )

@@ -118,6 +147,10 @@ class SendEmailHandlerTestCase(HomeserverTestCase):
         self.assertEqual(str(user), "foo@bar.com")
         self.assertIn(b"Subject: test subject", msg)

+    @patch(
+        "synapse.handlers.send_email.TLSMemoryBIOFactory",
+        TestingESMTPTLSClientFactory,
+    )
     @override_config(
         {
             "email": {
@@ -135,17 +168,23 @@ class SendEmailHandlerTestCase(HomeserverTestCase):
             )
         )
         # there should be an attempt to connect to localhost:465
-        self.assertEqual(len(self.reactor.sslClients), 1)
+        self.assertEqual(len(self.reactor.tcpClients), 1)
         (
             host,
             port,
             client_factory,
-            contextFactory,
             _timeout,
             _bindAddress,
-        ) = self.reactor.sslClients[0]
-        self.assertEqual(host, "localhost")
+        ) = self.reactor.tcpClients[0]
+        self.assertEqual(host, self.reactor.lookups["localhost"])
         self.assertEqual(port, 465)
+        # We need to make sure that TLS is happening
+        self.assertIsInstance(
+            client_factory._wrappedFactory._testingContextFactory,
+            ClientTLSOptions,
+        )
+        # And since we use endpoints, they go through reactor.connectTCP
+        # which works differently to connectSSL on the testing reactor

         # wire it up to an SMTP server
         message_delivery = _DummyMessageDelivery()
@@ -160,7 +199,9 @@ class SendEmailHandlerTestCase(HomeserverTestCase):
             FakeTransport(
                 client_protocol,
                 self.reactor,
-                peer_address=IPv4Address("TCP", "127.0.0.1", 1234),
+                peer_address=self.ip_class(
+                    "TCP", self.reactor.lookups["localhost"], 1234
+                ),
             )
         )

@@ -172,3 +213,11 @@ class SendEmailHandlerTestCase(HomeserverTestCase):
         user, msg = message_delivery.messages.pop()
         self.assertEqual(str(user), "foo@bar.com")
         self.assertIn(b"Subject: test subject", msg)
+
+
+class SendEmailHandlerTestCaseIPv6(SendEmailHandlerTestCaseIPv4):
+    ip_class = IPv6Address
+
+    def setUp(self) -> None:
+        super().setUp()
+        self.reactor.lookups["localhost"] = "::1"
diff --git a/tests/server.py b/tests/server.py
index ff03d28864..659ccce838 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import hashlib
+import ipaddress
 import json
 import logging
 import os
@@ -45,7 +46,7 @@
 import attr
 from typing_extensions import ParamSpec
 from zope.interface import implementer

-from twisted.internet import address, threads, udp
+from twisted.internet import address, tcp, threads, udp
 from twisted.internet._resolver import SimpleResolverComplexifier
 from twisted.internet.defer import Deferred, fail, maybeDeferred, succeed
 from twisted.internet.error import DNSLookupError
@@ -567,6 +568,8 @@ class ThreadedMemoryReactorClock(MemoryReactorClock):
         conn = super().connectTCP(
             host, port, factory, timeout=timeout, bindAddress=None
         )
+        if self.lookups and host in self.lookups:
+            validate_connector(conn, self.lookups[host])

         callback = self._tcp_callbacks.get((host, port))
         if callback:
@@ -599,6 +602,55 @@ class ThreadedMemoryReactorClock(MemoryReactorClock):
         super().advance(0)


+def validate_connector(connector: tcp.Connector, expected_ip: str) -> None:
+    """Try to validate the obtained connector as it would happen when
+    synapse is running and the connection will be established.
+
+    This method will raise a useful exception when necessary, else it will
+    just do nothing.
+
+    This is in order to help catch quirks related to reactor.connectTCP,
+    since when called directly, the connector's destination will be of type
+    IPv4Address, with the hostname as the literal host that was given (which
+    could be an IPv6-only host or an IPv6 literal).
+
+    But when called from reactor.connectTCP *through* e.g. an Endpoint, the
+    connector's destination will contain the specific IP address with the
+    correct network stack class.
+
+    Note that testing code paths that use connectTCP directly should not be
+    affected by this check, unless they specifically add a test with a
+    matching reactor.lookups[HOSTNAME] = "IPv6Literal", where reactor is of
+    type ThreadedMemoryReactorClock.
+    For an example of implementing such tests, see tests/handlers/test_send_email.py.
+    """
+    destination = connector.getDestination()
+
+    # We use address.IPv{4,6}Address to check what the reactor thinks it is
+    # sending, but check for validity with ipaddress.IPv{4,6}Address
+    # because they fail with IPs on the wrong network stack.
+    cls_mapping = {
+        address.IPv4Address: ipaddress.IPv4Address,
+        address.IPv6Address: ipaddress.IPv6Address,
+    }
+
+    cls = cls_mapping.get(destination.__class__)
+
+    if cls is not None:
+        try:
+            cls(expected_ip)
+        except Exception as exc:
+            raise ValueError(
+                "Invalid IP type and resolution for %s. Expected %s to be %s"
+                % (destination, expected_ip, cls.__name__)
+            ) from exc
+    else:
+        raise ValueError(
+            "Unknown address type %s for %s"
+            % (destination.__class__.__name__, destination)
+        )
+
+
 class ThreadPool:
     """
    Threadless thread pool.
diff --git a/tests/unittest.py b/tests/unittest.py
index b0721e060c..40672a4415 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -313,7 +313,7 @@ class HomeserverTestCase(TestCase):
         servlets: List of servlet registration function.
         user_id (str): The user ID to assume if auth is hijacked.
         hijack_auth: Whether to hijack auth to return the user specified
-           in user_id.
+            in user_id.
""" hijack_auth: ClassVar[bool] = True From 001fc7bd199b335f628908a0c91e44967cef2c2b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Aug 2023 09:41:43 -0400 Subject: [PATCH 391/562] Bump ruff from 0.0.277 to 0.0.286 (#16198) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 38 +++++++++++------------ pyproject.toml | 2 +- synapse/config/_base.py | 8 ++--- synapse/config/appservice.py | 2 +- synapse/event_auth.py | 4 +-- synapse/events/utils.py | 4 +-- synapse/events/validator.py | 4 +-- synapse/federation/federation_base.py | 2 +- synapse/federation/federation_client.py | 2 +- synapse/handlers/message.py | 2 +- synapse/http/matrixfederationclient.py | 2 +- synapse/media/oembed.py | 2 +- synapse/media/thumbnailer.py | 2 +- synapse/push/bulk_push_rule_evaluator.py | 8 +++-- synapse/rest/admin/__init__.py | 2 +- synapse/rest/admin/registration_tokens.py | 21 ++++++++----- synapse/rest/admin/users.py | 7 +++-- synapse/rest/client/report_event.py | 2 +- synapse/storage/databases/main/events.py | 6 ++-- 19 files changed, 67 insertions(+), 53 deletions(-) diff --git a/poetry.lock b/poetry.lock index 70b443069c..1d37c88328 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2324,28 +2324,28 @@ files = [ [[package]] name = "ruff" -version = "0.0.277" +version = "0.0.286" description = "An extremely fast Python linter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.277-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:3250b24333ef419b7a232080d9724ccc4d2da1dbbe4ce85c4caa2290d83200f8"}, - {file = "ruff-0.0.277-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:3e60605e07482183ba1c1b7237eca827bd6cbd3535fe8a4ede28cbe2a323cb97"}, - {file = "ruff-0.0.277-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7baa97c3d7186e5ed4d5d4f6834d759a27e56cf7d5874b98c507335f0ad5aadb"}, - {file = "ruff-0.0.277-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:74e4b206cb24f2e98a615f87dbe0bde18105217cbcc8eb785bb05a644855ba50"}, - {file = "ruff-0.0.277-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:479864a3ccd8a6a20a37a6e7577bdc2406868ee80b1e65605478ad3b8eb2ba0b"}, - {file = "ruff-0.0.277-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:468bfb0a7567443cec3d03cf408d6f562b52f30c3c29df19927f1e0e13a40cd7"}, - {file = "ruff-0.0.277-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f32ec416c24542ca2f9cc8c8b65b84560530d338aaf247a4a78e74b99cd476b4"}, - {file = "ruff-0.0.277-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:14a7b2f00f149c5a295f188a643ac25226ff8a4d08f7a62b1d4b0a1dc9f9b85c"}, - {file = "ruff-0.0.277-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9879f59f763cc5628aa01c31ad256a0f4dc61a29355c7315b83c2a5aac932b5"}, - {file = "ruff-0.0.277-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f612e0a14b3d145d90eb6ead990064e22f6f27281d847237560b4e10bf2251f3"}, - {file = "ruff-0.0.277-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:323b674c98078be9aaded5b8b51c0d9c424486566fb6ec18439b496ce79e5998"}, - {file = "ruff-0.0.277-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3a43fbe026ca1a2a8c45aa0d600a0116bec4dfa6f8bf0c3b871ecda51ef2b5dd"}, - {file = "ruff-0.0.277-py3-none-musllinux_1_2_x86_64.whl", hash = 
"sha256:734165ea8feb81b0d53e3bf523adc2413fdb76f1264cde99555161dd5a725522"}, - {file = "ruff-0.0.277-py3-none-win32.whl", hash = "sha256:88d0f2afb2e0c26ac1120e7061ddda2a566196ec4007bd66d558f13b374b9efc"}, - {file = "ruff-0.0.277-py3-none-win_amd64.whl", hash = "sha256:6fe81732f788894a00f6ade1fe69e996cc9e485b7c35b0f53fb00284397284b2"}, - {file = "ruff-0.0.277-py3-none-win_arm64.whl", hash = "sha256:2d4444c60f2e705c14cd802b55cd2b561d25bf4311702c463a002392d3116b22"}, - {file = "ruff-0.0.277.tar.gz", hash = "sha256:2dab13cdedbf3af6d4427c07f47143746b6b95d9e4a254ac369a0edb9280a0d2"}, + {file = "ruff-0.0.286-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:8e22cb557e7395893490e7f9cfea1073d19a5b1dd337f44fd81359b2767da4e9"}, + {file = "ruff-0.0.286-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:68ed8c99c883ae79a9133cb1a86d7130feee0397fdf5ba385abf2d53e178d3fa"}, + {file = "ruff-0.0.286-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8301f0bb4ec1a5b29cfaf15b83565136c47abefb771603241af9d6038f8981e8"}, + {file = "ruff-0.0.286-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:acc4598f810bbc465ce0ed84417ac687e392c993a84c7eaf3abf97638701c1ec"}, + {file = "ruff-0.0.286-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88c8e358b445eb66d47164fa38541cfcc267847d1e7a92dd186dddb1a0a9a17f"}, + {file = "ruff-0.0.286-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:0433683d0c5dbcf6162a4beb2356e820a593243f1fa714072fec15e2e4f4c939"}, + {file = "ruff-0.0.286-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddb61a0c4454cbe4623f4a07fef03c5ae921fe04fede8d15c6e36703c0a73b07"}, + {file = "ruff-0.0.286-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:47549c7c0be24c8ae9f2bce6f1c49fbafea83bca80142d118306f08ec7414041"}, + {file = "ruff-0.0.286-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:559aa793149ac23dc4310f94f2c83209eedb16908a0343663be19bec42233d25"}, + {file = "ruff-0.0.286-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d73cfb1c3352e7aa0ce6fb2321f36fa1d4a2c48d2ceac694cb03611ddf0e4db6"}, + {file = "ruff-0.0.286-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:3dad93b1f973c6d1db4b6a5da8690c5625a3fa32bdf38e543a6936e634b83dc3"}, + {file = "ruff-0.0.286-py3-none-musllinux_1_2_i686.whl", hash = "sha256:26afc0851f4fc3738afcf30f5f8b8612a31ac3455cb76e611deea80f5c0bf3ce"}, + {file = "ruff-0.0.286-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:9b6b116d1c4000de1b9bf027131dbc3b8a70507788f794c6b09509d28952c512"}, + {file = "ruff-0.0.286-py3-none-win32.whl", hash = "sha256:556e965ac07c1e8c1c2d759ac512e526ecff62c00fde1a046acb088d3cbc1a6c"}, + {file = "ruff-0.0.286-py3-none-win_amd64.whl", hash = "sha256:5d295c758961376c84aaa92d16e643d110be32add7465e197bfdaec5a431a107"}, + {file = "ruff-0.0.286-py3-none-win_arm64.whl", hash = "sha256:1d6142d53ab7f164204b3133d053c4958d4d11ec3a39abf23a40b13b0784e3f0"}, + {file = "ruff-0.0.286.tar.gz", hash = "sha256:f1e9d169cce81a384a26ee5bb8c919fe9ae88255f39a1a69fd1ebab233a85ed2"}, ] [[package]] @@ -3339,4 +3339,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "0a8c6605e7e1d0ac7188a5d02b47a029bfb0f917458b87cb40755911442383d8" +content-hash = "87163d8994d09d3a7983ff647a9987d4277a3966dee48741437f4e98bca7e6db" diff --git a/pyproject.toml b/pyproject.toml index 499dd9532d..2f1277ab52 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -312,7 
+312,7 @@ all = [ # This helps prevents merge conflicts when running a batch of dependabot updates. isort = ">=5.10.1" black = ">=22.7.0" -ruff = "0.0.277" +ruff = "0.0.286" # Typechecking lxml-stubs = ">=0.4.0" diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 1d268a1817..69a8318127 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -186,9 +186,9 @@ class Config: TypeError, if given something other than an integer or a string ValueError: if given a string not of the form described above. """ - if type(value) is int: + if type(value) is int: # noqa: E721 return value - elif type(value) is str: + elif isinstance(value, str): sizes = {"K": 1024, "M": 1024 * 1024} size = 1 suffix = value[-1] @@ -218,9 +218,9 @@ class Config: TypeError, if given something other than an integer or a string ValueError: if given a string not of the form described above. """ - if type(value) is int: + if type(value) is int: # noqa: E721 return value - elif type(value) is str: + elif isinstance(value, str): second = 1000 minute = 60 * second hour = 60 * minute diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index 919f81a9b7..a70dfbf41f 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -34,7 +34,7 @@ class AppServiceConfig(Config): def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.app_service_config_files = config.get("app_service_config_files", []) if not isinstance(self.app_service_config_files, list) or not all( - type(x) is str for x in self.app_service_config_files + isinstance(x, str) for x in self.app_service_config_files ): raise ConfigError( "Expected '%s' to be a list of AS config files:" diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 531bb74f07..2ac9f8b309 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -852,11 +852,11 @@ def _check_power_levels( "kick", "invite", }: - if type(v) is not int: + if type(v) is not int: # noqa: E721 raise SynapseError(400, f"{v!r} must be an integer.") if k in {"events", "notifications", "users"}: if not isinstance(v, collections.abc.Mapping) or not all( - type(v) is int for v in v.values() + type(v) is int for v in v.values() # noqa: E721 ): raise SynapseError( 400, diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 52acb21955..53af423a5a 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -702,7 +702,7 @@ def _copy_power_level_value_as_integer( :raises TypeError: if `old_value` is neither an integer nor a base-10 string representation of an integer. 
""" - if type(old_value) is int: + if type(old_value) is int: # noqa: E721 power_levels[key] = old_value return @@ -730,7 +730,7 @@ def validate_canonicaljson(value: Any) -> None: * Floats * NaN, Infinity, -Infinity """ - if type(value) is int: + if type(value) is int: # noqa: E721 if value < CANONICALJSON_MIN_INT or CANONICALJSON_MAX_INT < value: raise SynapseError(400, "JSON integer out of range", Codes.BAD_JSON) diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 9278f1a1aa..34625dd7a1 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -151,7 +151,7 @@ class EventValidator: max_lifetime = event.content.get("max_lifetime") if min_lifetime is not None: - if type(min_lifetime) is not int: + if type(min_lifetime) is not int: # noqa: E721 raise SynapseError( code=400, msg="'min_lifetime' must be an integer", @@ -159,7 +159,7 @@ class EventValidator: ) if max_lifetime is not None: - if type(max_lifetime) is not int: + if type(max_lifetime) is not int: # noqa: E721 raise SynapseError( code=400, msg="'max_lifetime' must be an integer", diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 31e0260b83..d4e7dd45a9 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -280,7 +280,7 @@ def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventB _strip_unsigned_values(pdu_json) depth = pdu_json["depth"] - if type(depth) is not int: + if type(depth) is not int: # noqa: E721 raise SynapseError(400, "Depth %r not an intger" % (depth,), Codes.BAD_JSON) if depth < 0: diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 89bd597409..607013f121 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -1891,7 +1891,7 @@ class TimestampToEventResponse: ) origin_server_ts = d.get("origin_server_ts") - if type(origin_server_ts) is not int: + if type(origin_server_ts) is not int: # noqa: E721 raise ValueError( "Invalid response: 'origin_server_ts' must be a int but received %r" % origin_server_ts diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 4a15c76a7b..187c3e6cc0 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -379,7 +379,7 @@ class MessageHandler: """ expiry_ts = event.content.get(EventContentFields.SELF_DESTRUCT_AFTER) - if type(expiry_ts) is not int or event.is_state(): + if type(expiry_ts) is not int or event.is_state(): # noqa: E721 return # _schedule_expiry_for_event won't actually schedule anything if there's already diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 583c03447c..11342ccac8 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -243,7 +243,7 @@ class LegacyJsonSendParser(_BaseJsonParser[Tuple[int, JsonDict]]): return ( isinstance(v, list) and len(v) == 2 - and type(v[0]) == int + and type(v[0]) == int # noqa: E721 and isinstance(v[1], dict) ) diff --git a/synapse/media/oembed.py b/synapse/media/oembed.py index 5ad9eec80b..2ce842c98d 100644 --- a/synapse/media/oembed.py +++ b/synapse/media/oembed.py @@ -204,7 +204,7 @@ class OEmbedProvider: calc_description_and_urls(open_graph_response, oembed["html"]) for size in ("width", "height"): val = oembed.get(size) - if type(val) is int: + if type(val) is int: # noqa: E721 open_graph_response[f"og:video:{size}"] = val elif oembed_type == "link": 
diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py index 2bfa58ceee..d8979813b3 100644 --- a/synapse/media/thumbnailer.py +++ b/synapse/media/thumbnailer.py @@ -78,7 +78,7 @@ class Thumbnailer: image_exif = self.image._getexif() # type: ignore if image_exif is not None: image_orientation = image_exif.get(EXIF_ORIENTATION_TAG) - assert type(image_orientation) is int + assert type(image_orientation) is int # noqa: E721 self.transpose_method = EXIF_TRANSPOSE_MAPPINGS.get(image_orientation) except Exception as e: # A lot of parsing errors can happen when parsing EXIF diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 990c079c81..554634579e 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -379,7 +379,7 @@ class BulkPushRuleEvaluator: keys = list(notification_levels.keys()) for key in keys: level = notification_levels.get(key, SENTINEL) - if level is not SENTINEL and type(level) is not int: + if level is not SENTINEL and type(level) is not int: # noqa: E721 try: notification_levels[key] = int(level) except (TypeError, ValueError): @@ -472,7 +472,11 @@ StateGroup = Union[object, int] def _is_simple_value(value: Any) -> bool: - return isinstance(value, (bool, str)) or type(value) is int or value is None + return ( + isinstance(value, (bool, str)) + or type(value) is int # noqa: E721 + or value is None + ) def _flatten_dict( diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 55e752fda8..94170715fb 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -157,7 +157,7 @@ class PurgeHistoryRestServlet(RestServlet): logger.info("[purge] purging up to token %s (event_id %s)", token, event_id) elif "purge_up_to_ts" in body: ts = body["purge_up_to_ts"] - if type(ts) is not int: + if type(ts) is not int: # noqa: E721 raise SynapseError( HTTPStatus.BAD_REQUEST, "purge_up_to_ts must be an int", diff --git a/synapse/rest/admin/registration_tokens.py b/synapse/rest/admin/registration_tokens.py index 95e751288b..ffce92d45e 100644 --- a/synapse/rest/admin/registration_tokens.py +++ b/synapse/rest/admin/registration_tokens.py @@ -143,7 +143,7 @@ class NewRegistrationTokenRestServlet(RestServlet): else: # Get length of token to generate (default is 16) length = body.get("length", 16) - if type(length) is not int: + if type(length) is not int: # noqa: E721 raise SynapseError( HTTPStatus.BAD_REQUEST, "length must be an integer", @@ -163,7 +163,8 @@ class NewRegistrationTokenRestServlet(RestServlet): uses_allowed = body.get("uses_allowed", None) if not ( - uses_allowed is None or (type(uses_allowed) is int and uses_allowed >= 0) + uses_allowed is None + or (type(uses_allowed) is int and uses_allowed >= 0) # noqa: E721 ): raise SynapseError( HTTPStatus.BAD_REQUEST, @@ -172,13 +173,16 @@ class NewRegistrationTokenRestServlet(RestServlet): ) expiry_time = body.get("expiry_time", None) - if type(expiry_time) not in (int, type(None)): + if expiry_time is not None and type(expiry_time) is not int: # noqa: E721 raise SynapseError( HTTPStatus.BAD_REQUEST, "expiry_time must be an integer or null", Codes.INVALID_PARAM, ) - if type(expiry_time) is int and expiry_time < self.clock.time_msec(): + if ( + type(expiry_time) is int # noqa: E721 + and expiry_time < self.clock.time_msec() + ): raise SynapseError( HTTPStatus.BAD_REQUEST, "expiry_time must not be in the past", @@ -283,7 +287,7 @@ class RegistrationTokenRestServlet(RestServlet): 
uses_allowed = body["uses_allowed"] if not ( uses_allowed is None - or (type(uses_allowed) is int and uses_allowed >= 0) + or (type(uses_allowed) is int and uses_allowed >= 0) # noqa: E721 ): raise SynapseError( HTTPStatus.BAD_REQUEST, @@ -294,13 +298,16 @@ class RegistrationTokenRestServlet(RestServlet): if "expiry_time" in body: expiry_time = body["expiry_time"] - if type(expiry_time) not in (int, type(None)): + if expiry_time is not None and type(expiry_time) is not int: # noqa: E721 raise SynapseError( HTTPStatus.BAD_REQUEST, "expiry_time must be an integer or null", Codes.INVALID_PARAM, ) - if type(expiry_time) is int and expiry_time < self.clock.time_msec(): + if ( + type(expiry_time) is int # noqa: E721 + and expiry_time < self.clock.time_msec() + ): raise SynapseError( HTTPStatus.BAD_REQUEST, "expiry_time must not be in the past", diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 240e6254b0..625a47ec1a 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -1172,14 +1172,17 @@ class RateLimitRestServlet(RestServlet): messages_per_second = body.get("messages_per_second", 0) burst_count = body.get("burst_count", 0) - if type(messages_per_second) is not int or messages_per_second < 0: + if ( + type(messages_per_second) is not int # noqa: E721 + or messages_per_second < 0 + ): raise SynapseError( HTTPStatus.BAD_REQUEST, "%r parameter must be a positive int" % (messages_per_second,), errcode=Codes.INVALID_PARAM, ) - if type(burst_count) is not int or burst_count < 0: + if type(burst_count) is not int or burst_count < 0: # noqa: E721 raise SynapseError( HTTPStatus.BAD_REQUEST, "%r parameter must be a positive int" % (burst_count,), diff --git a/synapse/rest/client/report_event.py b/synapse/rest/client/report_event.py index ac1a63ca27..ee93e459f6 100644 --- a/synapse/rest/client/report_event.py +++ b/synapse/rest/client/report_event.py @@ -55,7 +55,7 @@ class ReportEventRestServlet(RestServlet): "Param 'reason' must be a string", Codes.BAD_JSON, ) - if type(body.get("score", 0)) is not int: + if type(body.get("score", 0)) is not int: # noqa: E721 raise SynapseError( HTTPStatus.BAD_REQUEST, "Param 'score' must be an integer", diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index c1353b18c1..c784612f59 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1671,7 +1671,7 @@ class PersistEventsStore: if self._ephemeral_messages_enabled: # If there's an expiry timestamp on the event, store it. expiry_ts = event.content.get(EventContentFields.SELF_DESTRUCT_AFTER) - if type(expiry_ts) is int and not event.is_state(): + if type(expiry_ts) is int and not event.is_state(): # noqa: E721 self._insert_event_expiry_txn(txn, event.event_id, expiry_ts) # Insert into the room_memberships table. @@ -2039,10 +2039,10 @@ class PersistEventsStore: ): if ( "min_lifetime" in event.content - and type(event.content["min_lifetime"]) is not int + and type(event.content["min_lifetime"]) is not int # noqa: E721 ) or ( "max_lifetime" in event.content - and type(event.content["max_lifetime"]) is not int + and type(event.content["max_lifetime"]) is not int # noqa: E721 ): # Ignore the event if one of the value isn't an integer. return From 9ec3da06daf70b5e799545a6e12ead4846559d80 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 29 Aug 2023 10:38:56 -0400 Subject: [PATCH 392/562] Bump mypy-zope & mypy. 
(#16188) --- changelog.d/16188.misc | 1 + poetry.lock | 66 +++++++++---------- synapse/_scripts/synapse_port_db.py | 9 ++- synapse/logging/opentracing.py | 14 ++-- synapse/storage/database.py | 17 ++++- synapse/util/check_dependencies.py | 6 +- tests/appservice/test_api.py | 6 +- tests/federation/test_complexity.py | 24 +++---- tests/federation/test_federation_catch_up.py | 4 +- tests/federation/test_federation_sender.py | 4 +- tests/federation/transport/test_knocking.py | 4 +- tests/handlers/test_appservice.py | 10 +-- tests/handlers/test_cas.py | 8 +-- tests/handlers/test_e2e_keys.py | 4 +- tests/handlers/test_federation.py | 4 +- tests/handlers/test_oidc.py | 4 +- tests/handlers/test_password_providers.py | 2 +- tests/handlers/test_register.py | 6 +- tests/handlers/test_saml.py | 14 ++-- tests/handlers/test_typing.py | 26 ++++---- tests/logging/test_terse_json.py | 2 +- tests/module_api/test_api.py | 4 +- tests/push/test_bulk_push_rule_evaluator.py | 2 +- tests/replication/storage/test_events.py | 2 +- tests/rest/admin/test_user.py | 4 +- tests/rest/admin/test_username_available.py | 2 +- tests/rest/client/test_account.py | 2 +- tests/rest/client/test_events.py | 2 +- tests/rest/client/test_filter.py | 4 +- tests/rest/client/test_rooms.py | 12 ++-- tests/rest/client/test_shadow_banned.py | 2 +- tests/rest/client/test_third_party_rules.py | 2 +- tests/server.py | 2 +- .../test_resource_limits_server_notices.py | 30 ++++----- tests/storage/test_appservice.py | 2 +- tests/storage/test_monthly_active_users.py | 12 ++-- tests/test_federation.py | 6 +- tests/test_state.py | 4 +- tests/unittest.py | 6 +- 39 files changed, 177 insertions(+), 158 deletions(-) create mode 100644 changelog.d/16188.misc diff --git a/changelog.d/16188.misc b/changelog.d/16188.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16188.misc @@ -0,0 +1 @@ +Improve type hints. 
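
One pattern worth noting before the hunks below: the `synapse_port_db` changes in this patch add `assert txn.description is not None` because the DB-API allows `cursor.description` to be `None` (e.g. for statements that return no rows), and the newer mypy flags unguarded use. The assert narrows the Optional type for the following lines. A minimal sketch of the idiom (illustrative only; `header_names` is a made-up helper):

from typing import List, Optional, Sequence, Tuple

def header_names(description: Optional[Sequence[Tuple]]) -> List[str]:
    # cursor.description is None when no result set is available, so
    # mypy requires narrowing before indexing into it.
    assert description is not None
    # After the assert, mypy treats `description` as non-Optional.
    return [column[0] for column in description]
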
diff --git a/poetry.lock b/poetry.lock index 1d37c88328..6d63d71b2c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1445,43 +1445,43 @@ files = [ [[package]] name = "mypy" -version = "1.0.1" +version = "1.4.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.7" files = [ - {file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"}, - {file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"}, - {file = "mypy-1.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0"}, - {file = "mypy-1.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b"}, - {file = "mypy-1.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4"}, - {file = "mypy-1.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262"}, - {file = "mypy-1.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8"}, - {file = "mypy-1.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8"}, - {file = "mypy-1.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65"}, - {file = "mypy-1.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994"}, - {file = "mypy-1.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919"}, - {file = "mypy-1.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4"}, - {file = "mypy-1.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff"}, - {file = "mypy-1.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf"}, - {file = "mypy-1.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76"}, - {file = "mypy-1.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c"}, - {file = "mypy-1.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6"}, - {file = "mypy-1.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88"}, - {file = "mypy-1.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6"}, - {file = "mypy-1.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e"}, - {file = "mypy-1.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5"}, - {file = "mypy-1.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407"}, - {file = 
"mypy-1.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd"}, - {file = "mypy-1.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3"}, - {file = "mypy-1.0.1-py3-none-any.whl", hash = "sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4"}, - {file = "mypy-1.0.1.tar.gz", hash = "sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d"}, + {file = "mypy-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:566e72b0cd6598503e48ea610e0052d1b8168e60a46e0bfd34b3acf2d57f96a8"}, + {file = "mypy-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ca637024ca67ab24a7fd6f65d280572c3794665eaf5edcc7e90a866544076878"}, + {file = "mypy-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dde1d180cd84f0624c5dcaaa89c89775550a675aff96b5848de78fb11adabcd"}, + {file = "mypy-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8c4d8e89aa7de683e2056a581ce63c46a0c41e31bd2b6d34144e2c80f5ea53dc"}, + {file = "mypy-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:bfdca17c36ae01a21274a3c387a63aa1aafe72bff976522886869ef131b937f1"}, + {file = "mypy-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7549fbf655e5825d787bbc9ecf6028731973f78088fbca3a1f4145c39ef09462"}, + {file = "mypy-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:98324ec3ecf12296e6422939e54763faedbfcc502ea4a4c38502082711867258"}, + {file = "mypy-1.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:141dedfdbfe8a04142881ff30ce6e6653c9685b354876b12e4fe6c78598b45e2"}, + {file = "mypy-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8207b7105829eca6f3d774f64a904190bb2231de91b8b186d21ffd98005f14a7"}, + {file = "mypy-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:16f0db5b641ba159eff72cff08edc3875f2b62b2fa2bc24f68c1e7a4e8232d01"}, + {file = "mypy-1.4.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:470c969bb3f9a9efcedbadcd19a74ffb34a25f8e6b0e02dae7c0e71f8372f97b"}, + {file = "mypy-1.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5952d2d18b79f7dc25e62e014fe5a23eb1a3d2bc66318df8988a01b1a037c5b"}, + {file = "mypy-1.4.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:190b6bab0302cec4e9e6767d3eb66085aef2a1cc98fe04936d8a42ed2ba77bb7"}, + {file = "mypy-1.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9d40652cc4fe33871ad3338581dca3297ff5f2213d0df345bcfbde5162abf0c9"}, + {file = "mypy-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:01fd2e9f85622d981fd9063bfaef1aed6e336eaacca00892cd2d82801ab7c042"}, + {file = "mypy-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2460a58faeea905aeb1b9b36f5065f2dc9a9c6e4c992a6499a2360c6c74ceca3"}, + {file = "mypy-1.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2746d69a8196698146a3dbe29104f9eb6a2a4d8a27878d92169a6c0b74435b6"}, + {file = "mypy-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ae704dcfaa180ff7c4cfbad23e74321a2b774f92ca77fd94ce1049175a21c97f"}, + {file = "mypy-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:43d24f6437925ce50139a310a64b2ab048cb2d3694c84c71c3f2a1626d8101dc"}, + {file = "mypy-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c482e1246726616088532b5e964e39765b6d1520791348e6c9dc3af25b233828"}, + {file = "mypy-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:43b592511672017f5b1a483527fd2684347fdffc041c9ef53428c8dc530f79a3"}, + {file = 
"mypy-1.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:34a9239d5b3502c17f07fd7c0b2ae6b7dd7d7f6af35fbb5072c6208e76295816"}, + {file = "mypy-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5703097c4936bbb9e9bce41478c8d08edd2865e177dc4c52be759f81ee4dd26c"}, + {file = "mypy-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e02d700ec8d9b1859790c0475df4e4092c7bf3272a4fd2c9f33d87fac4427b8f"}, + {file = "mypy-1.4.1-py3-none-any.whl", hash = "sha256:45d32cec14e7b97af848bddd97d85ea4f0db4d5a149ed9676caa4eb2f7402bb4"}, + {file = "mypy-1.4.1.tar.gz", hash = "sha256:9bbcd9ab8ea1f2e1c8031c21445b511442cc45c89951e49bbf852cbb70755b1b"}, ] [package.dependencies] -mypy-extensions = ">=0.4.3" +mypy-extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=3.10" +typing-extensions = ">=4.1.0" [package.extras] dmypy = ["psutil (>=4.0)"] @@ -1502,17 +1502,17 @@ files = [ [[package]] name = "mypy-zope" -version = "0.9.1" +version = "1.0.0" description = "Plugin for mypy to support zope interfaces" optional = false python-versions = "*" files = [ - {file = "mypy-zope-0.9.1.tar.gz", hash = "sha256:4c87dbc71fec35f6533746ecdf9d400cd9281338d71c16b5676bb5ed00a97ca2"}, - {file = "mypy_zope-0.9.1-py3-none-any.whl", hash = "sha256:733d4399affe9e61e332ce9c4049418d6775c39b473e4b9f409d51c207c1b71a"}, + {file = "mypy-zope-1.0.0.tar.gz", hash = "sha256:be815c2fcb5333aa87e8ec682029ad3214142fe2a05ea383f9ff2d77c98008b7"}, + {file = "mypy_zope-1.0.0-py3-none-any.whl", hash = "sha256:9732e9b2198f2aec3343b38a51905ff49d44dc9e39e8e8bc6fc490b232388209"}, ] [package.dependencies] -mypy = ">=1.0.0,<1.1.0" +mypy = ">=1.0.0,<1.5.0" "zope.interface" = "*" "zope.schema" = "*" diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 49242800b8..ab2b29cf1b 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -482,7 +482,10 @@ class Porter: do_backward[0] = False if forward_rows or backward_rows: - headers = [column[0] for column in txn.description] + assert txn.description is not None + headers: Optional[List[str]] = [ + column[0] for column in txn.description + ] else: headers = None @@ -544,6 +547,7 @@ class Porter: def r(txn: LoggingTransaction) -> Tuple[List[str], List[Tuple]]: txn.execute(select, (forward_chunk, self.batch_size)) rows = txn.fetchall() + assert txn.description is not None headers = [column[0] for column in txn.description] return headers, rows @@ -919,7 +923,8 @@ class Porter: def r(txn: LoggingTransaction) -> Tuple[List[str], List[Tuple]]: txn.execute(select) rows = txn.fetchall() - headers: List[str] = [column[0] for column in txn.description] + assert txn.description is not None + headers = [column[0] for column in txn.description] ts_ind = headers.index("ts") diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index be910128aa..5c3045e197 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -910,10 +910,10 @@ def _custom_sync_async_decorator( async def _wrapper( *args: P.args, **kwargs: P.kwargs ) -> Any: # Return type is RInner - with wrapping_logic(func, *args, **kwargs): - # type-ignore: func() returns R, but mypy doesn't know that R is - # Awaitable here. - return await func(*args, **kwargs) # type: ignore[misc] + # type-ignore: func() returns R, but mypy doesn't know that R is + # Awaitable here. 
+ with wrapping_logic(func, *args, **kwargs): # type: ignore[arg-type] + return await func(*args, **kwargs) else: # The other case here handles sync functions including those decorated with @@ -980,8 +980,7 @@ def trace_with_opname( See the module's doc string for usage examples. """ - # type-ignore: mypy bug, see https://github.com/python/mypy/issues/12909 - @contextlib.contextmanager # type: ignore[arg-type] + @contextlib.contextmanager def _wrapping_logic( func: Callable[P, R], *args: P.args, **kwargs: P.kwargs ) -> Generator[None, None, None]: @@ -1024,8 +1023,7 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]: if not opentracing: return func - # type-ignore: mypy bug, see https://github.com/python/mypy/issues/12909 - @contextlib.contextmanager # type: ignore[arg-type] + @contextlib.contextmanager def _wrapping_logic( func: Callable[P, R], *args: P.args, **kwargs: P.kwargs ) -> Generator[None, None, None]: diff --git a/synapse/storage/database.py b/synapse/storage/database.py index a1c8fb0f46..55ac313f33 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -31,6 +31,7 @@ from typing import ( Iterator, List, Optional, + Sequence, Tuple, Type, TypeVar, @@ -358,7 +359,21 @@ class LoggingTransaction: return self.txn.rowcount @property - def description(self) -> Any: + def description( + self, + ) -> Optional[ + Sequence[ + Tuple[ + str, + Optional[Any], + Optional[int], + Optional[int], + Optional[int], + Optional[int], + Optional[int], + ] + ] + ]: return self.txn.description def execute_batch(self, sql: str, args: Iterable[Iterable[Any]]) -> None: diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py index 114130a08f..f7cead9e12 100644 --- a/synapse/util/check_dependencies.py +++ b/synapse/util/check_dependencies.py @@ -51,9 +51,9 @@ class DependencyException(Exception): DEV_EXTRAS = {"lint", "mypy", "test", "dev"} -RUNTIME_EXTRAS = ( - set(metadata.metadata(DISTRIBUTION_NAME).get_all("Provides-Extra")) - DEV_EXTRAS -) +ALL_EXTRAS = metadata.metadata(DISTRIBUTION_NAME).get_all("Provides-Extra") +assert ALL_EXTRAS is not None +RUNTIME_EXTRAS = set(ALL_EXTRAS) - DEV_EXTRAS VERSION = metadata.version(DISTRIBUTION_NAME) diff --git a/tests/appservice/test_api.py b/tests/appservice/test_api.py index 3c635e3dcb..75fb5fae6b 100644 --- a/tests/appservice/test_api.py +++ b/tests/appservice/test_api.py @@ -96,7 +96,7 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase): ) # We assign to a method, which mypy doesn't like. - self.api.get_json = Mock(side_effect=get_json) # type: ignore[assignment] + self.api.get_json = Mock(side_effect=get_json) # type: ignore[method-assign] result = self.get_success( self.api.query_3pe(self.service, "user", PROTOCOL, {b"some": [b"field"]}) @@ -168,7 +168,7 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase): ) # We assign to a method, which mypy doesn't like. - self.api.get_json = Mock(side_effect=get_json) # type: ignore[assignment] + self.api.get_json = Mock(side_effect=get_json) # type: ignore[method-assign] result = self.get_success( self.api.query_3pe(self.service, "user", PROTOCOL, {b"some": [b"field"]}) @@ -215,7 +215,7 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase): return RESPONSE # We assign to a method, which mypy doesn't like. 
- self.api.post_json_get_json = Mock(side_effect=post_json_get_json) # type: ignore[assignment] + self.api.post_json_get_json = Mock(side_effect=post_json_get_json) # type: ignore[method-assign] MISSING_KEYS = [ # Known user, known device, missing algorithm. diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index 5b58fb13b5..73a2766baf 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -57,7 +57,7 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): async def get_current_state_event_counts(room_id: str) -> int: return int(500 * 1.23) - store.get_current_state_event_counts = get_current_state_event_counts # type: ignore[assignment] + store.get_current_state_event_counts = get_current_state_event_counts # type: ignore[method-assign] # Get the room complexity again -- make sure it's our artificial value channel = self.make_signed_federation_request( @@ -74,8 +74,8 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = AsyncMock(return_value={"v1": 9999}) # type: ignore[assignment] - handler.federation_handler.do_invite_join = AsyncMock( # type: ignore[assignment] + fed_transport.client.get_json = AsyncMock(return_value={"v1": 9999}) # type: ignore[method-assign] + handler.federation_handler.do_invite_join = AsyncMock( # type: ignore[method-assign] return_value=("", 1) ) @@ -105,8 +105,8 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = AsyncMock(return_value={"v1": 9999}) # type: ignore[assignment] - handler.federation_handler.do_invite_join = AsyncMock( # type: ignore[assignment] + fed_transport.client.get_json = AsyncMock(return_value={"v1": 9999}) # type: ignore[method-assign] + handler.federation_handler.do_invite_join = AsyncMock( # type: ignore[method-assign] return_value=("", 1) ) @@ -142,8 +142,8 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = AsyncMock(return_value=None) # type: ignore[assignment] - handler.federation_handler.do_invite_join = AsyncMock( # type: ignore[assignment] + fed_transport.client.get_json = AsyncMock(return_value=None) # type: ignore[method-assign] + handler.federation_handler.do_invite_join = AsyncMock( # type: ignore[method-assign] return_value=("", 1) ) @@ -151,7 +151,7 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): async def get_current_state_event_counts(room_id: str) -> int: return 600 - self.hs.get_datastores().main.get_current_state_event_counts = get_current_state_event_counts # type: ignore[assignment] + self.hs.get_datastores().main.get_current_state_event_counts = get_current_state_event_counts # type: ignore[method-assign] d = handler._remote_join( create_requester(u1), @@ -199,8 +199,8 @@ class RoomComplexityAdminTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = AsyncMock(return_value={"v1": 9999}) # type: ignore[assignment] - 
handler.federation_handler.do_invite_join = AsyncMock( # type: ignore[assignment] + fed_transport.client.get_json = AsyncMock(return_value={"v1": 9999}) # type: ignore[method-assign] + handler.federation_handler.do_invite_join = AsyncMock( # type: ignore[method-assign] return_value=("", 1) ) @@ -229,8 +229,8 @@ class RoomComplexityAdminTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = AsyncMock(return_value={"v1": 9999}) # type: ignore[assignment] - handler.federation_handler.do_invite_join = AsyncMock( # type: ignore[assignment] + fed_transport.client.get_json = AsyncMock(return_value={"v1": 9999}) # type: ignore[method-assign] + handler.federation_handler.do_invite_join = AsyncMock( # type: ignore[method-assign] return_value=("", 1) ) diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py index 40318aa1b6..75ae740b43 100644 --- a/tests/federation/test_federation_catch_up.py +++ b/tests/federation/test_federation_catch_up.py @@ -50,7 +50,7 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase): # This mock is crucial for destination_rooms to be populated. # TODO: this seems to no longer be the case---tests pass with this mock # commented out. - state_storage_controller.get_current_hosts_in_room = AsyncMock( # type: ignore[assignment] + state_storage_controller.get_current_hosts_in_room = AsyncMock( # type: ignore[method-assign] return_value={"test", "host2"} ) @@ -436,7 +436,7 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase): def wake_destination_track(destination: str) -> None: woken.add(destination) - self.federation_sender.wake_destination = wake_destination_track # type: ignore[assignment] + self.federation_sender.wake_destination = wake_destination_track # type: ignore[method-assign] # We wait quite long so that all dests can be woken up, since there is a delay # between them. 
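The non-test hunks above tighten genuinely loose annotations rather than just renaming ignore codes: `LoggingTransaction.description` is narrowed from `Any` to an `Optional` sequence of the 7-item column descriptors that PEP 249 cursors return (name, type_code, display_size, internal_size, precision, scale, null_ok), and callers in `synapse_port_db.py` and `check_dependencies.py` gain `assert ... is not None` so mypy can narrow those `Optional` values before use. A minimal, self-contained sketch of the same narrowing pattern against the stdlib sqlite3 driver:

```python
# Minimal sketch of the assert-based Optional narrowing used above:
# cursor.description is typed Optional because it is None before any
# SELECT has run, so an assert is needed before indexing into it.
import sqlite3
from typing import List


def column_headers(cursor: sqlite3.Cursor) -> List[str]:
    cursor.execute("SELECT 1 AS one, 2 AS two")
    # After a successful SELECT the descriptors are guaranteed to exist;
    # the assert communicates that guarantee to mypy.
    assert cursor.description is not None
    return [column[0] for column in cursor.description]


conn = sqlite3.connect(":memory:")
print(column_headers(conn.cursor()))  # ['one', 'two']
```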
diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index 5ea4a75a9f..7bd3d06859 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -47,11 +47,11 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase): federation_transport_client=self.federation_transport_client, ) - hs.get_storage_controllers().state.get_current_hosts_in_room = AsyncMock( # type: ignore[assignment] + hs.get_storage_controllers().state.get_current_hosts_in_room = AsyncMock( # type: ignore[method-assign] return_value={"test", "host2"} ) - hs.get_storage_controllers().state.get_current_hosts_in_room_or_partial_state_approximation = ( # type: ignore[assignment] + hs.get_storage_controllers().state.get_current_hosts_in_room_or_partial_state_approximation = ( # type: ignore[method-assign] hs.get_storage_controllers().state.get_current_hosts_in_room ) diff --git a/tests/federation/transport/test_knocking.py b/tests/federation/transport/test_knocking.py index 70209ab090..3f42f79f26 100644 --- a/tests/federation/transport/test_knocking.py +++ b/tests/federation/transport/test_knocking.py @@ -218,7 +218,7 @@ class FederationKnockingTestCase( ) -> EventBase: return pdu - homeserver.get_federation_server()._check_sigs_and_hash = ( # type: ignore[assignment] + homeserver.get_federation_server()._check_sigs_and_hash = ( # type: ignore[method-assign] approve_all_signature_checking ) @@ -229,7 +229,7 @@ class FederationKnockingTestCase( ) -> None: pass - homeserver.get_federation_event_handler()._check_event_auth = _check_event_auth # type: ignore[assignment] + homeserver.get_federation_event_handler()._check_event_auth = _check_event_auth # type: ignore[method-assign] return super().prepare(reactor, clock, homeserver) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 4bd0facd65..46d022092e 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -400,11 +400,11 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase): # Mock the ApplicationServiceScheduler's _TransactionController's send method so that # we can track any outgoing ephemeral events self.send_mock = AsyncMock() - hs.get_application_service_handler().scheduler.txn_ctrl.send = self.send_mock # type: ignore[assignment] + hs.get_application_service_handler().scheduler.txn_ctrl.send = self.send_mock # type: ignore[method-assign] # Mock out application services, and allow defining our own in tests self._services: List[ApplicationService] = [] - self.hs.get_datastores().main.get_app_services = Mock( # type: ignore[assignment] + self.hs.get_datastores().main.get_app_services = Mock( # type: ignore[method-assign] return_value=self._services ) @@ -898,11 +898,11 @@ class ApplicationServicesHandlerDeviceListsTestCase(unittest.HomeserverTestCase) # Mock ApplicationServiceApi's put_json, so we can verify the raw JSON that # will be sent over the wire self.put_json = AsyncMock() - hs.get_application_service_api().put_json = self.put_json # type: ignore[assignment] + hs.get_application_service_api().put_json = self.put_json # type: ignore[method-assign] # Mock out application services, and allow defining our own in tests self._services: List[ApplicationService] = [] - self.hs.get_datastores().main.get_app_services = Mock( # type: ignore[assignment] + self.hs.get_datastores().main.get_app_services = Mock( # type: ignore[method-assign] return_value=self._services ) @@ -1004,7 
+1004,7 @@ class ApplicationServicesHandlerOtkCountsTestCase(unittest.HomeserverTestCase): # Mock the ApplicationServiceScheduler's _TransactionController's send method so that # we can track what's going out self.send_mock = AsyncMock() - hs.get_application_service_handler().scheduler.txn_ctrl.send = self.send_mock # type: ignore[assignment] # We assign to a method. + hs.get_application_service_handler().scheduler.txn_ctrl.send = self.send_mock # type: ignore[method-assign] # We assign to a method. # Define an application service for the tests self._service_token = "VERYSECRET" diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py index 2cb24add20..8582b1cd1e 100644 --- a/tests/handlers/test_cas.py +++ b/tests/handlers/test_cas.py @@ -60,7 +60,7 @@ class CasHandlerTestCase(HomeserverTestCase): # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] cas_response = CasResponse("test_user", {}) request = _mock_request() @@ -88,7 +88,7 @@ class CasHandlerTestCase(HomeserverTestCase): # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] # Map a user via SSO. cas_response = CasResponse("test_user", {}) @@ -128,7 +128,7 @@ class CasHandlerTestCase(HomeserverTestCase): # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] cas_response = CasResponse("föö", {}) request = _mock_request() @@ -159,7 +159,7 @@ class CasHandlerTestCase(HomeserverTestCase): # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] # The response doesn't have the proper userGroup or department. 
cas_response = CasResponse("test_user", {}) diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 7917766a08..c5556f2844 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -800,7 +800,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" - self.hs.get_federation_client().query_client_keys = mock.AsyncMock( # type: ignore[assignment] + self.hs.get_federation_client().query_client_keys = mock.AsyncMock( # type: ignore[method-assign] return_value={ "device_keys": {remote_user_id: {}}, "master_keys": { @@ -876,7 +876,7 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" - self.hs.get_federation_client().query_user_devices = mock.AsyncMock( # type: ignore[assignment] + self.hs.get_federation_client().query_user_devices = mock.AsyncMock( # type: ignore[method-assign] return_value={ "user_id": remote_user_id, "stream_id": 1, diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index bd743b3578..21d63ab1f2 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -371,14 +371,14 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): # We mock out the FederationClient.backfill method, to pretend that a remote # server has returned our fake event. federation_client_backfill_mock = AsyncMock(return_value=[event]) - self.hs.get_federation_client().backfill = federation_client_backfill_mock # type: ignore[assignment] + self.hs.get_federation_client().backfill = federation_client_backfill_mock # type: ignore[method-assign] # We also mock the persist method with a side effect of itself. This allows us # to track when it has been called while preserving its function. persist_events_and_notify_mock = Mock( side_effect=self.hs.get_federation_event_handler().persist_events_and_notify ) - self.hs.get_federation_event_handler().persist_events_and_notify = ( # type: ignore[assignment] + self.hs.get_federation_event_handler().persist_events_and_notify = ( # type: ignore[method-assign] persist_events_and_notify_mock ) diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index 9b2c7812cc..e797aaae00 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -157,7 +157,7 @@ class OidcHandlerTestCase(HomeserverTestCase): sso_handler = hs.get_sso_handler() # Mock the render error method. self.render_error = Mock(return_value=None) - sso_handler.render_error = self.render_error # type: ignore[assignment] + sso_handler.render_error = self.render_error # type: ignore[method-assign] # Reduce the number of attempts when generating MXIDs. sso_handler._MAP_USERNAME_RETRIES = 3 @@ -165,7 +165,7 @@ class OidcHandlerTestCase(HomeserverTestCase): auth_handler = hs.get_auth_handler() # Mock the complete SSO login method. 
self.complete_sso_login = AsyncMock() - auth_handler.complete_sso_login = self.complete_sso_login # type: ignore[assignment] + auth_handler.complete_sso_login = self.complete_sso_login # type: ignore[method-assign] return hs diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index 4496370c3f..11ec8c7f11 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -830,7 +830,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): username: The username to use for the test. registration: Whether to test with registration URLs. """ - self.hs.get_identity_handler().send_threepid_validation = AsyncMock( # type: ignore[assignment] + self.hs.get_identity_handler().send_threepid_validation = AsyncMock( # type: ignore[method-assign] return_value=0 ) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index a04234829f..e9fbf32c7c 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -202,7 +202,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): @override_config({"limit_usage_by_mau": True}) def test_get_or_create_user_mau_not_blocked(self) -> None: - self.store.count_monthly_users = AsyncMock( # type: ignore[assignment] + self.store.count_monthly_users = AsyncMock( # type: ignore[method-assign] return_value=self.hs.config.server.max_mau_value - 1 ) # Ensure does not throw exception @@ -299,7 +299,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): def test_auto_create_auto_join_rooms_when_user_is_the_first_real_user(self) -> None: room_alias_str = "#room:test" - self.store.count_real_users = AsyncMock(return_value=1) # type: ignore[assignment] + self.store.count_real_users = AsyncMock(return_value=1) # type: ignore[method-assign] self.store.is_real_user = AsyncMock(return_value=True) user_id = self.get_success(self.handler.register_user(localpart="real")) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) @@ -314,7 +314,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): def test_auto_create_auto_join_rooms_when_user_is_not_the_first_real_user( self, ) -> None: - self.store.count_real_users = AsyncMock(return_value=2) # type: ignore[assignment] + self.store.count_real_users = AsyncMock(return_value=2) # type: ignore[method-assign] self.store.is_real_user = AsyncMock(return_value=True) user_id = self.get_success(self.handler.register_user(localpart="real")) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py index 6e666d7bed..00f4e181e8 100644 --- a/tests/handlers/test_saml.py +++ b/tests/handlers/test_saml.py @@ -133,7 +133,7 @@ class SamlHandlerTestCase(HomeserverTestCase): # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] # send a mocked-up SAML response to the callback saml_response = FakeAuthnResponse({"uid": "test_user", "username": "test_user"}) @@ -163,7 +163,7 @@ class SamlHandlerTestCase(HomeserverTestCase): # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] # Map a user via SSO. 
saml_response = FakeAuthnResponse( @@ -205,11 +205,11 @@ class SamlHandlerTestCase(HomeserverTestCase): # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] # mock out the error renderer too sso_handler = self.hs.get_sso_handler() - sso_handler.render_error = Mock(return_value=None) # type: ignore[assignment] + sso_handler.render_error = Mock(return_value=None) # type: ignore[method-assign] saml_response = FakeAuthnResponse({"uid": "test", "username": "föö"}) request = _mock_request() @@ -226,9 +226,9 @@ class SamlHandlerTestCase(HomeserverTestCase): # stub out the auth handler and error renderer auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] sso_handler = self.hs.get_sso_handler() - sso_handler.render_error = Mock(return_value=None) # type: ignore[assignment] + sso_handler.render_error = Mock(return_value=None) # type: ignore[method-assign] # register a user to occupy the first-choice MXID store = self.hs.get_datastores().main @@ -311,7 +311,7 @@ class SamlHandlerTestCase(HomeserverTestCase): # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[assignment] + auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] # The response doesn't have the proper userGroup or department. saml_response = FakeAuthnResponse({"uid": "test_user", "username": "test_user"}) diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index d776526bc1..2a295da3a0 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -122,15 +122,15 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.datastore.get_destination_retry_timings = AsyncMock(return_value=None) - self.datastore.get_device_updates_by_remote = AsyncMock( # type: ignore[assignment] + self.datastore.get_device_updates_by_remote = AsyncMock( # type: ignore[method-assign] return_value=(0, []) ) - self.datastore.get_destination_last_successful_stream_ordering = AsyncMock( # type: ignore[assignment] + self.datastore.get_destination_last_successful_stream_ordering = AsyncMock( # type: ignore[method-assign] return_value=None ) - self.datastore.get_received_txn_response = AsyncMock( # type: ignore[assignment] + self.datastore.get_received_txn_response = AsyncMock( # type: ignore[method-assign] return_value=None ) @@ -143,25 +143,25 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): raise AuthError(401, "User is not in the room") return None - hs.get_auth().check_user_in_room = Mock( # type: ignore[assignment] + hs.get_auth().check_user_in_room = Mock( # type: ignore[method-assign] side_effect=check_user_in_room ) async def check_host_in_room(room_id: str, server_name: str) -> bool: return room_id == ROOM_ID - hs.get_event_auth_handler().is_host_in_room = Mock( # type: ignore[assignment] + hs.get_event_auth_handler().is_host_in_room = Mock( # type: ignore[method-assign] side_effect=check_host_in_room ) async def get_current_hosts_in_room(room_id: str) -> Set[str]: return {member.domain for member in self.room_members} - hs.get_storage_controllers().state.get_current_hosts_in_room = Mock( # type: ignore[assignment] + hs.get_storage_controllers().state.get_current_hosts_in_room 
= Mock( # type: ignore[method-assign] side_effect=get_current_hosts_in_room ) - hs.get_storage_controllers().state.get_current_hosts_in_room_or_partial_state_approximation = Mock( # type: ignore[assignment] + hs.get_storage_controllers().state.get_current_hosts_in_room_or_partial_state_approximation = Mock( # type: ignore[method-assign] side_effect=get_current_hosts_in_room ) @@ -170,24 +170,24 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.datastore.get_users_in_room = Mock(side_effect=get_users_in_room) - self.datastore.get_user_directory_stream_pos = AsyncMock( # type: ignore[assignment] + self.datastore.get_user_directory_stream_pos = AsyncMock( # type: ignore[method-assign] # we deliberately return a non-None stream pos to avoid # doing an initial_sync return_value=1 ) - self.datastore.get_partial_current_state_deltas = Mock(return_value=(0, None)) # type: ignore[assignment] + self.datastore.get_partial_current_state_deltas = Mock(return_value=(0, None)) # type: ignore[method-assign] - self.datastore.get_to_device_stream_token = Mock( # type: ignore[assignment] + self.datastore.get_to_device_stream_token = Mock( # type: ignore[method-assign] return_value=0 ) - self.datastore.get_new_device_msgs_for_remote = AsyncMock( # type: ignore[assignment] + self.datastore.get_new_device_msgs_for_remote = AsyncMock( # type: ignore[method-assign] return_value=([], 0) ) - self.datastore.delete_device_msgs_for_remote = AsyncMock( # type: ignore[assignment] + self.datastore.delete_device_msgs_for_remote = AsyncMock( # type: ignore[method-assign] return_value=None ) - self.datastore.set_received_txn_response = AsyncMock( # type: ignore[assignment] + self.datastore.set_received_txn_response = AsyncMock( # type: ignore[method-assign] return_value=None ) diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py index fa27f1279a..c379853e20 100644 --- a/tests/logging/test_terse_json.py +++ b/tests/logging/test_terse_json.py @@ -164,7 +164,7 @@ class TerseJsonTestCase(LoggerCleanupMixin, TestCase): # Call requestReceived to finish instantiating the object. request.content = BytesIO() # Partially skip some internal processing of SynapseRequest. - request._started_processing = Mock() # type: ignore[assignment] + request._started_processing = Mock() # type: ignore[method-assign] request.request_metrics = Mock(spec=["name"]) with patch.object(Request, "render"): request.requestReceived(b"POST", b"/_matrix/client/versions", b"1.1") diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 9ce9326190..172fc3a736 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -233,7 +233,7 @@ class ModuleApiTestCase(BaseModuleApiTestCase): def test_sending_events_into_room(self) -> None: """Tests that a module can send events into a room""" # Mock out create_and_send_nonmember_event to check whether events are being sent - self.event_creation_handler.create_and_send_nonmember_event = Mock( # type: ignore[assignment] + self.event_creation_handler.create_and_send_nonmember_event = Mock( # type: ignore[method-assign] spec=[], side_effect=self.event_creation_handler.create_and_send_nonmember_event, ) @@ -579,7 +579,7 @@ class ModuleApiTestCase(BaseModuleApiTestCase): # Necessary to fake a remote join. 
fake_stream_id = 1 mocked_remote_join = AsyncMock(return_value=("fake-event-id", fake_stream_id)) - self.hs.get_room_member_handler()._remote_join = mocked_remote_join # type: ignore[assignment] + self.hs.get_room_member_handler()._remote_join = mocked_remote_join # type: ignore[method-assign] fake_remote_host = f"{self.module_api.server_name}-remote" # Given that the join is to be faked, we expect the relevant join event not to diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index a3880ac171..7c23b77e0a 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -190,7 +190,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): # Mock the method which calculates push rules -- we do this instead of # e.g. checking the results in the database because we want to ensure # that code isn't even running. - bulk_evaluator._action_for_event_by_user = AsyncMock() # type: ignore[assignment] + bulk_evaluator._action_for_event_by_user = AsyncMock() # type: ignore[method-assign] # Ensure no actions are generated! self.get_success(bulk_evaluator.action_for_events_by_user([(event, context)])) diff --git a/tests/replication/storage/test_events.py b/tests/replication/storage/test_events.py index f7c6417a09..af25815fa5 100644 --- a/tests/replication/storage/test_events.py +++ b/tests/replication/storage/test_events.py @@ -58,7 +58,7 @@ def patch__eq__(cls: object) -> Callable[[], None]: def unpatch() -> None: if eq is not None: - cls.__eq__ = eq # type: ignore[assignment] + cls.__eq__ = eq # type: ignore[method-assign] return unpatch diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 339a41c7e1..2f6bd0d74f 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -71,8 +71,8 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): self.hs.config.registration.registration_shared_secret = "shared" - self.hs.get_media_repository = Mock() # type: ignore[assignment] - self.hs.get_deactivate_account_handler = Mock() # type: ignore[assignment] + self.hs.get_media_repository = Mock() # type: ignore[method-assign] + self.hs.get_deactivate_account_handler = Mock() # type: ignore[method-assign] return self.hs diff --git a/tests/rest/admin/test_username_available.py b/tests/rest/admin/test_username_available.py index 6c04e6c56c..4c69d224b8 100644 --- a/tests/rest/admin/test_username_available.py +++ b/tests/rest/admin/test_username_available.py @@ -50,7 +50,7 @@ class UsernameAvailableTestCase(unittest.HomeserverTestCase): ) handler = self.hs.get_registration_handler() - handler.check_username = check_username # type: ignore[assignment] + handler.check_username = check_username # type: ignore[method-assign] def test_username_available(self) -> None: """ diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index ac19f3c6da..e9f495e206 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -1346,7 +1346,7 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): return {} # Register a mock that will return the expected result depending on the remote. - self.hs.get_federation_http_client().post_json = Mock(side_effect=post_json) # type: ignore[assignment] + self.hs.get_federation_http_client().post_json = Mock(side_effect=post_json) # type: ignore[method-assign] # Check that we've got the correct response from the client-side endpoint. 
self._test_status( diff --git a/tests/rest/client/test_events.py b/tests/rest/client/test_events.py index 54df2a252c..141e0f57a3 100644 --- a/tests/rest/client/test_events.py +++ b/tests/rest/client/test_events.py @@ -45,7 +45,7 @@ class EventStreamPermissionsTestCase(unittest.HomeserverTestCase): hs = self.setup_test_homeserver(config=config) - hs.get_federation_handler = Mock() # type: ignore[assignment] + hs.get_federation_handler = Mock() # type: ignore[method-assign] return hs diff --git a/tests/rest/client/test_filter.py b/tests/rest/client/test_filter.py index a2d5d340be..90a8df147c 100644 --- a/tests/rest/client/test_filter.py +++ b/tests/rest/client/test_filter.py @@ -65,14 +65,14 @@ class FilterTestCase(unittest.HomeserverTestCase): def test_add_filter_non_local_user(self) -> None: _is_mine = self.hs.is_mine - self.hs.is_mine = lambda target_user: False # type: ignore[assignment] + self.hs.is_mine = lambda target_user: False # type: ignore[method-assign] channel = self.make_request( "POST", "/_matrix/client/r0/user/%s/filter" % (self.user_id), self.EXAMPLE_FILTER_JSON, ) - self.hs.is_mine = _is_mine # type: ignore[assignment] + self.hs.is_mine = _is_mine # type: ignore[method-assign] self.assertEqual(channel.code, 403) self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN) diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 53182459e4..47c1d38ad7 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -68,7 +68,7 @@ class RoomBase(unittest.HomeserverTestCase): "red", ) - self.hs.get_federation_handler = Mock() # type: ignore[assignment] + self.hs.get_federation_handler = Mock() # type: ignore[method-assign] self.hs.get_federation_handler.return_value.maybe_backfill = AsyncMock( return_value=None ) @@ -76,7 +76,7 @@ class RoomBase(unittest.HomeserverTestCase): async def _insert_client_ip(*args: Any, **kwargs: Any) -> None: return None - self.hs.get_datastores().main.insert_client_ip = _insert_client_ip # type: ignore[assignment] + self.hs.get_datastores().main.insert_client_ip = _insert_client_ip # type: ignore[method-assign] return self.hs @@ -3413,8 +3413,8 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. make_invite_mock = AsyncMock(return_value=(Mock(event_id="abc"), 0)) - self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock # type: ignore[assignment] - self.hs.get_identity_handler().lookup_3pid = AsyncMock( # type: ignore[assignment] + self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock # type: ignore[method-assign] + self.hs.get_identity_handler().lookup_3pid = AsyncMock( # type: ignore[method-assign] return_value=None, ) @@ -3477,8 +3477,8 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. 
make_invite_mock = AsyncMock(return_value=(Mock(event_id="abc"), 0)) - self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock # type: ignore[assignment] - self.hs.get_identity_handler().lookup_3pid = AsyncMock( # type: ignore[assignment] + self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock # type: ignore[method-assign] + self.hs.get_identity_handler().lookup_3pid = AsyncMock( # type: ignore[method-assign] return_value=None, ) diff --git a/tests/rest/client/test_shadow_banned.py b/tests/rest/client/test_shadow_banned.py index 8d2cdf8751..9aecf88e41 100644 --- a/tests/rest/client/test_shadow_banned.py +++ b/tests/rest/client/test_shadow_banned.py @@ -84,7 +84,7 @@ class RoomTestCase(_ShadowBannedBase): def test_invite_3pid(self) -> None: """Ensure that a 3PID invite does not attempt to contact the identity server.""" identity_handler = self.hs.get_identity_handler() - identity_handler.lookup_3pid = Mock( # type: ignore[assignment] + identity_handler.lookup_3pid = Mock( # type: ignore[method-assign] side_effect=AssertionError("This should not get called") ) diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index da37fcb045..57eb713b15 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -117,7 +117,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): async def _check_event_auth(origin: Any, event: Any, context: Any) -> None: pass - hs.get_federation_event_handler()._check_event_auth = _check_event_auth # type: ignore[assignment] + hs.get_federation_event_handler()._check_event_auth = _check_event_auth # type: ignore[method-assign] return hs diff --git a/tests/server.py b/tests/server.py index 659ccce838..08633fe640 100644 --- a/tests/server.py +++ b/tests/server.py @@ -722,7 +722,7 @@ def _make_test_homeserver_synchronous(server: HomeServer) -> None: **kwargs, ) - pool.runWithConnection = runWithConnection # type: ignore[assignment] + pool.runWithConnection = runWithConnection # type: ignore[method-assign] pool.runInteraction = runInteraction # type: ignore[assignment] # Replace the thread pool with a threadless 'thread' pool pool.threadpool = ThreadPool(clock._reactor) diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 47c53a5475..17f428bfc5 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -69,7 +69,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): self._rlsn = rlsn self._rlsn._store.user_last_seen_monthly_active = AsyncMock(return_value=1000) - self._rlsn._server_notices_manager.send_notice = AsyncMock( # type: ignore[assignment] + self._rlsn._server_notices_manager.send_notice = AsyncMock( # type: ignore[method-assign] return_value=Mock() ) self._send_notice = self._rlsn._server_notices_manager.send_notice @@ -82,8 +82,8 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): self._rlsn._server_notices_manager.maybe_get_notice_room_for_user = AsyncMock( return_value="!something:localhost" ) - self._rlsn._store.add_tag_to_room = AsyncMock(return_value=None) # type: ignore[assignment] - self._rlsn._store.get_tags_for_room = AsyncMock(return_value={}) # type: ignore[assignment] + self._rlsn._store.add_tag_to_room = AsyncMock(return_value=None) # type: ignore[method-assign] + 
self._rlsn._store.get_tags_for_room = AsyncMock(return_value={}) # type: ignore[method-assign] @override_config({"hs_disabled": True}) def test_maybe_send_server_notice_disabled_hs(self) -> None: @@ -100,13 +100,13 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): def test_maybe_send_server_notice_to_user_remove_blocked_notice(self) -> None: """Test when user has blocked notice, but should have it removed""" - self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[assignment] + self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[method-assign] return_value=None ) mock_event = Mock( type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} ) - self._rlsn._store.get_events = AsyncMock( # type: ignore[assignment] + self._rlsn._store.get_events = AsyncMock( # type: ignore[method-assign] return_value={"123": mock_event} ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) @@ -122,7 +122,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test when user has blocked notice, but notice ought to be there (NOOP) """ - self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[assignment] + self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[method-assign] return_value=None, side_effect=ResourceLimitError(403, "foo"), ) @@ -130,7 +130,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): mock_event = Mock( type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} ) - self._rlsn._store.get_events = AsyncMock( # type: ignore[assignment] + self._rlsn._store.get_events = AsyncMock( # type: ignore[method-assign] return_value={"123": mock_event} ) @@ -142,7 +142,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test when user does not have blocked notice, but should have one """ - self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[assignment] + self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[method-assign] return_value=None, side_effect=ResourceLimitError(403, "foo"), ) @@ -155,7 +155,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test when user does not have blocked notice, nor should they (NOOP) """ - self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[assignment] + self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[method-assign] return_value=None ) @@ -168,7 +168,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): Test when user is not part of the MAU cohort - this should not ever happen - but ... 
""" - self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[assignment] + self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[method-assign] return_value=None ) self._rlsn._store.user_last_seen_monthly_active = AsyncMock(return_value=None) @@ -184,7 +184,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): Test that when server is over MAU limit and alerting is suppressed, then an alert message is not sent into the room """ - self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[assignment] + self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[method-assign] return_value=None, side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER @@ -199,7 +199,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test that when a server is disabled, that MAU limit alerting is ignored. """ - self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[assignment] + self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[method-assign] return_value=None, side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.HS_DISABLED @@ -218,21 +218,21 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): When the room is already in a blocked state, test that when alerting is suppressed that the room is returned to an unblocked state. """ - self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[assignment] + self._rlsn._auth_blocking.check_auth_blocking = AsyncMock( # type: ignore[method-assign] return_value=None, side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER ), ) - self._rlsn._is_room_currently_blocked = AsyncMock( # type: ignore[assignment] + self._rlsn._is_room_currently_blocked = AsyncMock( # type: ignore[method-assign] return_value=(True, []) ) mock_event = Mock( type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} ) - self._rlsn._store.get_events = AsyncMock( # type: ignore[assignment] + self._rlsn._store.get_events = AsyncMock( # type: ignore[method-assign] return_value={"123": mock_event} ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 48f39df9fe..cbce26a725 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -338,7 +338,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase): # we aren't testing store._base stuff here, so mock this out # (ignore needed because Mypy won't allow us to assign to a method otherwise) - self.store.get_events_as_list = AsyncMock(return_value=events) # type: ignore[assignment] + self.store.get_events_as_list = AsyncMock(return_value=events) # type: ignore[method-assign] self.get_success(self._insert_txn(self.as_list[1]["id"], 9, other_events)) self.get_success(self._insert_txn(service.id, 10, events)) diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 0bf706ba08..49366440ce 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -252,7 +252,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): ) self.get_success(d) - self.store.upsert_monthly_active_user = AsyncMock(return_value=None) # type: ignore[assignment] + self.store.upsert_monthly_active_user = AsyncMock(return_value=None) # 
type: ignore[method-assign] d = self.store.populate_monthly_active_users(user_id) self.get_success(d) @@ -260,9 +260,9 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): self.store.upsert_monthly_active_user.assert_not_called() def test_populate_monthly_users_should_update(self) -> None: - self.store.upsert_monthly_active_user = AsyncMock(return_value=None) # type: ignore[assignment] + self.store.upsert_monthly_active_user = AsyncMock(return_value=None) # type: ignore[method-assign] - self.store.is_trial_user = AsyncMock(return_value=False) # type: ignore[assignment] + self.store.is_trial_user = AsyncMock(return_value=False) # type: ignore[method-assign] self.store.user_last_seen_monthly_active = AsyncMock(return_value=None) d = self.store.populate_monthly_active_users("user_id") @@ -271,9 +271,9 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): self.store.upsert_monthly_active_user.assert_called_once() def test_populate_monthly_users_should_not_update(self) -> None: - self.store.upsert_monthly_active_user = AsyncMock(return_value=None) # type: ignore[assignment] + self.store.upsert_monthly_active_user = AsyncMock(return_value=None) # type: ignore[method-assign] - self.store.is_trial_user = AsyncMock(return_value=False) # type: ignore[assignment] + self.store.is_trial_user = AsyncMock(return_value=False) # type: ignore[method-assign] self.store.user_last_seen_monthly_active = AsyncMock( return_value=self.hs.get_clock().time_msec() ) @@ -356,7 +356,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): @override_config({"limit_usage_by_mau": False, "mau_stats_only": False}) def test_no_users_when_not_tracking(self) -> None: - self.store.upsert_monthly_active_user = AsyncMock(return_value=None) # type: ignore[assignment] + self.store.upsert_monthly_active_user = AsyncMock(return_value=None) # type: ignore[method-assign] self.get_success(self.store.populate_monthly_active_users("@user:sever")) diff --git a/tests/test_federation.py b/tests/test_federation.py index 779f70467b..f8ade6da38 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -80,7 +80,7 @@ class MessageAcceptTests(unittest.HomeserverTestCase): ) -> None: pass - federation_event_handler._check_event_auth = _check_event_auth # type: ignore[assignment] + federation_event_handler._check_event_auth = _check_event_auth # type: ignore[method-assign] self.client = self.hs.get_federation_client() async def _check_sigs_and_hash_for_pulled_events_and_fetch( @@ -190,7 +190,7 @@ class MessageAcceptTests(unittest.HomeserverTestCase): # Register the mock on the federation client. federation_client = self.hs.get_federation_client() - federation_client.query_user_devices = Mock(side_effect=query_user_devices) # type: ignore[assignment] + federation_client.query_user_devices = Mock(side_effect=query_user_devices) # type: ignore[method-assign] # Register a mock on the store so that the incoming update doesn't fail because # we don't share a room with the user. @@ -240,7 +240,7 @@ class MessageAcceptTests(unittest.HomeserverTestCase): # Register mock device list retrieval on the federation client. 
federation_client = self.hs.get_federation_client() - federation_client.query_user_devices = AsyncMock( # type: ignore[assignment] + federation_client.query_user_devices = AsyncMock( # type: ignore[method-assign] return_value={ "user_id": remote_user_id, "stream_id": 1, diff --git a/tests/test_state.py b/tests/test_state.py index eded38c766..9c8679cc1d 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -714,7 +714,7 @@ class StateTestCase(unittest.TestCase): store = _DummyStore() store.register_events(old_state_1) store.register_events(old_state_2) - self.dummy_store.get_events = store.get_events # type: ignore[assignment] + self.dummy_store.get_events = store.get_events # type: ignore[method-assign] context: EventContext context = yield self._get_context( @@ -773,7 +773,7 @@ class StateTestCase(unittest.TestCase): store = _DummyStore() store.register_events(old_state_1) store.register_events(old_state_2) - self.dummy_store.get_events = store.get_events # type: ignore[assignment] + self.dummy_store.get_events = store.get_events # type: ignore[method-assign] context: EventContext context = yield self._get_context( diff --git a/tests/unittest.py b/tests/unittest.py index 40672a4415..5d3640d8ac 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -395,9 +395,9 @@ class HomeserverTestCase(TestCase): ) # Type ignore: mypy doesn't like us assigning to methods. - self.hs.get_auth().get_user_by_req = get_requester # type: ignore[assignment] - self.hs.get_auth().get_user_by_access_token = get_requester # type: ignore[assignment] - self.hs.get_auth().get_access_token_from_request = Mock(return_value=token) # type: ignore[assignment] + self.hs.get_auth().get_user_by_req = get_requester # type: ignore[method-assign] + self.hs.get_auth().get_user_by_access_token = get_requester # type: ignore[method-assign] + self.hs.get_auth().get_access_token_from_request = Mock(return_value=token) # type: ignore[method-assign] if self.needs_threadpool: self.reactor.threadpool = ThreadPool() # type: ignore[assignment] From e9235d92f2a3cde489a4d24303e7868a93f3fb4d Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 29 Aug 2023 11:44:07 -0400 Subject: [PATCH 393/562] Track currently syncing users by device for presence (#16172) Refactoring to use both the user ID & the device ID when tracking the currently syncing users in the presence handler. This is done both locally and over replication. Note that the device ID is discarded but will be used in a future change. --- changelog.d/16172.misc | 1 + synapse/handlers/presence.py | 155 ++++++++++++++++++---------- synapse/replication/tcp/commands.py | 17 ++- synapse/replication/tcp/handler.py | 19 +++- 4 files changed, 129 insertions(+), 63 deletions(-) create mode 100644 changelog.d/16172.misc diff --git a/changelog.d/16172.misc b/changelog.d/16172.misc new file mode 100644 index 0000000000..4d709cb56e --- /dev/null +++ b/changelog.d/16172.misc @@ -0,0 +1 @@ +Track per-device information in the presence code. 
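The presence.py diff below re-keys the handler's sync bookkeeping from plain user IDs to `(user_id, device_id)` tuples: `_user_to_num_current_syncs: Dict[str, int]` becomes `_user_device_to_num_current_syncs: Dict[Tuple[str, Optional[str]], int]`, and `users_going_offline` becomes `_user_devices_going_offline` with the same key change. A minimal standalone sketch of that bookkeeping shape (simplified: the real handler also notifies the presence writer over replication):

```python
# Minimal sketch of the re-keyed sync counting introduced below: in-flight
# sync counts are tracked per (user_id, device_id) rather than per user,
# so per-device activity stays distinguishable for later changes.
from typing import Dict, Optional, Tuple


class SyncTracker:
    def __init__(self) -> None:
        # (user_id, device_id) -> number of in-flight syncs for that device
        self._current_syncs: Dict[Tuple[str, Optional[str]], int] = {}

    def start_sync(self, user_id: str, device_id: Optional[str]) -> None:
        key = (user_id, device_id)
        self._current_syncs[key] = self._current_syncs.get(key, 0) + 1

    def end_sync(self, user_id: str, device_id: Optional[str]) -> None:
        key = (user_id, device_id)
        remaining = self._current_syncs.get(key, 0) - 1
        if remaining > 0:
            self._current_syncs[key] = remaining
        else:
            # Last in-flight sync for this device has finished.
            self._current_syncs.pop(key, None)


tracker = SyncTracker()
tracker.start_sync("@alice:example.org", "DEVICEID")
tracker.start_sync("@alice:example.org", None)  # device ID unknown
tracker.end_sync("@alice:example.org", "DEVICEID")
```

Keying by tuple rather than nesting a per-user dict keeps the increment and decrement paths to a single dictionary operation, mirroring the structure the handler adopts below.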
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 50c68c86ce..2f841863ae 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -23,6 +23,7 @@ The methods that define policy are: """ import abc import contextlib +import itertools import logging from bisect import bisect from contextlib import contextmanager @@ -188,15 +189,17 @@ class BasePresenceHandler(abc.ABC): """ @abc.abstractmethod - def get_currently_syncing_users_for_replication(self) -> Iterable[str]: - """Get an iterable of syncing users on this worker, to send to the presence handler + def get_currently_syncing_users_for_replication( + self, + ) -> Iterable[Tuple[str, Optional[str]]]: + """Get an iterable of syncing users and devices on this worker, to send to the presence handler This is called when a replication connection is established. It should return - a list of user ids, which are then sent as USER_SYNC commands to inform the - process handling presence about those users. + a list of tuples of user ID & device ID, which are then sent as USER_SYNC commands + to inform the process handling presence about those users/devices. Returns: - An iterable of user_id strings. + An iterable of tuples of user ID and device ID. """ async def get_state(self, target_user: UserID) -> UserPresenceState: @@ -284,7 +287,12 @@ class BasePresenceHandler(abc.ABC): """ async def update_external_syncs_row( # noqa: B027 (no-op by design) - self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int + self, + process_id: str, + user_id: str, + device_id: Optional[str], + is_syncing: bool, + sync_time_msec: int, ) -> None: """Update the syncing users for an external process as a delta. @@ -295,6 +303,7 @@ class BasePresenceHandler(abc.ABC): syncing against. This allows synapse to process updates as user start and stop syncing against a given process. user_id: The user who has started or stopped syncing + device_id: The user's device that has started or stopped syncing is_syncing: Whether or not the user is now syncing sync_time_msec: Time in ms when the user was last syncing """ @@ -425,16 +434,18 @@ class WorkerPresenceHandler(BasePresenceHandler): hs.config.worker.writers.presence, ) - # The number of ongoing syncs on this process, by user id. + # The number of ongoing syncs on this process, by (user ID, device ID). # Empty if _presence_enabled is false. - self._user_to_num_current_syncs: Dict[str, int] = {} + self._user_device_to_num_current_syncs: Dict[ + Tuple[str, Optional[str]], int + ] = {} self.notifier = hs.get_notifier() self.instance_id = hs.get_instance_id() - # user_id -> last_sync_ms. Lists the users that have stopped syncing but - # we haven't notified the presence writer of that yet - self.users_going_offline: Dict[str, int] = {} + # (user_id, device_id) -> last_sync_ms. 
Lists the devices that have stopped + # syncing but we haven't notified the presence writer of that yet + self._user_devices_going_offline: Dict[Tuple[str, Optional[str]], int] = {} self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs) self._set_state_client = ReplicationPresenceSetState.make_client(hs) @@ -457,39 +468,47 @@ class WorkerPresenceHandler(BasePresenceHandler): ClearUserSyncsCommand(self.instance_id) ) - def send_user_sync(self, user_id: str, is_syncing: bool, last_sync_ms: int) -> None: + def send_user_sync( + self, + user_id: str, + device_id: Optional[str], + is_syncing: bool, + last_sync_ms: int, + ) -> None: if self._presence_enabled: self.hs.get_replication_command_handler().send_user_sync( - self.instance_id, user_id, is_syncing, last_sync_ms + self.instance_id, user_id, device_id, is_syncing, last_sync_ms ) - def mark_as_coming_online(self, user_id: str) -> None: + def mark_as_coming_online(self, user_id: str, device_id: Optional[str]) -> None: """A user has started syncing. Send a UserSync to the presence writer, unless they had recently stopped syncing. """ - going_offline = self.users_going_offline.pop(user_id, None) + going_offline = self._user_devices_going_offline.pop((user_id, device_id), None) if not going_offline: # Safe to skip because we haven't yet told the presence writer they # were offline - self.send_user_sync(user_id, True, self.clock.time_msec()) + self.send_user_sync(user_id, device_id, True, self.clock.time_msec()) - def mark_as_going_offline(self, user_id: str) -> None: + def mark_as_going_offline(self, user_id: str, device_id: Optional[str]) -> None: """A user has stopped syncing. We wait before notifying the presence writer as its likely they'll come back soon. This allows us to avoid sending a stopped syncing immediately followed by a started syncing notification to the presence writer """ - self.users_going_offline[user_id] = self.clock.time_msec() + self._user_devices_going_offline[(user_id, device_id)] = self.clock.time_msec() def send_stop_syncing(self) -> None: """Check if there are any users who have stopped syncing a while ago and haven't come back yet. If there are poke the presence writer about them. """ now = self.clock.time_msec() - for user_id, last_sync_ms in list(self.users_going_offline.items()): + for (user_id, device_id), last_sync_ms in list( + self._user_devices_going_offline.items() + ): if now - last_sync_ms > UPDATE_SYNCING_USERS_MS: - self.users_going_offline.pop(user_id, None) - self.send_user_sync(user_id, False, last_sync_ms) + self._user_devices_going_offline.pop((user_id, device_id), None) + self.send_user_sync(user_id, device_id, False, last_sync_ms) async def user_syncing( self, @@ -515,23 +534,23 @@ class WorkerPresenceHandler(BasePresenceHandler): is_sync=True, ) - curr_sync = self._user_to_num_current_syncs.get(user_id, 0) - self._user_to_num_current_syncs[user_id] = curr_sync + 1 + curr_sync = self._user_device_to_num_current_syncs.get((user_id, device_id), 0) + self._user_device_to_num_current_syncs[(user_id, device_id)] = curr_sync + 1 # If this is the first in-flight sync, notify replication - if self._user_to_num_current_syncs[user_id] == 1: - self.mark_as_coming_online(user_id) + if self._user_device_to_num_current_syncs[(user_id, device_id)] == 1: + self.mark_as_coming_online(user_id, device_id) def _end() -> None: # We check that the user_id is in user_to_num_current_syncs because # user_to_num_current_syncs may have been cleared if we are # shutting down. 
- if user_id in self._user_to_num_current_syncs: - self._user_to_num_current_syncs[user_id] -= 1 + if (user_id, device_id) in self._user_device_to_num_current_syncs: + self._user_device_to_num_current_syncs[(user_id, device_id)] -= 1 # If there are no more in-flight syncs, notify replication - if self._user_to_num_current_syncs[user_id] == 0: - self.mark_as_going_offline(user_id) + if self._user_device_to_num_current_syncs[(user_id, device_id)] == 0: + self.mark_as_going_offline(user_id, device_id) @contextlib.contextmanager def _user_syncing() -> Generator[None, None, None]: @@ -598,10 +617,12 @@ class WorkerPresenceHandler(BasePresenceHandler): # If this is a federation sender, notify about presence updates. await self.maybe_send_presence_to_interested_destinations(state_to_notify) - def get_currently_syncing_users_for_replication(self) -> Iterable[str]: + def get_currently_syncing_users_for_replication( + self, + ) -> Iterable[Tuple[str, Optional[str]]]: return [ - user_id - for user_id, count in self._user_to_num_current_syncs.items() + user_id_device_id + for user_id_device_id, count in self._user_device_to_num_current_syncs.items() if count > 0 ] @@ -723,17 +744,23 @@ class PresenceHandler(BasePresenceHandler): # Keeps track of the number of *ongoing* syncs on this process. While # this is non zero a user will never go offline. - self.user_to_num_current_syncs: Dict[str, int] = {} + self._user_device_to_num_current_syncs: Dict[ + Tuple[str, Optional[str]], int + ] = {} # Keeps track of the number of *ongoing* syncs on other processes. + # # While any sync is ongoing on another process the user will never # go offline. + # # Each process has a unique identifier and an update frequency. If # no update is received from that process within the update period then # we assume that all the sync requests on that process have stopped. - # Stored as a dict from process_id to set of user_id, and a dict of - # process_id to millisecond timestamp last updated. - self.external_process_to_current_syncs: Dict[str, Set[str]] = {} + # Stored as a dict from process_id to set of (user_id, device_id), and + # a dict of process_id to millisecond timestamp last updated. + self.external_process_to_current_syncs: Dict[ + str, Set[Tuple[str, Optional[str]]] + ] = {} self.external_process_last_updated_ms: Dict[str, int] = {} self.external_sync_linearizer = Linearizer(name="external_sync_linearizer") @@ -938,7 +965,10 @@ class PresenceHandler(BasePresenceHandler): # that were syncing on that process to see if they need to be timed # out. 
users_to_check.update( - self.external_process_to_current_syncs.pop(process_id, ()) + user_id + for user_id, device_id in self.external_process_to_current_syncs.pop( + process_id, () + ) ) self.external_process_last_updated_ms.pop(process_id) @@ -951,11 +981,15 @@ class PresenceHandler(BasePresenceHandler): syncing_user_ids = { user_id - for user_id, count in self.user_to_num_current_syncs.items() + for (user_id, _), count in self._user_device_to_num_current_syncs.items() if count } - for user_ids in self.external_process_to_current_syncs.values(): - syncing_user_ids.update(user_ids) + syncing_user_ids.update( + user_id + for user_id, _ in itertools.chain( + *self.external_process_to_current_syncs.values() + ) + ) changes = handle_timeouts( states, @@ -1013,8 +1047,8 @@ class PresenceHandler(BasePresenceHandler): if not affect_presence or not self._presence_enabled: return _NullContextManager() - curr_sync = self.user_to_num_current_syncs.get(user_id, 0) - self.user_to_num_current_syncs[user_id] = curr_sync + 1 + curr_sync = self._user_device_to_num_current_syncs.get((user_id, device_id), 0) + self._user_device_to_num_current_syncs[(user_id, device_id)] = curr_sync + 1 # Note that this causes last_active_ts to be incremented which is not # what the spec wants. @@ -1027,7 +1061,7 @@ class PresenceHandler(BasePresenceHandler): async def _end() -> None: try: - self.user_to_num_current_syncs[user_id] -= 1 + self._user_device_to_num_current_syncs[(user_id, device_id)] -= 1 prev_state = await self.current_state_for_user(user_id) await self._update_states( @@ -1049,12 +1083,19 @@ class PresenceHandler(BasePresenceHandler): return _user_syncing() - def get_currently_syncing_users_for_replication(self) -> Iterable[str]: + def get_currently_syncing_users_for_replication( + self, + ) -> Iterable[Tuple[str, Optional[str]]]: # since we are the process handling presence, there is nothing to do here. return [] async def update_external_syncs_row( - self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int + self, + process_id: str, + user_id: str, + device_id: Optional[str], + is_syncing: bool, + sync_time_msec: int, ) -> None: """Update the syncing users for an external process as a delta. @@ -1063,6 +1104,7 @@ class PresenceHandler(BasePresenceHandler): syncing against. This allows synapse to process updates as user start and stop syncing against a given process. user_id: The user who has started or stopped syncing + device_id: The user's device that has started or stopped syncing is_syncing: Whether or not the user is now syncing sync_time_msec: Time in ms when the user was last syncing """ @@ -1073,26 +1115,27 @@ class PresenceHandler(BasePresenceHandler): process_id, set() ) - # USER_SYNC is sent when a user starts or stops syncing on a remote - # process. (But only for the initial and last device.) + # USER_SYNC is sent when a user's device starts or stops syncing on + # a remote # process. (But only for the initial and last sync for that + # device.) # - # When a user *starts* syncing it also calls set_state(...) which + # When a device *starts* syncing it also calls set_state(...) which # will update the state, last_active_ts, and last_user_sync_ts. - # Simply ensure the user is tracked as syncing in this case. + # Simply ensure the user & device is tracked as syncing in this case. # - # When a user *stops* syncing, update the last_user_sync_ts and mark + # When a device *stops* syncing, update the last_user_sync_ts and mark # them as no longer syncing. 
Note this doesn't quite match the # monolith behaviour, which updates last_user_sync_ts at the end of # every sync, not just the last in-flight sync. - if is_syncing and user_id not in process_presence: - process_presence.add(user_id) - elif not is_syncing and user_id in process_presence: + if is_syncing and (user_id, device_id) not in process_presence: + process_presence.add((user_id, device_id)) + elif not is_syncing and (user_id, device_id) in process_presence: new_state = prev_state.copy_and_replace( last_user_sync_ts=sync_time_msec ) await self._update_states([new_state]) - process_presence.discard(user_id) + process_presence.discard((user_id, device_id)) self.external_process_last_updated_ms[process_id] = self.clock.time_msec() @@ -1106,7 +1149,9 @@ class PresenceHandler(BasePresenceHandler): process_presence = self.external_process_to_current_syncs.pop( process_id, set() ) - prev_states = await self.current_state_for_users(process_presence) + prev_states = await self.current_state_for_users( + {user_id for user_id, device_id in process_presence} + ) time_now_ms = self.clock.time_msec() await self._update_states( diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index 58a871c6d9..e616b5e1c8 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -267,27 +267,38 @@ class UserSyncCommand(Command): NAME = "USER_SYNC" def __init__( - self, instance_id: str, user_id: str, is_syncing: bool, last_sync_ms: int + self, + instance_id: str, + user_id: str, + device_id: Optional[str], + is_syncing: bool, + last_sync_ms: int, ): self.instance_id = instance_id self.user_id = user_id + self.device_id = device_id self.is_syncing = is_syncing self.last_sync_ms = last_sync_ms @classmethod def from_line(cls: Type["UserSyncCommand"], line: str) -> "UserSyncCommand": - instance_id, user_id, state, last_sync_ms = line.split(" ", 3) + device_id: Optional[str] + instance_id, user_id, device_id, state, last_sync_ms = line.split(" ", 4) + + if device_id == "None": + device_id = None if state not in ("start", "end"): raise Exception("Invalid USER_SYNC state %r" % (state,)) - return cls(instance_id, user_id, state == "start", int(last_sync_ms)) + return cls(instance_id, user_id, device_id, state == "start", int(last_sync_ms)) def to_line(self) -> str: return " ".join( ( self.instance_id, self.user_id, + str(self.device_id), "start" if self.is_syncing else "end", str(self.last_sync_ms), ) diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 92c5a55acc..d9045d7b73 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -428,7 +428,11 @@ class ReplicationCommandHandler: if self._is_presence_writer: return self._presence_handler.update_external_syncs_row( - cmd.instance_id, cmd.user_id, cmd.is_syncing, cmd.last_sync_ms + cmd.instance_id, + cmd.user_id, + cmd.device_id, + cmd.is_syncing, + cmd.last_sync_ms, ) else: return None @@ -699,9 +703,9 @@ class ReplicationCommandHandler: ) now = self._clock.time_msec() - for user_id in currently_syncing: + for user_id, device_id in currently_syncing: connection.send_command( - UserSyncCommand(self._instance_id, user_id, True, now) + UserSyncCommand(self._instance_id, user_id, device_id, True, now) ) def lost_connection(self, connection: IReplicationConnection) -> None: @@ -753,11 +757,16 @@ class ReplicationCommandHandler: self.send_command(FederationAckCommand(self._instance_name, token)) def send_user_sync( - self, 
instance_id: str, user_id: str, is_syncing: bool, last_sync_ms: int + self, + instance_id: str, + user_id: str, + device_id: Optional[str], + is_syncing: bool, + last_sync_ms: int, ) -> None: """Poke the master that a user has started/stopped syncing.""" self.send_command( - UserSyncCommand(instance_id, user_id, is_syncing, last_sync_ms) + UserSyncCommand(instance_id, user_id, device_id, is_syncing, last_sync_ms) ) def send_user_ip( From 62a1a9be52f4bc79b112f9841ddb3d03b8efccba Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 30 Aug 2023 00:39:39 +0100 Subject: [PATCH 394/562] Describe which rate limiter was hit in logs (#16135) --- changelog.d/16135.misc | 1 + synapse/api/errors.py | 14 ++- synapse/api/ratelimiting.py | 20 ++-- synapse/config/ratelimiting.py | 132 ++++++++++++++------- synapse/handlers/auth.py | 8 +- synapse/handlers/devicemessage.py | 3 +- synapse/handlers/identity.py | 6 +- synapse/handlers/room_member.py | 21 ++-- synapse/handlers/room_summary.py | 5 +- synapse/http/server.py | 8 +- synapse/rest/client/login.py | 6 +- synapse/rest/client/login_token_request.py | 10 +- synapse/rest/client/register.py | 3 +- synapse/server.py | 3 +- synapse/util/ratelimitutils.py | 3 +- tests/api/test_errors.py | 15 ++- tests/api/test_ratelimiting.py | 67 +++++++---- tests/config/test_ratelimiting.py | 31 +++++ 18 files changed, 235 insertions(+), 121 deletions(-) create mode 100644 changelog.d/16135.misc diff --git a/changelog.d/16135.misc b/changelog.d/16135.misc new file mode 100644 index 0000000000..cba8733d02 --- /dev/null +++ b/changelog.d/16135.misc @@ -0,0 +1 @@ +Describe which rate limiter was hit in logs. diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 578e798773..fdb2955be8 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -211,6 +211,11 @@ class SynapseError(CodeMessageException): def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": return cs_error(self.msg, self.errcode, **self._additional_fields) + @property + def debug_context(self) -> Optional[str]: + """Override this to add debugging context that shouldn't be sent to clients.""" + return None + class InvalidAPICallError(SynapseError): """You called an existing API endpoint, but fed that endpoint @@ -508,8 +513,8 @@ class LimitExceededError(SynapseError): def __init__( self, + limiter_name: str, code: int = 429, - msg: str = "Too Many Requests", retry_after_ms: Optional[int] = None, errcode: str = Codes.LIMIT_EXCEEDED, ): @@ -518,12 +523,17 @@ class LimitExceededError(SynapseError): if self.include_retry_after_header and retry_after_ms is not None else None ) - super().__init__(code, msg, errcode, headers=headers) + super().__init__(code, "Too Many Requests", errcode, headers=headers) self.retry_after_ms = retry_after_ms + self.limiter_name = limiter_name def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict": return cs_error(self.msg, self.errcode, retry_after_ms=self.retry_after_ms) + @property + def debug_context(self) -> Optional[str]: + return self.limiter_name + class RoomKeysVersionError(SynapseError): """A client has tried to upload to a non-current version of the room_keys store""" diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index 511790c7c5..887b214d64 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -61,12 +61,16 @@ class Ratelimiter: """ def __init__( - self, store: DataStore, clock: Clock, rate_hz: float, burst_count: int + self, + store: DataStore, + clock: Clock, + cfg: 
RatelimitSettings, ): self.clock = clock - self.rate_hz = rate_hz - self.burst_count = burst_count + self.rate_hz = cfg.per_second + self.burst_count = cfg.burst_count self.store = store + self._limiter_name = cfg.key # An ordered dictionary representing the token buckets tracked by this rate # limiter. Each entry maps a key of arbitrary type to a tuple representing: @@ -305,7 +309,8 @@ class Ratelimiter: if not allowed: raise LimitExceededError( - retry_after_ms=int(1000 * (time_allowed - time_now_s)) + limiter_name=self._limiter_name, + retry_after_ms=int(1000 * (time_allowed - time_now_s)), ) @@ -322,7 +327,9 @@ class RequestRatelimiter: # The rate_hz and burst_count are overridden on a per-user basis self.request_ratelimiter = Ratelimiter( - store=self.store, clock=self.clock, rate_hz=0, burst_count=0 + store=self.store, + clock=self.clock, + cfg=RatelimitSettings(key=rc_message.key, per_second=0, burst_count=0), ) self._rc_message = rc_message @@ -332,8 +339,7 @@ class RequestRatelimiter: self.admin_redaction_ratelimiter: Optional[Ratelimiter] = Ratelimiter( store=self.store, clock=self.clock, - rate_hz=rc_admin_redaction.per_second, - burst_count=rc_admin_redaction.burst_count, + cfg=rc_admin_redaction, ) else: self.admin_redaction_ratelimiter = None diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index a5514e70a2..4efbaeac0d 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict, Optional +from typing import Any, Dict, Optional, cast import attr @@ -21,16 +21,47 @@ from synapse.types import JsonDict from ._base import Config +@attr.s(slots=True, frozen=True, auto_attribs=True) class RatelimitSettings: - def __init__( - self, - config: Dict[str, float], + key: str + per_second: float + burst_count: int + + @classmethod + def parse( + cls, + config: Dict[str, Any], + key: str, defaults: Optional[Dict[str, float]] = None, - ): + ) -> "RatelimitSettings": + """Parse config[key] as a new-style rate limiter config. + + The key may refer to a nested dictionary using a full stop (.) to separate + each nested key. For example, use the key "a.b.c" to parse the following: + + a: + b: + c: + per_second: 10 + burst_count: 200 + + If this lookup fails, we'll fallback to the defaults. + """ defaults = defaults or {"per_second": 0.17, "burst_count": 3.0} - self.per_second = config.get("per_second", defaults["per_second"]) - self.burst_count = int(config.get("burst_count", defaults["burst_count"])) + rl_config = config + for part in key.split("."): + rl_config = rl_config.get(part, {}) + + # By this point we should have hit the rate limiter parameters. + # We don't actually check this though! + rl_config = cast(Dict[str, float], rl_config) + + return cls( + key=key, + per_second=rl_config.get("per_second", defaults["per_second"]), + burst_count=int(rl_config.get("burst_count", defaults["burst_count"])), + ) @attr.s(auto_attribs=True) @@ -49,15 +80,14 @@ class RatelimitConfig(Config): # Load the new-style messages config if it exists. Otherwise fall back # to the old method. 
if "rc_message" in config: - self.rc_message = RatelimitSettings( - config["rc_message"], defaults={"per_second": 0.2, "burst_count": 10.0} + self.rc_message = RatelimitSettings.parse( + config, "rc_message", defaults={"per_second": 0.2, "burst_count": 10.0} ) else: self.rc_message = RatelimitSettings( - { - "per_second": config.get("rc_messages_per_second", 0.2), - "burst_count": config.get("rc_message_burst_count", 10.0), - } + key="rc_messages", + per_second=config.get("rc_messages_per_second", 0.2), + burst_count=config.get("rc_message_burst_count", 10.0), ) # Load the new-style federation config, if it exists. Otherwise, fall @@ -79,51 +109,59 @@ class RatelimitConfig(Config): } ) - self.rc_registration = RatelimitSettings(config.get("rc_registration", {})) + self.rc_registration = RatelimitSettings.parse(config, "rc_registration", {}) - self.rc_registration_token_validity = RatelimitSettings( - config.get("rc_registration_token_validity", {}), + self.rc_registration_token_validity = RatelimitSettings.parse( + config, + "rc_registration_token_validity", defaults={"per_second": 0.1, "burst_count": 5}, ) # It is reasonable to login with a bunch of devices at once (i.e. when # setting up an account), but it is *not* valid to continually be # logging into new devices. - rc_login_config = config.get("rc_login", {}) - self.rc_login_address = RatelimitSettings( - rc_login_config.get("address", {}), + self.rc_login_address = RatelimitSettings.parse( + config, + "rc_login.address", defaults={"per_second": 0.003, "burst_count": 5}, ) - self.rc_login_account = RatelimitSettings( - rc_login_config.get("account", {}), + self.rc_login_account = RatelimitSettings.parse( + config, + "rc_login.account", defaults={"per_second": 0.003, "burst_count": 5}, ) - self.rc_login_failed_attempts = RatelimitSettings( - rc_login_config.get("failed_attempts", {}) + self.rc_login_failed_attempts = RatelimitSettings.parse( + config, + "rc_login.failed_attempts", + {}, ) self.federation_rr_transactions_per_room_per_second = config.get( "federation_rr_transactions_per_room_per_second", 50 ) - rc_admin_redaction = config.get("rc_admin_redaction") self.rc_admin_redaction = None - if rc_admin_redaction: - self.rc_admin_redaction = RatelimitSettings(rc_admin_redaction) + if "rc_admin_redaction" in config: + self.rc_admin_redaction = RatelimitSettings.parse( + config, "rc_admin_redaction", {} + ) - self.rc_joins_local = RatelimitSettings( - config.get("rc_joins", {}).get("local", {}), + self.rc_joins_local = RatelimitSettings.parse( + config, + "rc_joins.local", defaults={"per_second": 0.1, "burst_count": 10}, ) - self.rc_joins_remote = RatelimitSettings( - config.get("rc_joins", {}).get("remote", {}), + self.rc_joins_remote = RatelimitSettings.parse( + config, + "rc_joins.remote", defaults={"per_second": 0.01, "burst_count": 10}, ) # Track the rate of joins to a given room. If there are too many, temporarily # prevent local joins and remote joins via this server. - self.rc_joins_per_room = RatelimitSettings( - config.get("rc_joins_per_room", {}), + self.rc_joins_per_room = RatelimitSettings.parse( + config, + "rc_joins_per_room", defaults={"per_second": 1, "burst_count": 10}, ) @@ -132,31 +170,37 @@ class RatelimitConfig(Config): # * For requests received over federation this is keyed by the origin. # # Note that this isn't exposed in the configuration as it is obscure. 
- self.rc_key_requests = RatelimitSettings( - config.get("rc_key_requests", {}), + self.rc_key_requests = RatelimitSettings.parse( + config, + "rc_key_requests", defaults={"per_second": 20, "burst_count": 100}, ) - self.rc_3pid_validation = RatelimitSettings( - config.get("rc_3pid_validation") or {}, + self.rc_3pid_validation = RatelimitSettings.parse( + config, + "rc_3pid_validation", defaults={"per_second": 0.003, "burst_count": 5}, ) - self.rc_invites_per_room = RatelimitSettings( - config.get("rc_invites", {}).get("per_room", {}), + self.rc_invites_per_room = RatelimitSettings.parse( + config, + "rc_invites.per_room", defaults={"per_second": 0.3, "burst_count": 10}, ) - self.rc_invites_per_user = RatelimitSettings( - config.get("rc_invites", {}).get("per_user", {}), + self.rc_invites_per_user = RatelimitSettings.parse( + config, + "rc_invites.per_user", defaults={"per_second": 0.003, "burst_count": 5}, ) - self.rc_invites_per_issuer = RatelimitSettings( - config.get("rc_invites", {}).get("per_issuer", {}), + self.rc_invites_per_issuer = RatelimitSettings.parse( + config, + "rc_invites.per_issuer", defaults={"per_second": 0.3, "burst_count": 10}, ) - self.rc_third_party_invite = RatelimitSettings( - config.get("rc_third_party_invite", {}), + self.rc_third_party_invite = RatelimitSettings.parse( + config, + "rc_third_party_invite", defaults={"per_second": 0.0025, "burst_count": 5}, ) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 59ecafa6a0..2b0c505130 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -218,19 +218,17 @@ class AuthHandler: self._failed_uia_attempts_ratelimiter = Ratelimiter( store=self.store, clock=self.clock, - rate_hz=self.hs.config.ratelimiting.rc_login_failed_attempts.per_second, - burst_count=self.hs.config.ratelimiting.rc_login_failed_attempts.burst_count, + cfg=self.hs.config.ratelimiting.rc_login_failed_attempts, ) # The number of seconds to keep a UI auth session active. 
self._ui_auth_session_timeout = hs.config.auth.ui_auth_session_timeout - # Ratelimitier for failed /login attempts + # Ratelimiter for failed /login attempts self._failed_login_attempts_ratelimiter = Ratelimiter( store=self.store, clock=hs.get_clock(), - rate_hz=self.hs.config.ratelimiting.rc_login_failed_attempts.per_second, - burst_count=self.hs.config.ratelimiting.rc_login_failed_attempts.burst_count, + cfg=self.hs.config.ratelimiting.rc_login_failed_attempts, ) self._clock = self.hs.get_clock() diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index 17ff8821d9..798c7039f9 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -90,8 +90,7 @@ class DeviceMessageHandler: self._ratelimiter = Ratelimiter( store=self.store, clock=hs.get_clock(), - rate_hz=hs.config.ratelimiting.rc_key_requests.per_second, - burst_count=hs.config.ratelimiting.rc_key_requests.burst_count, + cfg=hs.config.ratelimiting.rc_key_requests, ) async def on_direct_to_device_edu(self, origin: str, content: JsonDict) -> None: diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 3031384d25..472879c964 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -66,14 +66,12 @@ class IdentityHandler: self._3pid_validation_ratelimiter_ip = Ratelimiter( store=self.store, clock=hs.get_clock(), - rate_hz=hs.config.ratelimiting.rc_3pid_validation.per_second, - burst_count=hs.config.ratelimiting.rc_3pid_validation.burst_count, + cfg=hs.config.ratelimiting.rc_3pid_validation, ) self._3pid_validation_ratelimiter_address = Ratelimiter( store=self.store, clock=hs.get_clock(), - rate_hz=hs.config.ratelimiting.rc_3pid_validation.per_second, - burst_count=hs.config.ratelimiting.rc_3pid_validation.burst_count, + cfg=hs.config.ratelimiting.rc_3pid_validation, ) async def ratelimit_request_token_requests( diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 1d8d4a72e7..de0f04e3fe 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -112,8 +112,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): self._join_rate_limiter_local = Ratelimiter( store=self.store, clock=self.clock, - rate_hz=hs.config.ratelimiting.rc_joins_local.per_second, - burst_count=hs.config.ratelimiting.rc_joins_local.burst_count, + cfg=hs.config.ratelimiting.rc_joins_local, ) # Tracks joins from local users to rooms this server isn't a member of. # I.e. joins this server makes by requesting /make_join /send_join from @@ -121,8 +120,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): self._join_rate_limiter_remote = Ratelimiter( store=self.store, clock=self.clock, - rate_hz=hs.config.ratelimiting.rc_joins_remote.per_second, - burst_count=hs.config.ratelimiting.rc_joins_remote.burst_count, + cfg=hs.config.ratelimiting.rc_joins_remote, ) # TODO: find a better place to keep this Ratelimiter. 
# It needs to be @@ -135,8 +133,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): self._join_rate_per_room_limiter = Ratelimiter( store=self.store, clock=self.clock, - rate_hz=hs.config.ratelimiting.rc_joins_per_room.per_second, - burst_count=hs.config.ratelimiting.rc_joins_per_room.burst_count, + cfg=hs.config.ratelimiting.rc_joins_per_room, ) # Ratelimiter for invites, keyed by room (across all issuers, all @@ -144,8 +141,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): self._invites_per_room_limiter = Ratelimiter( store=self.store, clock=self.clock, - rate_hz=hs.config.ratelimiting.rc_invites_per_room.per_second, - burst_count=hs.config.ratelimiting.rc_invites_per_room.burst_count, + cfg=hs.config.ratelimiting.rc_invites_per_room, ) # Ratelimiter for invites, keyed by recipient (across all rooms, all @@ -153,8 +149,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): self._invites_per_recipient_limiter = Ratelimiter( store=self.store, clock=self.clock, - rate_hz=hs.config.ratelimiting.rc_invites_per_user.per_second, - burst_count=hs.config.ratelimiting.rc_invites_per_user.burst_count, + cfg=hs.config.ratelimiting.rc_invites_per_user, ) # Ratelimiter for invites, keyed by issuer (across all rooms, all @@ -162,15 +157,13 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): self._invites_per_issuer_limiter = Ratelimiter( store=self.store, clock=self.clock, - rate_hz=hs.config.ratelimiting.rc_invites_per_issuer.per_second, - burst_count=hs.config.ratelimiting.rc_invites_per_issuer.burst_count, + cfg=hs.config.ratelimiting.rc_invites_per_issuer, ) self._third_party_invite_limiter = Ratelimiter( store=self.store, clock=self.clock, - rate_hz=hs.config.ratelimiting.rc_third_party_invite.per_second, - burst_count=hs.config.ratelimiting.rc_third_party_invite.burst_count, + cfg=hs.config.ratelimiting.rc_third_party_invite, ) self.request_ratelimiter = hs.get_request_ratelimiter() diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index dad3e23470..dd559b4c45 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -35,6 +35,7 @@ from synapse.api.errors import ( UnsupportedRoomVersionError, ) from synapse.api.ratelimiting import Ratelimiter +from synapse.config.ratelimiting import RatelimitSettings from synapse.events import EventBase from synapse.types import JsonDict, Requester, StrCollection from synapse.util.caches.response_cache import ResponseCache @@ -94,7 +95,9 @@ class RoomSummaryHandler: self._server_name = hs.hostname self._federation_client = hs.get_federation_client() self._ratelimiter = Ratelimiter( - store=self._store, clock=hs.get_clock(), rate_hz=5, burst_count=10 + store=self._store, + clock=hs.get_clock(), + cfg=RatelimitSettings("", per_second=5, burst_count=10), ) # If a user tries to fetch the same page multiple times in quick succession, diff --git a/synapse/http/server.py b/synapse/http/server.py index 5109cec983..3bbf91298e 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -115,7 +115,13 @@ def return_json_error( if exc.headers is not None: for header, value in exc.headers.items(): request.setHeader(header, value) - logger.info("%s SynapseError: %s - %s", request, error_code, exc.msg) + error_ctx = exc.debug_context + if error_ctx: + logger.info( + "%s SynapseError: %s - %s (%s)", request, error_code, exc.msg, error_ctx + ) + else: + logger.info("%s SynapseError: %s - %s", request, error_code, exc.msg) elif f.check(CancelledError): error_code = HTTP_STATUS_REQUEST_CANCELLED error_dict = 
{"error": "Request cancelled", "errcode": Codes.UNKNOWN} diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index d724c68920..7be327e26f 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -120,14 +120,12 @@ class LoginRestServlet(RestServlet): self._address_ratelimiter = Ratelimiter( store=self._main_store, clock=hs.get_clock(), - rate_hz=self.hs.config.ratelimiting.rc_login_address.per_second, - burst_count=self.hs.config.ratelimiting.rc_login_address.burst_count, + cfg=self.hs.config.ratelimiting.rc_login_address, ) self._account_ratelimiter = Ratelimiter( store=self._main_store, clock=hs.get_clock(), - rate_hz=self.hs.config.ratelimiting.rc_login_account.per_second, - burst_count=self.hs.config.ratelimiting.rc_login_account.burst_count, + cfg=self.hs.config.ratelimiting.rc_login_account, ) # ensure the CAS/SAML/OIDC handlers are loaded on this worker instance. diff --git a/synapse/rest/client/login_token_request.py b/synapse/rest/client/login_token_request.py index b1629f94a5..d189a923b5 100644 --- a/synapse/rest/client/login_token_request.py +++ b/synapse/rest/client/login_token_request.py @@ -16,6 +16,7 @@ import logging from typing import TYPE_CHECKING, Tuple from synapse.api.ratelimiting import Ratelimiter +from synapse.config.ratelimiting import RatelimitSettings from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseRequest @@ -66,15 +67,18 @@ class LoginTokenRequestServlet(RestServlet): self.token_timeout = hs.config.auth.login_via_existing_token_timeout self._require_ui_auth = hs.config.auth.login_via_existing_require_ui_auth - # Ratelimit aggressively to a maxmimum of 1 request per minute. + # Ratelimit aggressively to a maximum of 1 request per minute. # # This endpoint can be used to spawn additional sessions and could be # abused by a malicious client to create many sessions. 
self._ratelimiter = Ratelimiter( store=self._main_store, clock=hs.get_clock(), - rate_hz=1 / 60, - burst_count=1, + cfg=RatelimitSettings( + key="", + per_second=1 / 60, + burst_count=1, + ), ) @interactive_auth_handler diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 77e3b91b79..132623462a 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -376,8 +376,7 @@ class RegistrationTokenValidityRestServlet(RestServlet): self.ratelimiter = Ratelimiter( store=self.store, clock=hs.get_clock(), - rate_hz=hs.config.ratelimiting.rc_registration_token_validity.per_second, - burst_count=hs.config.ratelimiting.rc_registration_token_validity.burst_count, + cfg=hs.config.ratelimiting.rc_registration_token_validity, ) async def on_GET(self, request: Request) -> Tuple[int, JsonDict]: diff --git a/synapse/server.py b/synapse/server.py index 7cdd3ea3c2..fd16dacd0d 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -408,8 +408,7 @@ class HomeServer(metaclass=abc.ABCMeta): return Ratelimiter( store=self.get_datastores().main, clock=self.get_clock(), - rate_hz=self.config.ratelimiting.rc_registration.per_second, - burst_count=self.config.ratelimiting.rc_registration.burst_count, + cfg=self.config.ratelimiting.rc_registration, ) @cache_in_self diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index cde4a0780f..f693ba2a8c 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -291,7 +291,8 @@ class _PerHostRatelimiter: if self.metrics_name: rate_limit_reject_counter.labels(self.metrics_name).inc() raise LimitExceededError( - retry_after_ms=int(self.window_size / self.sleep_limit) + limiter_name="rc_federation", + retry_after_ms=int(self.window_size / self.sleep_limit), ) self.request_times.append(time_now) diff --git a/tests/api/test_errors.py b/tests/api/test_errors.py index 319abfe63d..8e159029d9 100644 --- a/tests/api/test_errors.py +++ b/tests/api/test_errors.py @@ -1,6 +1,5 @@ # Copyright 2023 The Matrix.org Foundation C.I.C. # -# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -13,24 +12,32 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json + from synapse.api.errors import LimitExceededError from tests import unittest -class ErrorsTestCase(unittest.TestCase): +class LimitExceededErrorTestCase(unittest.TestCase): + def test_key_appears_in_context_but_not_error_dict(self) -> None: + err = LimitExceededError("needle") + serialised = json.dumps(err.error_dict(None)) + self.assertIn("needle", err.debug_context) + self.assertNotIn("needle", serialised) + # Create a sub-class to avoid mutating the class-level property. 
class LimitExceededErrorHeaders(LimitExceededError): include_retry_after_header = True def test_limit_exceeded_header(self) -> None: - err = ErrorsTestCase.LimitExceededErrorHeaders(retry_after_ms=100) + err = self.LimitExceededErrorHeaders(limiter_name="test", retry_after_ms=100) self.assertEqual(err.error_dict(None).get("retry_after_ms"), 100) assert err.headers is not None self.assertEqual(err.headers.get("Retry-After"), "1") def test_limit_exceeded_rounding(self) -> None: - err = ErrorsTestCase.LimitExceededErrorHeaders(retry_after_ms=3001) + err = self.LimitExceededErrorHeaders(limiter_name="test", retry_after_ms=3001) self.assertEqual(err.error_dict(None).get("retry_after_ms"), 3001) assert err.headers is not None self.assertEqual(err.headers.get("Retry-After"), "4") diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py index fa6c1c02ce..a24638c9ef 100644 --- a/tests/api/test_ratelimiting.py +++ b/tests/api/test_ratelimiting.py @@ -1,5 +1,6 @@ from synapse.api.ratelimiting import LimitExceededError, Ratelimiter from synapse.appservice import ApplicationService +from synapse.config.ratelimiting import RatelimitSettings from synapse.types import create_requester from tests import unittest @@ -10,8 +11,7 @@ class TestRatelimiter(unittest.HomeserverTestCase): limiter = Ratelimiter( store=self.hs.get_datastores().main, clock=self.clock, - rate_hz=0.1, - burst_count=1, + cfg=RatelimitSettings(key="", per_second=0.1, burst_count=1), ) allowed, time_allowed = self.get_success_or_raise( limiter.can_do_action(None, key="test_id", _time_now_s=0) @@ -43,8 +43,11 @@ class TestRatelimiter(unittest.HomeserverTestCase): limiter = Ratelimiter( store=self.hs.get_datastores().main, clock=self.clock, - rate_hz=0.1, - burst_count=1, + cfg=RatelimitSettings( + key="", + per_second=0.1, + burst_count=1, + ), ) allowed, time_allowed = self.get_success_or_raise( limiter.can_do_action(as_requester, _time_now_s=0) @@ -76,8 +79,11 @@ class TestRatelimiter(unittest.HomeserverTestCase): limiter = Ratelimiter( store=self.hs.get_datastores().main, clock=self.clock, - rate_hz=0.1, - burst_count=1, + cfg=RatelimitSettings( + key="", + per_second=0.1, + burst_count=1, + ), ) allowed, time_allowed = self.get_success_or_raise( limiter.can_do_action(as_requester, _time_now_s=0) @@ -101,8 +107,7 @@ class TestRatelimiter(unittest.HomeserverTestCase): limiter = Ratelimiter( store=self.hs.get_datastores().main, clock=self.clock, - rate_hz=0.1, - burst_count=1, + cfg=RatelimitSettings(key="", per_second=0.1, burst_count=1), ) # Shouldn't raise @@ -128,8 +133,7 @@ class TestRatelimiter(unittest.HomeserverTestCase): limiter = Ratelimiter( store=self.hs.get_datastores().main, clock=self.clock, - rate_hz=0.1, - burst_count=1, + cfg=RatelimitSettings(key="", per_second=0.1, burst_count=1), ) # First attempt should be allowed @@ -177,8 +181,7 @@ class TestRatelimiter(unittest.HomeserverTestCase): limiter = Ratelimiter( store=self.hs.get_datastores().main, clock=self.clock, - rate_hz=0.1, - burst_count=1, + cfg=RatelimitSettings(key="", per_second=0.1, burst_count=1), ) # First attempt should be allowed @@ -208,8 +211,7 @@ class TestRatelimiter(unittest.HomeserverTestCase): limiter = Ratelimiter( store=self.hs.get_datastores().main, clock=self.clock, - rate_hz=0.1, - burst_count=1, + cfg=RatelimitSettings(key="", per_second=0.1, burst_count=1), ) self.get_success_or_raise( limiter.can_do_action(None, key="test_id_1", _time_now_s=0) @@ -244,7 +246,11 @@ class TestRatelimiter(unittest.HomeserverTestCase): ) ) - 
limiter = Ratelimiter(store=store, clock=self.clock, rate_hz=0.1, burst_count=1) + limiter = Ratelimiter( + store=store, + clock=self.clock, + cfg=RatelimitSettings("", per_second=0.1, burst_count=1), + ) # Shouldn't raise for _ in range(20): @@ -254,8 +260,11 @@ class TestRatelimiter(unittest.HomeserverTestCase): limiter = Ratelimiter( store=self.hs.get_datastores().main, clock=self.clock, - rate_hz=0.1, - burst_count=3, + cfg=RatelimitSettings( + key="", + per_second=0.1, + burst_count=3, + ), ) # Test that 4 actions aren't allowed with a maximum burst of 3. allowed, time_allowed = self.get_success_or_raise( @@ -321,8 +330,7 @@ class TestRatelimiter(unittest.HomeserverTestCase): limiter = Ratelimiter( store=self.hs.get_datastores().main, clock=self.clock, - rate_hz=0.1, - burst_count=3, + cfg=RatelimitSettings("", per_second=0.1, burst_count=3), ) def consume_at(time: float) -> bool: @@ -346,8 +354,11 @@ class TestRatelimiter(unittest.HomeserverTestCase): limiter = Ratelimiter( store=self.hs.get_datastores().main, clock=self.clock, - rate_hz=0.1, - burst_count=3, + cfg=RatelimitSettings( + "", + per_second=0.1, + burst_count=3, + ), ) # Observe two actions, leaving room in the bucket for one more. @@ -369,8 +380,11 @@ class TestRatelimiter(unittest.HomeserverTestCase): limiter = Ratelimiter( store=self.hs.get_datastores().main, clock=self.clock, - rate_hz=0.1, - burst_count=3, + cfg=RatelimitSettings( + "", + per_second=0.1, + burst_count=3, + ), ) # Observe three actions, filling up the bucket. @@ -398,8 +412,11 @@ class TestRatelimiter(unittest.HomeserverTestCase): limiter = Ratelimiter( store=self.hs.get_datastores().main, clock=self.clock, - rate_hz=0.1, - burst_count=3, + cfg=RatelimitSettings( + "", + per_second=0.1, + burst_count=3, + ), ) # Observe four actions, exceeding the bucket. diff --git a/tests/config/test_ratelimiting.py b/tests/config/test_ratelimiting.py index f12147eaa0..0c27dd21e2 100644 --- a/tests/config/test_ratelimiting.py +++ b/tests/config/test_ratelimiting.py @@ -12,11 +12,42 @@ # See the License for the specific language governing permissions and # limitations under the License. 
from synapse.config.homeserver import HomeServerConfig +from synapse.config.ratelimiting import RatelimitSettings from tests.unittest import TestCase from tests.utils import default_config +class ParseRatelimitSettingsTestcase(TestCase): + def test_depth_1(self) -> None: + cfg = { + "a": { + "per_second": 5, + "burst_count": 10, + } + } + parsed = RatelimitSettings.parse(cfg, "a") + self.assertEqual(parsed, RatelimitSettings("a", 5, 10)) + + def test_depth_2(self) -> None: + cfg = { + "a": { + "b": { + "per_second": 5, + "burst_count": 10, + }, + } + } + parsed = RatelimitSettings.parse(cfg, "a.b") + self.assertEqual(parsed, RatelimitSettings("a.b", 5, 10)) + + def test_missing(self) -> None: + parsed = RatelimitSettings.parse( + {}, "a", defaults={"per_second": 5, "burst_count": 10} + ) + self.assertEqual(parsed, RatelimitSettings("a", 5, 10)) + + class RatelimitConfigTestCase(TestCase): def test_parse_rc_federation(self) -> None: config_dict = default_config("test") From ebd8374fb5f10f84fc818058100ec7ae284835b3 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 30 Aug 2023 06:10:56 -0400 Subject: [PATCH 395/562] Stop writing to the event_txn_id table (#16175) --- changelog.d/16175.misc | 1 + synapse/handlers/message.py | 13 ------ synapse/storage/databases/main/events.py | 35 +--------------- .../storage/databases/main/events_worker.py | 41 +++++++------------ synapse/storage/schema/__init__.py | 16 +++----- tests/handlers/test_message.py | 15 ++----- 6 files changed, 26 insertions(+), 95 deletions(-) create mode 100644 changelog.d/16175.misc diff --git a/changelog.d/16175.misc b/changelog.d/16175.misc new file mode 100644 index 0000000000..308fbc2259 --- /dev/null +++ b/changelog.d/16175.misc @@ -0,0 +1 @@ +Stop using the `event_txn_id` table. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 187c3e6cc0..d6be18cdef 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -908,19 +908,6 @@ class EventCreationHandler: if existing_event_id: return existing_event_id - # Some requsters don't have device IDs (appservice, guests, and access - # tokens minted with the admin API), fallback to checking the access token - # ID, which should be close enough. 
- if requester.access_token_id: - existing_event_id = ( - await self.store.get_event_id_from_transaction_id_and_token_id( - room_id, - requester.user.to_string(), - requester.access_token_id, - txn_id, - ) - ) - return existing_event_id async def get_event_from_transaction( diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index c784612f59..0c1ed75240 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -978,26 +978,12 @@ class PersistEventsStore: """Persist the mapping from transaction IDs to event IDs (if defined).""" inserted_ts = self._clock.time_msec() - to_insert_token_id: List[Tuple[str, str, str, int, str, int]] = [] to_insert_device_id: List[Tuple[str, str, str, str, str, int]] = [] for event, _ in events_and_contexts: txn_id = getattr(event.internal_metadata, "txn_id", None) - token_id = getattr(event.internal_metadata, "token_id", None) device_id = getattr(event.internal_metadata, "device_id", None) if txn_id is not None: - if token_id is not None: - to_insert_token_id.append( - ( - event.event_id, - event.room_id, - event.sender, - token_id, - txn_id, - inserted_ts, - ) - ) - if device_id is not None: to_insert_device_id.append( ( @@ -1010,26 +996,7 @@ class PersistEventsStore: ) ) - # Synapse usually relies on the device_id to scope transactions for events, - # except for users without device IDs (appservice, guests, and access - # tokens minted with the admin API) which use the access token ID instead. - # - # TODO https://github.com/matrix-org/synapse/issues/16042 - if to_insert_token_id: - self.db_pool.simple_insert_many_txn( - txn, - table="event_txn_id", - keys=( - "event_id", - "room_id", - "user_id", - "token_id", - "txn_id", - "inserted_ts", - ), - values=to_insert_token_id, - ) - + # Synapse relies on the device_id to scope transactions for events.. if to_insert_device_id: self.db_pool.simple_insert_many_txn( txn, diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 7e7648c951..1eb313040e 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -2022,25 +2022,6 @@ class EventsWorkerStore(SQLBaseStore): desc="get_next_event_to_expire", func=get_next_event_to_expire_txn ) - async def get_event_id_from_transaction_id_and_token_id( - self, room_id: str, user_id: str, token_id: int, txn_id: str - ) -> Optional[str]: - """Look up if we have already persisted an event for the transaction ID, - returning the event ID if so. 
- """ - return await self.db_pool.simple_select_one_onecol( - table="event_txn_id", - keyvalues={ - "room_id": room_id, - "user_id": user_id, - "token_id": token_id, - "txn_id": txn_id, - }, - retcol="event_id", - allow_none=True, - desc="get_event_id_from_transaction_id_and_token_id", - ) - async def get_event_id_from_transaction_id_and_device_id( self, room_id: str, user_id: str, device_id: str, txn_id: str ) -> Optional[str]: @@ -2072,29 +2053,35 @@ class EventsWorkerStore(SQLBaseStore): """ mapping = {} - txn_id_to_event: Dict[Tuple[str, int, str], str] = {} + txn_id_to_event: Dict[Tuple[str, str, str, str], str] = {} for event in events: - token_id = getattr(event.internal_metadata, "token_id", None) + device_id = getattr(event.internal_metadata, "device_id", None) txn_id = getattr(event.internal_metadata, "txn_id", None) - if token_id and txn_id: + if device_id and txn_id: # Check if this is a duplicate of an event in the given events. - existing = txn_id_to_event.get((event.room_id, token_id, txn_id)) + existing = txn_id_to_event.get( + (event.room_id, event.sender, device_id, txn_id) + ) if existing: mapping[event.event_id] = existing continue # Check if this is a duplicate of an event we've already # persisted. - existing = await self.get_event_id_from_transaction_id_and_token_id( - event.room_id, event.sender, token_id, txn_id + existing = await self.get_event_id_from_transaction_id_and_device_id( + event.room_id, event.sender, device_id, txn_id ) if existing: mapping[event.event_id] = existing - txn_id_to_event[(event.room_id, token_id, txn_id)] = existing + txn_id_to_event[ + (event.room_id, event.sender, device_id, txn_id) + ] = existing else: - txn_id_to_event[(event.room_id, token_id, txn_id)] = event.event_id + txn_id_to_event[ + (event.room_id, event.sender, device_id, txn_id) + ] = event.event_id return mapping diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 649d3c8e9f..422f11f59e 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -SCHEMA_VERSION = 80 # remember to update the list below when updating +SCHEMA_VERSION = 81 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -114,19 +114,15 @@ Changes in SCHEMA_VERSION = 79 Changes in SCHEMA_VERSION = 80 - The event_txn_id_device_id is always written to for new events. - Add tables for the task scheduler. + +Changes in SCHEMA_VERSION = 81 + - The event_txn_id is no longer written to for new events. """ SCHEMA_COMPAT_VERSION = ( - # Queries against `event_stream_ordering` columns in membership tables must - # be disambiguated. - # - # The threads_id column must written to with non-null values for the - # event_push_actions, event_push_actions_staging, and event_push_summary tables. - # - # insertions to the column `full_user_id` of tables profiles and user_filters can no - # longer be null - 76 + # The `event_txn_id_device_id` must be written to for new events. 
+ 80 ) """Limit on how far the synapse codebase can be rolled back without breaking db compat diff --git a/tests/handlers/test_message.py index 9691d66b48..1c5897c84e 100644 --- a/tests/handlers/test_message.py +++ b/tests/handlers/test_message.py @@ -46,18 +46,11 @@ class EventCreationTestCase(unittest.HomeserverTestCase): self._persist_event_storage_controller = persistence self.user_id = self.register_user("tester", "foobar") - self.access_token = self.login("tester", "foobar") - self.room_id = self.helper.create_room_as(self.user_id, tok=self.access_token) + device_id = "dev-1" + access_token = self.login("tester", "foobar", device_id=device_id) + self.room_id = self.helper.create_room_as(self.user_id, tok=access_token) - info = self.get_success( - self.hs.get_datastores().main.get_user_by_access_token( - self.access_token, - ) - ) - assert info is not None - self.token_id = info.token_id - - self.requester = create_requester(self.user_id, access_token_id=self.token_id) + self.requester = create_requester(self.user_id, device_id=device_id) def _create_and_persist_member_event(self) -> Tuple[EventBase, EventContext]: # Create a member event we can use as an auth_event From 8c56e18e4786a28bedbafb1a73fd1abc8806933f Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 30 Aug 2023 11:21:00 +0100 Subject: [PATCH 396/562] 1.91.0 --- CHANGES.md | 5 +++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index fcb50cd8c2..a17734cfe3 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,8 @@ +# Synapse 1.91.0 (2023-08-30) + +No significant changes since 1.91.0rc1. + + # Synapse 1.91.0rc1 (2023-08-23) ### Features diff --git a/debian/changelog b/debian/changelog index 8b0615e421..9c0f77e16a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.91.0) stable; urgency=medium + + * New Synapse release 1.91.0. + + -- Synapse Packaging team Wed, 30 Aug 2023 11:18:10 +0100 + matrix-synapse-py3 (1.91.0~rc1) stable; urgency=medium * New Synapse release 1.91.0rc1. diff --git a/pyproject.toml b/pyproject.toml index c2421d7257..2a4ff1ea01 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.91.0rc1" +version = "1.91.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From a2e0d4cd6024462f0067c56f83c2fe5b67da2109 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 30 Aug 2023 14:18:42 +0100 Subject: [PATCH 397/562] Fix rare bug that broke looping calls (#16210) * Fix rare bug that broke looping calls We can't interact with the reactor from a non-main thread via a looping call. Introduced in v1.90.0 / #15791. * Newsfile --- changelog.d/16210.bugfix | 1 + synapse/storage/databases/main/lock.py | 38 ++++++++++++++--------- tests/storage/databases/main/test_lock.py | 2 ++ 3 files changed, 26 insertions(+), 15 deletions(-) create mode 100644 changelog.d/16210.bugfix diff --git a/changelog.d/16210.bugfix b/changelog.d/16210.bugfix new file mode 100644 index 0000000000..39c35a1fe1 --- /dev/null +++ b/changelog.d/16210.bugfix @@ -0,0 +1 @@ +Fix rare bug that broke looping calls, which could lead to e.g. linearly increasing memory usage. Introduced in v1.90.0.
diff --git a/synapse/storage/databases/main/lock.py b/synapse/storage/databases/main/lock.py index 54d40e7a3a..5a01ec2137 100644 --- a/synapse/storage/databases/main/lock.py +++ b/synapse/storage/databases/main/lock.py @@ -17,7 +17,7 @@ from types import TracebackType from typing import TYPE_CHECKING, Collection, Optional, Set, Tuple, Type from weakref import WeakValueDictionary -from twisted.internet.interfaces import IReactorCore +from twisted.internet.task import LoopingCall from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore @@ -26,6 +26,7 @@ from synapse.storage.database import ( LoggingDatabaseConnection, LoggingTransaction, ) +from synapse.types import ISynapseReactor from synapse.util import Clock from synapse.util.stringutils import random_string @@ -358,7 +359,7 @@ class Lock: def __init__( self, - reactor: IReactorCore, + reactor: ISynapseReactor, clock: Clock, store: LockStore, read_write: bool, @@ -377,19 +378,25 @@ class Lock: self._table = "worker_read_write_locks" if read_write else "worker_locks" - self._looping_call = clock.looping_call( - self._renew, - _RENEWAL_INTERVAL_MS, - store, - clock, - read_write, - lock_name, - lock_key, - token, - ) + # We might be called from a non-main thread, so we defer setting up the + # looping call. + self._looping_call: Optional[LoopingCall] = None + reactor.callFromThread(self._setup_looping_call) self._dropped = False + def _setup_looping_call(self) -> None: + self._looping_call = self._clock.looping_call( + self._renew, + _RENEWAL_INTERVAL_MS, + self._store, + self._clock, + self._read_write, + self._lock_name, + self._lock_key, + self._token, + ) + @staticmethod @wrap_as_background_process("Lock._renew") async def _renew( @@ -459,7 +466,7 @@ class Lock: if self._dropped: return - if self._looping_call.running: + if self._looping_call and self._looping_call.running: self._looping_call.stop() await self._store.db_pool.simple_delete( @@ -486,8 +493,9 @@ class Lock: # We should not be dropped without the lock being released (unless # we're shutting down), but if we are then let's at least stop # renewing the lock. - if self._looping_call.running: - self._looping_call.stop() + if self._looping_call and self._looping_call.running: + # We might be called from a non-main thread. + self._reactor.callFromThread(self._looping_call.stop) if self._reactor.running: logger.error( diff --git a/tests/storage/databases/main/test_lock.py b/tests/storage/databases/main/test_lock.py index f541f1d6be..650b4941ba 100644 --- a/tests/storage/databases/main/test_lock.py +++ b/tests/storage/databases/main/test_lock.py @@ -132,6 +132,7 @@ class LockTestCase(unittest.HomeserverTestCase): # We simulate the process getting stuck by cancelling the looping call # that keeps the lock active. + assert lock._looping_call lock._looping_call.stop() # Wait for the lock to timeout. @@ -403,6 +404,7 @@ class ReadWriteLockTestCase(unittest.HomeserverTestCase): # We simulate the process getting stuck by cancelling the looping call # that keeps the lock active. + assert lock._looping_call lock._looping_call.stop() # Wait for the lock to timeout. 
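The rule being respected in the fix above: Twisted's reactor is not thread-safe and may only be driven from the reactor (main) thread, while these `Lock` objects can be constructed from other threads, so the looping-call setup is bounced over via `callFromThread`. A small standalone sketch of that pattern, with illustrative names rather than Synapse's actual lock API:

    from twisted.internet import reactor
    from twisted.internet.task import LoopingCall

    def renew() -> None:
        print("renewing lock")

    def setup_looping_call() -> None:
        # This runs on the reactor thread, so starting the loop here is safe.
        LoopingCall(renew).start(2.0)  # fire every two seconds

    # Safe to call from any thread: the callable is queued onto the reactor
    # thread instead of touching the reactor directly.
    reactor.callFromThread(setup_looping_call)

    reactor.callLater(5.0, reactor.stop)  # let the demo loop fire a few times
    reactor.run()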
From 3de82bb2af28f56696a79bf41ccffc81385b6e2c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 30 Aug 2023 15:18:34 +0100 Subject: [PATCH 398/562] Gracefully handle failing to thumbnail images (#16211) --- changelog.d/16211.bugfix | 1 + synapse/__init__.py | 5 +++++ synapse/media/media_repository.py | 5 ++++- 3 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16211.bugfix diff --git a/changelog.d/16211.bugfix b/changelog.d/16211.bugfix new file mode 100644 index 0000000000..ab1816386c --- /dev/null +++ b/changelog.d/16211.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where uploading images would fail if we could not generate thumbnails for them. diff --git a/synapse/__init__.py b/synapse/__init__.py index 2f9c22a833..4a9bbc4d57 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -21,9 +21,14 @@ import os import sys from typing import Any, Dict +from PIL import ImageFile + from synapse.util.rust import check_rust_lib_up_to_date from synapse.util.stringutils import strtobool +# Allow truncated JPEG images to be thumbnailed. +ImageFile.LOAD_TRUNCATED_IMAGES = True + # Check that we're not running on an unsupported Python version. # # Note that we use an (unneeded) variable here so that pyupgrade doesn't nuke the diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index 4b750c700b..1b7b014f9a 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -214,7 +214,10 @@ class MediaRepository: user_id=auth_user, ) - await self._generate_thumbnails(None, media_id, media_id, media_type) + try: + await self._generate_thumbnails(None, media_id, media_id, media_type) + except Exception as e: + logger.info("Failed to generate thumbnails: %s", e) return MXCUri(self.server_name, media_id) From ed5e8a77ca3595cdaa5dcc273b8546cb20f88146 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 30 Aug 2023 22:55:47 +0100 Subject: [PATCH 399/562] Ignore redundant casts in latest deps CI job (#16213) --- .github/workflows/latest_deps.yml | 4 ++-- changelog.d/16213.misc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16213.misc diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index ec6391cf8f..7b839f59c1 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -57,8 +57,8 @@ jobs: # `pip install matrix-synapse[all]` as closely as possible. - run: poetry update --no-dev - run: poetry run pip list > after.txt && (diff -u before.txt after.txt || true) - - name: Remove warn_unused_ignores from mypy config - run: sed '/warn_unused_ignores = True/d' -i mypy.ini + - name: Remove unhelpful options from mypy config + run: sed -e '/warn_unused_ignores = True/d' -e '/warn_redundant_casts = True/d' -i mypy.ini - run: poetry run mypy trial: needs: check_repo diff --git a/changelog.d/16213.misc b/changelog.d/16213.misc new file mode 100644 index 0000000000..8c14f5fd51 --- /dev/null +++ b/changelog.d/16213.misc @@ -0,0 +1 @@ +Fix the latest-deps CI job. 
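
Two things combine in patch 398: thumbnailing failures are downgraded from fatal to a logged message, and Pillow is told to tolerate truncated files at all. By default `PIL.Image.load()` raises `OSError` when pixel data ends early; the module-level `ImageFile.LOAD_TRUNCATED_IMAGES = True` switch (the one set in `synapse/__init__.py` above) makes Pillow pad the missing data instead. A small self-contained demonstration using a synthetic image, not Synapse code:

```python
import io

from PIL import Image, ImageFile

# Build a small JPEG in memory, then chop bytes off the end to simulate
# a truncated upload.
buf = io.BytesIO()
Image.new("RGB", (128, 128), "red").save(buf, format="JPEG")
truncated = buf.getvalue()[:-16]

# Without this flag, thumbnailing the truncated file raises
# "OSError: image file is truncated"; with it, Pillow fills in the
# missing scanlines and carries on.
ImageFile.LOAD_TRUNCATED_IMAGES = True

img = Image.open(io.BytesIO(truncated))
img.thumbnail((32, 32))
print(img.size)  # (32, 32)
```
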
From 6525fd65ee52e36929b9c35253c772da16aa2b99 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Fri, 1 Sep 2023 12:41:56 +0100
Subject: [PATCH 400/562] Log the details of background update failures
 (#16212)

---
 changelog.d/16212.misc                  |  1 +
 synapse/storage/background_updates.py   |  4 ++--
 tests/storage/test_background_update.py | 24 +++++++++++++++++++++++-
 3 files changed, 26 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/16212.misc

diff --git a/changelog.d/16212.misc b/changelog.d/16212.misc
new file mode 100644
index 0000000000..19cf9b102d
--- /dev/null
+++ b/changelog.d/16212.misc
@@ -0,0 +1 @@
+Log the details of background update failures.
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index ddca0af1da..7619f405fa 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -405,14 +405,14 @@ class BackgroundUpdater:
             try:
                 result = await self.do_next_background_update(sleep)
                 back_to_back_failures = 0
-            except Exception:
+            except Exception as e:
+                logger.exception("Error doing update: %s", e)
                 back_to_back_failures += 1
                 if back_to_back_failures >= 5:
                     self._aborted = True
                     raise RuntimeError(
                         "5 back-to-back background update failures; aborting."
                     )
-                logger.exception("Error doing update")
             else:
                 if result:
                     logger.info(
diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py
index 52beb4e89d..abf7d0564d 100644
--- a/tests/storage/test_background_update.py
+++ b/tests/storage/test_background_update.py
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import logging
 from unittest.mock import AsyncMock, Mock
 
 import yaml
@@ -330,6 +330,28 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase):
         self.update_handler.side_effect = update_short
         self.get_success(self.updates.do_next_background_update(False))
 
+    def test_failed_update_logs_exception_details(self) -> None:
+        needle = "RUH ROH RAGGY"
+
+        def failing_update(progress: JsonDict, count: int) -> int:
+            raise Exception(needle)
+
+        self.update_handler.side_effect = failing_update
+        self.update_handler.reset_mock()
+
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                values={"update_name": "test_update", "progress_json": "{}"},
+            )
+        )
+
+        with self.assertLogs(level=logging.ERROR) as logs:
+            # Expect the back-to-back-failures RuntimeError to be raised
+            self.get_failure(self.updates.run_background_updates(False), RuntimeError)
+
+        self.assertTrue(any(needle in log for log in logs.output), logs.output)
+
 
 class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase):
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:

From 93f2fdd8d1d56a55bddc5b13fd46042ecabea178 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 1 Sep 2023 13:52:57 +0100
Subject: [PATCH 401/562] Fix typo where we ended up with multiple
 `WorkerLocksHandler` (#16220)

I don't think this has caused any actual issues.

Introduced in #15891
---
 changelog.d/16220.misc | 1 +
 synapse/server.py      | 1 +
 2 files changed, 2 insertions(+)
 create mode 100644 changelog.d/16220.misc

diff --git a/changelog.d/16220.misc b/changelog.d/16220.misc
new file mode 100644
index 0000000000..329e9f76f6
--- /dev/null
+++ b/changelog.d/16220.misc
@@ -0,0 +1 @@
+Fix typo where we ended up with multiple `WorkerLocksHandler`.
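
The reordering in #16212 above is a control-flow fix as much as a logging one: previously the fifth consecutive failure raised `RuntimeError` before ever reaching `logger.exception`, so the stack trace of the update that aborted the loop was never recorded. A stripped-down sketch of the fixed shape (hypothetical names, no Synapse machinery):

```python
import logging
from typing import Callable

logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger("background_updates")


def run_background_updates(do_next_update: Callable[[], None]) -> None:
    back_to_back_failures = 0
    while True:
        try:
            do_next_update()
            back_to_back_failures = 0
        except Exception as e:
            # Log first: even the failure that aborts the loop gets its
            # details recorded (logger.exception appends the current
            # exception's traceback to the message).
            logger.exception("Error doing update: %s", e)
            back_to_back_failures += 1
            if back_to_back_failures >= 5:
                raise RuntimeError(
                    "5 back-to-back background update failures; aborting."
                )


def failing_update() -> None:
    raise ValueError("RUH ROH RAGGY")


try:
    run_background_updates(failing_update)
except RuntimeError as e:
    print(e)
```
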
diff --git a/synapse/server.py b/synapse/server.py
index fd16dacd0d..71ead524d6 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -913,6 +913,7 @@ class HomeServer(metaclass=abc.ABCMeta):
         """Usage metrics shared between phone home stats and the prometheus exporter."""
         return CommonUsageMetricsManager(self)
 
+    @cache_in_self
     def get_worker_locks_handler(self) -> WorkerLocksHandler:
         return WorkerLocksHandler(self)
 

From b85c3485b15900240a61fb5f2dca606adc1ff268 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 1 Sep 2023 13:52:57 +0100
Subject: [PATCH 402/562] Fix typo where we ended up with multiple
 `WorkerLocksHandler` (#16220)

I don't think this has caused any actual issues.

Introduced in #15891
---
 changelog.d/16220.misc | 1 +
 synapse/server.py      | 1 +
 2 files changed, 2 insertions(+)
 create mode 100644 changelog.d/16220.misc

diff --git a/changelog.d/16220.misc b/changelog.d/16220.misc
new file mode 100644
index 0000000000..329e9f76f6
--- /dev/null
+++ b/changelog.d/16220.misc
@@ -0,0 +1 @@
+Fix typo where we ended up with multiple `WorkerLocksHandler`.
diff --git a/synapse/server.py b/synapse/server.py
index 7cdd3ea3c2..8f5e4fc140 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -914,6 +914,7 @@ class HomeServer(metaclass=abc.ABCMeta):
         """Usage metrics shared between phone home stats and the prometheus exporter."""
         return CommonUsageMetricsManager(self)
 
+    @cache_in_self
     def get_worker_locks_handler(self) -> WorkerLocksHandler:
         return WorkerLocksHandler(self)
 

From dcd3698e1f68bba3b3be8c09cf536295b67eeec5 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Fri, 1 Sep 2023 16:09:23 +0100
Subject: [PATCH 403/562] Tentatively update changelog

Will need to confirm this though
---
 changelog.d/16220.bugfix | 1 +
 changelog.d/16220.misc   | 1 -
 2 files changed, 1 insertion(+), 1 deletion(-)
 create mode 100644 changelog.d/16220.bugfix
 delete mode 100644 changelog.d/16220.misc

diff --git a/changelog.d/16220.bugfix b/changelog.d/16220.bugfix
new file mode 100644
index 0000000000..dcfac6bda1
--- /dev/null
+++ b/changelog.d/16220.bugfix
@@ -0,0 +1 @@
+Fix a performance regression introduced in Synapse 1.91.0 where event persistence would cause excessive CPU usage over time.
diff --git a/changelog.d/16220.misc b/changelog.d/16220.misc
deleted file mode 100644
index 329e9f76f6..0000000000
--- a/changelog.d/16220.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix typo where we ended up with multiple `WorkerLocksHandler`.

From e9eb26e3aff63545c77980f0f7a0c04bcbccbda0 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Mon, 4 Sep 2023 11:57:59 +0100
Subject: [PATCH 404/562] Cache device resync requests over replication
 (#16241)

---
 changelog.d/16241.misc              | 1 +
 synapse/replication/http/devices.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/16241.misc

diff --git a/changelog.d/16241.misc b/changelog.d/16241.misc
new file mode 100644
index 0000000000..0fc5f34c5c
--- /dev/null
+++ b/changelog.d/16241.misc
@@ -0,0 +1 @@
+Cache device resync requests over replication.
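
For context on the `@cache_in_self` fix (#16220, applied to both branches above): `HomeServer` accessors are meant to memoise the object they build so that every subsystem shares one instance. Without the decorator, each call to `get_worker_locks_handler()` constructed a fresh `WorkerLocksHandler`, splitting lock state across copies. A simplified re-implementation of the idea (the real decorator also guards against re-entrant construction):

```python
import functools
from typing import Callable, TypeVar

T = TypeVar("T")


def cache_in_self(builder: Callable[["HomeServer"], T]) -> Callable[["HomeServer"], T]:
    # Cache the built object on the instance so every caller shares it.
    attr = "_cached_" + builder.__name__

    @functools.wraps(builder)
    def wrapped(self: "HomeServer") -> T:
        if not hasattr(self, attr):
            setattr(self, attr, builder(self))
        return getattr(self, attr)

    return wrapped


class WorkerLocksHandler:
    pass


class HomeServer:
    @cache_in_self
    def get_worker_locks_handler(self) -> WorkerLocksHandler:
        return WorkerLocksHandler()


hs = HomeServer()
assert hs.get_worker_locks_handler() is hs.get_worker_locks_handler()
```
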
diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py index 73f3de3642..209833d287 100644 --- a/synapse/replication/http/devices.py +++ b/synapse/replication/http/devices.py @@ -62,7 +62,7 @@ class ReplicationMultiUserDevicesResyncRestServlet(ReplicationEndpoint): NAME = "multi_user_device_resync" PATH_ARGS = () - CACHE = False + CACHE = True def __init__(self, hs: "HomeServer"): super().__init__(hs) From 8065eea6c703f5300bdef3a4238ad85e38d4faa9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Sep 2023 12:03:27 +0100 Subject: [PATCH 405/562] Bump sentry-sdk from 1.29.2 to 1.30.0 (#16236) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index 6d63d71b2c..8cb1dc04a8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. [[package]] name = "alabaster" @@ -2077,6 +2077,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -2084,8 +2085,15 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = 
"PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -2102,6 +2110,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -2109,6 +2118,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -2380,13 +2390,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "1.29.2" +version = "1.30.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = "*" files = [ - {file = "sentry-sdk-1.29.2.tar.gz", hash = "sha256:a99ee105384788c3f228726a88baf515fe7b5f1d2d0f215a03d194369f158df7"}, - {file = 
"sentry_sdk-1.29.2-py2.py3-none-any.whl", hash = "sha256:3e17215d8006612e2df02b0e73115eb8376c37e3f586d8436fa41644e605074d"}, + {file = "sentry-sdk-1.30.0.tar.gz", hash = "sha256:7dc873b87e1faf4d00614afd1058bfa1522942f33daef8a59f90de8ed75cd10c"}, + {file = "sentry_sdk-1.30.0-py2.py3-none-any.whl", hash = "sha256:2e53ad63f96bb9da6570ba2e755c267e529edcf58580a2c0d2a11ef26e1e678b"}, ] [package.dependencies] @@ -2409,6 +2419,7 @@ httpx = ["httpx (>=0.16.0)"] huey = ["huey (>=2)"] loguru = ["loguru (>=0.5)"] opentelemetry = ["opentelemetry-distro (>=0.35b0)"] +opentelemetry-experimental = ["opentelemetry-distro (>=0.40b0,<1.0)", "opentelemetry-instrumentation-aiohttp-client (>=0.40b0,<1.0)", "opentelemetry-instrumentation-django (>=0.40b0,<1.0)", "opentelemetry-instrumentation-fastapi (>=0.40b0,<1.0)", "opentelemetry-instrumentation-flask (>=0.40b0,<1.0)", "opentelemetry-instrumentation-requests (>=0.40b0,<1.0)", "opentelemetry-instrumentation-sqlite3 (>=0.40b0,<1.0)", "opentelemetry-instrumentation-urllib (>=0.40b0,<1.0)"] pure-eval = ["asttokens", "executing", "pure-eval"] pymongo = ["pymongo (>=3.1)"] pyspark = ["pyspark (>=2.4.4)"] @@ -3339,4 +3350,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "87163d8994d09d3a7983ff647a9987d4277a3966dee48741437f4e98bca7e6db" +content-hash = "4a3a82becd89b91e76e2bc2f8ba72123f665c517d9b841d9a34cd01b83a1adc3" From 4382d5764091c3b8f083ae053d96feb97e7a857f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Sep 2023 12:04:50 +0100 Subject: [PATCH 406/562] Bump phonenumbers from 8.13.18 to 8.13.19 (#16237) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 8cb1dc04a8..0688d5d92e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1607,13 +1607,13 @@ files = [ [[package]] name = "phonenumbers" -version = "8.13.18" +version = "8.13.19" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.18-py2.py3-none-any.whl", hash = "sha256:3d802739a22592e4127139349937753dee9b6a20bdd5d56847cd885bdc766b1f"}, - {file = "phonenumbers-8.13.18.tar.gz", hash = "sha256:b360c756252805d44b447b5bca6d250cf6bd6c69b6f0f4258f3bfe5ab81bef69"}, + {file = "phonenumbers-8.13.19-py2.py3-none-any.whl", hash = "sha256:ba542f20f6dc83be8f127f240f9b5b7e7c1dec42aceff1879400d4dc0c781d81"}, + {file = "phonenumbers-8.13.19.tar.gz", hash = "sha256:38180247697240ccedd74dec4bfbdbc22bb108b9c5f991f270ca3e41395e6f96"}, ] [[package]] From 748c38921cbde602833c36f9494aa4d991750604 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Sep 2023 13:25:39 +0100 Subject: [PATCH 407/562] Bump furo from 2023.7.26 to 2023.8.19 (#16238) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 0688d5d92e..1cefabb358 100644 --- a/poetry.lock +++ b/poetry.lock @@ -555,13 +555,13 @@ dev = ["Sphinx", "coverage", "flake8", "lxml", "lxml-stubs", "memory-profiler", [[package]] name = "furo" -version = "2023.7.26" +version = "2023.8.19" description = "A clean customisable Sphinx documentation theme." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "furo-2023.7.26-py3-none-any.whl", hash = "sha256:1c7936929ec57c5ddecc7c85f07fa8b2ce536b5c89137764cca508be90e11efd"}, - {file = "furo-2023.7.26.tar.gz", hash = "sha256:257f63bab97aa85213a1fa24303837a3c3f30be92901ec732fea74290800f59e"}, + {file = "furo-2023.8.19-py3-none-any.whl", hash = "sha256:12f99f87a1873b6746228cfde18f77244e6c1ffb85d7fed95e638aae70d80590"}, + {file = "furo-2023.8.19.tar.gz", hash = "sha256:e671ee638ab3f1b472f4033b0167f502ab407830e0db0f843b1c1028119c9cd1"}, ] [package.dependencies] From 1cd0715a0f753512abacecb680bd6d48b7a89fe4 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 4 Sep 2023 14:04:16 +0100 Subject: [PATCH 408/562] 1.91.1 --- CHANGES.md | 7 +++++++ changelog.d/16220.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 14 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/16220.bugfix diff --git a/CHANGES.md b/CHANGES.md index a17734cfe3..7bd9d31619 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,10 @@ +# Synapse 1.91.1 (2023-09-04) + +### Bugfixes + +- Fix a performance regression introduced in Synapse 1.91.0 where event persistence would cause an excessive linear growth in CPU usage. ([\#16220](https://github.com/matrix-org/synapse/issues/16220)) + + # Synapse 1.91.0 (2023-08-30) No significant changes since 1.91.0rc1. diff --git a/changelog.d/16220.bugfix b/changelog.d/16220.bugfix deleted file mode 100644 index dcfac6bda1..0000000000 --- a/changelog.d/16220.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a performance regression introduced in Synapse 1.91.0 where event persistence would cause excessive CPU usage over time. diff --git a/debian/changelog b/debian/changelog index 9c0f77e16a..f737041567 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.91.1) stable; urgency=medium + + * New Synapse release 1.91.1. + + -- Synapse Packaging team Mon, 04 Sep 2023 14:03:18 +0100 + matrix-synapse-py3 (1.91.0) stable; urgency=medium * New Synapse release 1.91.0. diff --git a/pyproject.toml b/pyproject.toml index 2a4ff1ea01..409b27d902 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.91.0" +version = "1.91.1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From f84baecb6f57b5ddb570c43574f774fae5e8afed Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 4 Sep 2023 14:04:43 +0100 Subject: [PATCH 409/562] Don't reset retry timers on "valid" error codes (#16221) --- changelog.d/16221.bugfix | 1 + synapse/federation/transport/client.py | 4 +++- synapse/http/matrixfederationclient.py | 8 ++++++++ synapse/util/retryutils.py | 18 ++++++++++++++++-- tests/handlers/test_typing.py | 4 ++-- 5 files changed, 30 insertions(+), 5 deletions(-) create mode 100644 changelog.d/16221.bugfix diff --git a/changelog.d/16221.bugfix b/changelog.d/16221.bugfix new file mode 100644 index 0000000000..22678256e4 --- /dev/null +++ b/changelog.d/16221.bugfix @@ -0,0 +1 @@ +Fix long-standing bug where we did not correctly back off from servers that had "gone" if they returned 4xx series error codes. 
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 5ce3f345cb..b5e4b2680e 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -249,8 +249,10 @@ class TransportLayerClient: data=json_data, json_data_callback=json_data_callback, long_retries=True, - backoff_on_404=True, # If we get a 404 the other side has gone try_trailing_slash_on_400=True, + # Sending a transaction should always succeed, if it doesn't + # then something is wrong and we should backoff. + backoff_on_all_error_codes=True, ) async def make_query( diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 11342ccac8..08c7fc1631 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -512,6 +512,7 @@ class MatrixFederationHttpClient: long_retries: bool = False, ignore_backoff: bool = False, backoff_on_404: bool = False, + backoff_on_all_error_codes: bool = False, ) -> IResponse: """ Sends a request to the given server. @@ -552,6 +553,7 @@ class MatrixFederationHttpClient: and try the request anyway. backoff_on_404: Back off if we get a 404 + backoff_on_all_error_codes: Back off if we get any error response Returns: Resolves with the HTTP response object on success. @@ -594,6 +596,7 @@ class MatrixFederationHttpClient: ignore_backoff=ignore_backoff, notifier=self.hs.get_notifier(), replication_client=self.hs.get_replication_command_handler(), + backoff_on_all_error_codes=backoff_on_all_error_codes, ) method_bytes = request.method.encode("ascii") @@ -889,6 +892,7 @@ class MatrixFederationHttpClient: backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, parser: Literal[None] = None, + backoff_on_all_error_codes: bool = False, ) -> JsonDict: ... @@ -906,6 +910,7 @@ class MatrixFederationHttpClient: backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, parser: Optional[ByteParser[T]] = None, + backoff_on_all_error_codes: bool = False, ) -> T: ... @@ -922,6 +927,7 @@ class MatrixFederationHttpClient: backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, parser: Optional[ByteParser[T]] = None, + backoff_on_all_error_codes: bool = False, ) -> Union[JsonDict, T]: """Sends the specified json data using PUT @@ -957,6 +963,7 @@ class MatrixFederationHttpClient: enabled. parser: The parser to use to decode the response. Defaults to parsing as JSON. + backoff_on_all_error_codes: Back off if we get any error response Returns: Succeeds when we get a 2xx HTTP response. The @@ -990,6 +997,7 @@ class MatrixFederationHttpClient: ignore_backoff=ignore_backoff, long_retries=long_retries, timeout=timeout, + backoff_on_all_error_codes=backoff_on_all_error_codes, ) if timeout is not None: diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 27e9fc976c..9d2065372c 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -128,6 +128,7 @@ class RetryDestinationLimiter: backoff_on_failure: bool = True, notifier: Optional["Notifier"] = None, replication_client: Optional["ReplicationCommandHandler"] = None, + backoff_on_all_error_codes: bool = False, ): """Marks the destination as "down" if an exception is thrown in the context, except for CodeMessageException with code < 500. @@ -147,6 +148,9 @@ class RetryDestinationLimiter: backoff_on_failure: set to False if we should not increase the retry interval on a failure. 
+ + backoff_on_all_error_codes: Whether we should back off on any + error code. """ self.clock = clock self.store = store @@ -156,6 +160,7 @@ class RetryDestinationLimiter: self.retry_interval = retry_interval self.backoff_on_404 = backoff_on_404 self.backoff_on_failure = backoff_on_failure + self.backoff_on_all_error_codes = backoff_on_all_error_codes self.notifier = notifier self.replication_client = replication_client @@ -179,6 +184,7 @@ class RetryDestinationLimiter: exc_val: Optional[BaseException], exc_tb: Optional[TracebackType], ) -> None: + success = exc_type is None valid_err_code = False if exc_type is None: valid_err_code = True @@ -195,7 +201,9 @@ class RetryDestinationLimiter: # won't accept our requests for at least a while. # 429 is us being aggressively rate limited, so lets rate limit # ourselves. - if exc_val.code == 404 and self.backoff_on_404: + if self.backoff_on_all_error_codes: + valid_err_code = False + elif exc_val.code == 404 and self.backoff_on_404: valid_err_code = False elif exc_val.code in (401, 429): valid_err_code = False @@ -204,7 +212,7 @@ class RetryDestinationLimiter: else: valid_err_code = False - if valid_err_code: + if success: # We connected successfully. if not self.retry_interval: return @@ -215,6 +223,12 @@ class RetryDestinationLimiter: self.failure_ts = None retry_last_ts = 0 self.retry_interval = 0 + elif valid_err_code: + # We got a potentially valid error code back. We don't reset the + # timers though, as the other side might actually be down anyway + # (e.g. some deprovisioned servers will always return a 404 or 403, + # and we don't want to keep resetting the retry timers for them). + return elif not self.backoff_on_failure: return else: diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 2a295da3a0..43c513b157 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -251,8 +251,8 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ), json_data_callback=ANY, long_retries=True, - backoff_on_404=True, try_trailing_slash_on_400=True, + backoff_on_all_error_codes=True, ) def test_started_typing_remote_recv(self) -> None: @@ -366,7 +366,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ), json_data_callback=ANY, long_retries=True, - backoff_on_404=True, + backoff_on_all_error_codes=True, try_trailing_slash_on_400=True, ) From dcb27783417a1161c484525afb839233299b847f Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Mon, 4 Sep 2023 18:13:28 +0200 Subject: [PATCH 410/562] Add last_seen_ts to the admin users API (#16218) --- changelog.d/16218.feature | 1 + docs/admin_api/user_admin_api.md | 2 + synapse/handlers/admin.py | 1 + synapse/rest/admin/users.py | 1 + synapse/storage/databases/main/__init__.py | 6 +- .../storage/databases/main/registration.py | 7 ++- synapse/storage/databases/main/stats.py | 1 + synapse/types/__init__.py | 2 + tests/rest/admin/test_user.py | 60 +++++++++++++++++++ tests/storage/test_registration.py | 1 + 10 files changed, 80 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16218.feature diff --git a/changelog.d/16218.feature b/changelog.d/16218.feature new file mode 100644 index 0000000000..4afd092e88 --- /dev/null +++ b/changelog.d/16218.feature @@ -0,0 +1 @@ +Add `last_seen_ts` to the admin users API. 
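
Stepping back to #16221 above: the behavioural contract after the change is that only a genuine success clears a destination's retry timers, a "valid" 4xx (for example a deprovisioned server that permanently answers 403 or 404) leaves them untouched, and any other failure escalates the backoff. A toy model of that three-way decision (illustrative constants, not Synapse's `RetryDestinationLimiter`):

```python
MIN_RETRY_INTERVAL_MS = 10 * 60 * 1000  # illustrative, not Synapse's constant
RETRY_MULTIPLIER = 2                    # illustrative


def next_retry_interval(current_ms: int, outcome: str) -> int:
    """Toy version of the three branches in RetryDestinationLimiter.__exit__."""
    if outcome == "success":
        # Genuine success: the destination is back, clear the backoff.
        return 0
    if outcome == "valid_error":
        # A "valid" error code: don't reset, but don't escalate either,
        # since the other side may actually be gone for good.
        return current_ms
    # Anything else: escalate the backoff.
    return max(MIN_RETRY_INTERVAL_MS, current_ms * RETRY_MULTIPLIER)


assert next_retry_interval(0, "failure") == MIN_RETRY_INTERVAL_MS
assert next_retry_interval(MIN_RETRY_INTERVAL_MS, "valid_error") == MIN_RETRY_INTERVAL_MS
assert next_retry_interval(MIN_RETRY_INTERVAL_MS, "success") == 0
```
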
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index 8032e05497..975a7a0da4 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -242,6 +242,7 @@ The following parameters should be set in the URL:
   - `displayname` - Users are ordered alphabetically by `displayname`.
   - `avatar_url` - Users are ordered alphabetically by avatar URL.
   - `creation_ts` - Users are ordered by when the user was created in ms.
+  - `last_seen_ts` - Users are ordered by when the user was last seen in ms.
 
 - `dir` - Direction of user order. Either `f` for forwards or `b` for backwards. Setting this value to `b` will reverse the above sort order. Defaults to `f`.
 
@@ -272,6 +273,7 @@ The following fields are returned in the JSON response body:
   - `displayname` - string - The user's display name if they have set one.
   - `avatar_url` - string - The user's avatar URL if they have set one.
   - `creation_ts` - integer - The user's creation timestamp in ms.
+  - `last_seen_ts` - integer - The user's last activity timestamp in ms.
 
 - `next_token`: string representing a positive integer - Indication for pagination. See above.
 - `total` - integer - Total number of users.
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 0e812a6d8b..2f0e5f3b0a 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -76,6 +76,7 @@ class AdminHandler:
             "consent_ts",
             "user_type",
             "is_guest",
+            "last_seen_ts",
         }
 
         if self._msc3866_enabled:
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 625a47ec1a..91898a5c13 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -132,6 +132,7 @@ class UsersRestServletV2(RestServlet):
                 UserSortOrder.AVATAR_URL.value,
                 UserSortOrder.SHADOW_BANNED.value,
                 UserSortOrder.CREATION_TS.value,
+                UserSortOrder.LAST_SEEN_TS.value,
             ),
         )
diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py
index a85633efcd..0836e247ef 100644
--- a/synapse/storage/databases/main/__init__.py
+++ b/synapse/storage/databases/main/__init__.py
@@ -277,6 +277,10 @@ class DataStore(
             FROM users as u
             LEFT JOIN profiles AS p ON u.name = p.full_user_id
             LEFT JOIN erased_users AS eu ON u.name = eu.user_id
+            LEFT JOIN (
+                SELECT user_id, MAX(last_seen) AS last_seen_ts
+                FROM user_ips GROUP BY user_id
+            ) ls ON u.name = ls.user_id
             {where_clause}
             """
         sql = "SELECT COUNT(*) as total_users " + sql_base
@@ -286,7 +290,7 @@ class DataStore(
         sql = f"""
             SELECT name, user_type, is_guest, admin, deactivated, shadow_banned,
             displayname, avatar_url, creation_ts * 1000 as creation_ts, approved,
-            eu.user_id is not null as erased
+            eu.user_id is not null as erased, last_seen_ts
             {sql_base}
             ORDER BY {order_by_column} {order}, u.name ASC
             LIMIT ? OFFSET ?
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index d3a01d526f..7e85b73e8e 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -206,8 +206,12 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
             consent_server_notice_sent, appservice_id, creation_ts, user_type,
             deactivated, COALESCE(shadow_banned, FALSE) AS shadow_banned,
             COALESCE(approved, TRUE) AS approved,
-            COALESCE(locked, FALSE) AS locked
+            COALESCE(locked, FALSE) AS locked, last_seen_ts
             FROM users
+            LEFT JOIN (
+                SELECT user_id, MAX(last_seen) AS last_seen_ts
+                FROM user_ips GROUP BY user_id
+            ) ls ON users.name = ls.user_id
             WHERE name = ?
""", (user_id,), @@ -268,6 +272,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): is_shadow_banned=bool(user_data["shadow_banned"]), user_id=UserID.from_string(user_data["name"]), user_type=user_data["user_type"], + last_seen_ts=user_data["last_seen_ts"], ) async def is_trial_user(self, user_id: str) -> bool: diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 6298f0984d..3a2966b9e4 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -107,6 +107,7 @@ class UserSortOrder(Enum): AVATAR_URL = "avatar_url" SHADOW_BANNED = "shadow_banned" CREATION_TS = "creation_ts" + LAST_SEEN_TS = "last_seen_ts" class StatsStore(StateDeltasStore): diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index e750417189..488714f60c 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -946,6 +946,7 @@ class UserInfo: is_guest: True if the user is a guest user. is_shadow_banned: True if the user has been shadow-banned. user_type: User type (None for normal user, 'support' and 'bot' other options). + last_seen_ts: Last activity timestamp of the user. """ user_id: UserID @@ -958,6 +959,7 @@ class UserInfo: is_deactivated: bool is_guest: bool is_shadow_banned: bool + last_seen_ts: Optional[int] class UserProfile(TypedDict): diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 2f6bd0d74f..761871b933 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -40,6 +40,7 @@ from synapse.rest.client import ( user_directory, ) from synapse.server import HomeServer +from synapse.storage.databases.main.client_ips import LAST_SEEN_GRANULARITY from synapse.types import JsonDict, UserID, create_requester from synapse.util import Clock @@ -456,6 +457,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, + room.register_servlets, ] url = "/_synapse/admin/v2/users" @@ -506,6 +508,62 @@ class UsersListTestCase(unittest.HomeserverTestCase): # Check that all fields are available self._check_fields(channel.json_body["users"]) + def test_last_seen(self) -> None: + """ + Test that last_seen_ts field is properly working. 
+ """ + user1 = self.register_user("u1", "pass") + user1_token = self.login("u1", "pass") + user2 = self.register_user("u2", "pass") + user2_token = self.login("u2", "pass") + user3 = self.register_user("u3", "pass") + user3_token = self.login("u3", "pass") + + self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) + self.reactor.advance(10) + self.helper.create_room_as(user2, tok=user2_token) + self.reactor.advance(10) + self.helper.create_room_as(user1, tok=user1_token) + self.reactor.advance(10) + self.helper.create_room_as(user3, tok=user3_token) + self.reactor.advance(10) + + channel = self.make_request( + "GET", + self.url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(4, len(channel.json_body["users"])) + self.assertEqual(4, channel.json_body["total"]) + + admin_last_seen = channel.json_body["users"][0]["last_seen_ts"] + user1_last_seen = channel.json_body["users"][1]["last_seen_ts"] + user2_last_seen = channel.json_body["users"][2]["last_seen_ts"] + user3_last_seen = channel.json_body["users"][3]["last_seen_ts"] + self.assertTrue(admin_last_seen > 0 and admin_last_seen < 10000) + self.assertTrue(user2_last_seen > 10000 and user2_last_seen < 20000) + self.assertTrue(user1_last_seen > 20000 and user1_last_seen < 30000) + self.assertTrue(user3_last_seen > 30000 and user3_last_seen < 40000) + + self._order_test([self.admin_user, user2, user1, user3], "last_seen_ts") + + self.reactor.advance(LAST_SEEN_GRANULARITY / 1000) + self.helper.create_room_as(user1, tok=user1_token) + self.reactor.advance(10) + + channel = self.make_request( + "GET", + self.url + "/" + user1, + access_token=self.admin_user_tok, + ) + self.assertTrue( + channel.json_body["last_seen_ts"] > 40000 + LAST_SEEN_GRANULARITY + ) + + self._order_test([self.admin_user, user2, user3, user1], "last_seen_ts") + def test_search_term(self) -> None: """Test that searching for a users works correctly""" @@ -1135,6 +1193,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): self.assertIn("displayname", u) self.assertIn("avatar_url", u) self.assertIn("creation_ts", u) + self.assertIn("last_seen_ts", u) def _create_users(self, number_users: int) -> None: """ @@ -3035,6 +3094,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertIn("consent_version", content) self.assertIn("consent_ts", content) self.assertIn("external_ids", content) + self.assertIn("last_seen_ts", content) # This key was removed intentionally. Ensure it is not accidentally re-included. self.assertNotIn("password_hash", content) diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index ba41459d08..95c9792d54 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -51,6 +51,7 @@ class RegistrationStoreTestCase(HomeserverTestCase): "locked": 0, "shadow_banned": 0, "approved": 1, + "last_seen_ts": None, }, (self.get_success(self.store.get_user_by_id(self.user_id))), ) From d35bed8369514fe727b4fe1afb68f48cc8b2655a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 4 Sep 2023 17:14:09 +0100 Subject: [PATCH 411/562] Don't wake up destination transaction queue if they're not due for retry. 
(#16223) --- changelog.d/16223.feature | 1 + synapse/federation/send_queue.py | 12 +-- synapse/federation/sender/__init__.py | 86 +++++++++++++------ .../sender/per_destination_queue.py | 6 +- synapse/handlers/device.py | 26 +++--- synapse/handlers/devicemessage.py | 7 +- synapse/handlers/presence.py | 16 ++-- synapse/handlers/typing.py | 14 ++- synapse/module_api/__init__.py | 2 +- synapse/replication/tcp/client.py | 8 +- .../storage/databases/main/transactions.py | 26 +++++- synapse/util/retryutils.py | 25 ++++++ tests/federation/test_federation_sender.py | 27 ++++-- tests/handlers/test_presence.py | 60 ++++++++++--- tests/handlers/test_typing.py | 2 - 15 files changed, 228 insertions(+), 90 deletions(-) create mode 100644 changelog.d/16223.feature diff --git a/changelog.d/16223.feature b/changelog.d/16223.feature new file mode 100644 index 0000000000..a52d66658b --- /dev/null +++ b/changelog.d/16223.feature @@ -0,0 +1 @@ +Improve resource usage when sending data to a large number of remote hosts that are marked as "down". diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index fb448f2155..6520795635 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -49,7 +49,7 @@ from synapse.api.presence import UserPresenceState from synapse.federation.sender import AbstractFederationSender, FederationSender from synapse.metrics import LaterGauge from synapse.replication.tcp.streams.federation import FederationStream -from synapse.types import JsonDict, ReadReceipt, RoomStreamToken +from synapse.types import JsonDict, ReadReceipt, RoomStreamToken, StrCollection from synapse.util.metrics import Measure from .units import Edu @@ -229,7 +229,7 @@ class FederationRemoteSendQueue(AbstractFederationSender): """ # nothing to do here: the replication listener will handle it. - def send_presence_to_destinations( + async def send_presence_to_destinations( self, states: Iterable[UserPresenceState], destinations: Iterable[str] ) -> None: """As per FederationSender @@ -245,7 +245,9 @@ class FederationRemoteSendQueue(AbstractFederationSender): self.notifier.on_new_replication_data() - def send_device_messages(self, destination: str, immediate: bool = True) -> None: + async def send_device_messages( + self, destinations: StrCollection, immediate: bool = True + ) -> None: """As per FederationSender""" # We don't need to replicate this as it gets sent down a different # stream. 
@@ -463,7 +465,7 @@ class ParsedFederationStreamData:
     edus: Dict[str, List[Edu]]
 
 
-def process_rows_for_federation(
+async def process_rows_for_federation(
     transaction_queue: FederationSender,
     rows: List[FederationStream.FederationStreamRow],
 ) -> None:
@@ -496,7 +498,7 @@ def process_rows_for_federation(
     parsed_row.add_to_buffer(buff)
 
     for state, destinations in buff.presence_destinations:
-        transaction_queue.send_presence_to_destinations(
+        await transaction_queue.send_presence_to_destinations(
             states=[state], destinations=destinations
         )
 
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 97abbdee18..fb20fd8a10 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -147,7 +147,10 @@ from twisted.internet import defer
 import synapse.metrics
 from synapse.api.presence import UserPresenceState
 from synapse.events import EventBase
-from synapse.federation.sender.per_destination_queue import PerDestinationQueue
+from synapse.federation.sender.per_destination_queue import (
+    CATCHUP_RETRY_INTERVAL,
+    PerDestinationQueue,
+)
 from synapse.federation.sender.transaction_manager import TransactionManager
 from synapse.federation.units import Edu
 from synapse.logging.context import make_deferred_yieldable, run_in_background
@@ -161,9 +164,10 @@ from synapse.metrics.background_process_metrics import (
     run_as_background_process,
     wrap_as_background_process,
 )
-from synapse.types import JsonDict, ReadReceipt, RoomStreamToken
+from synapse.types import JsonDict, ReadReceipt, RoomStreamToken, StrCollection
 from synapse.util import Clock
 from synapse.util.metrics import Measure
+from synapse.util.retryutils import filter_destinations_by_retry_limiter
 
 if TYPE_CHECKING:
     from synapse.events.presence_router import PresenceRouter
@@ -213,7 +217,7 @@ class AbstractFederationSender(metaclass=abc.ABCMeta):
         raise NotImplementedError()
 
     @abc.abstractmethod
-    def send_presence_to_destinations(
+    async def send_presence_to_destinations(
         self, states: Iterable[UserPresenceState], destinations: Iterable[str]
     ) -> None:
         """Send the given presence states to the given destinations.
@@ -242,9 +246,11 @@ class AbstractFederationSender(metaclass=abc.ABCMeta):
         raise NotImplementedError()
 
     @abc.abstractmethod
-    def send_device_messages(self, destination: str, immediate: bool = True) -> None:
+    async def send_device_messages(
+        self, destinations: StrCollection, immediate: bool = True
+    ) -> None:
         """Tells the sender that a new device message is ready to be sent to the
-        destination. The `immediate` flag specifies whether we should try to
+        destinations. The `immediate` flag specifies whether we should try to
         send the messages immediately, or whether sending can be delayed for a
         short while (to aid performance).
""" @@ -716,6 +722,13 @@ class FederationSender(AbstractFederationSender): pdu.internal_metadata.stream_ordering, ) + destinations = await filter_destinations_by_retry_limiter( + destinations, + clock=self.clock, + store=self.store, + retry_due_within_ms=CATCHUP_RETRY_INTERVAL, + ) + for destination in destinations: self._get_per_destination_queue(destination).send_pdu(pdu) @@ -763,12 +776,20 @@ class FederationSender(AbstractFederationSender): domains_set = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation( room_id ) - domains = [ + domains: StrCollection = [ d for d in domains_set if not self.is_mine_server_name(d) and self._federation_shard_config.should_handle(self._instance_name, d) ] + + domains = await filter_destinations_by_retry_limiter( + domains, + clock=self.clock, + store=self.store, + retry_due_within_ms=CATCHUP_RETRY_INTERVAL, + ) + if not domains: return @@ -816,7 +837,7 @@ class FederationSender(AbstractFederationSender): for queue in queues: queue.flush_read_receipts_for_room(room_id) - def send_presence_to_destinations( + async def send_presence_to_destinations( self, states: Iterable[UserPresenceState], destinations: Iterable[str] ) -> None: """Send the given presence states to the given destinations. @@ -831,13 +852,20 @@ class FederationSender(AbstractFederationSender): for state in states: assert self.is_mine_id(state.user_id) + destinations = await filter_destinations_by_retry_limiter( + [ + d + for d in destinations + if self._federation_shard_config.should_handle(self._instance_name, d) + ], + clock=self.clock, + store=self.store, + retry_due_within_ms=CATCHUP_RETRY_INTERVAL, + ) + for destination in destinations: if self.is_mine_server_name(destination): continue - if not self._federation_shard_config.should_handle( - self._instance_name, destination - ): - continue self._get_per_destination_queue(destination).send_presence( states, start_loop=False @@ -896,21 +924,29 @@ class FederationSender(AbstractFederationSender): else: queue.send_edu(edu) - def send_device_messages(self, destination: str, immediate: bool = True) -> None: - if self.is_mine_server_name(destination): - logger.warning("Not sending device update to ourselves") - return + async def send_device_messages( + self, destinations: StrCollection, immediate: bool = True + ) -> None: + destinations = await filter_destinations_by_retry_limiter( + [ + destination + for destination in destinations + if self._federation_shard_config.should_handle( + self._instance_name, destination + ) + and not self.is_mine_server_name(destination) + ], + clock=self.clock, + store=self.store, + retry_due_within_ms=CATCHUP_RETRY_INTERVAL, + ) - if not self._federation_shard_config.should_handle( - self._instance_name, destination - ): - return - - if immediate: - self._get_per_destination_queue(destination).attempt_new_transaction() - else: - self._get_per_destination_queue(destination).mark_new_data() - self._destination_wakeup_queue.add_to_queue(destination) + for destination in destinations: + if immediate: + self._get_per_destination_queue(destination).attempt_new_transaction() + else: + self._get_per_destination_queue(destination).mark_new_data() + self._destination_wakeup_queue.add_to_queue(destination) def wake_destination(self, destination: str) -> None: """Called when we want to retry sending transactions to a remote. 
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 31c5c2b7de..9105ba664c 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -59,6 +59,10 @@ sent_edus_by_type = Counter( ) +# If the retry interval is larger than this then we enter "catchup" mode +CATCHUP_RETRY_INTERVAL = 60 * 60 * 1000 + + class PerDestinationQueue: """ Manages the per-destination transmission queues. @@ -370,7 +374,7 @@ class PerDestinationQueue: ), ) - if e.retry_interval > 60 * 60 * 1000: + if e.retry_interval > CATCHUP_RETRY_INTERVAL: # we won't retry for another hour! # (this suggests a significant outage) # We drop pending EDUs because otherwise they will diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 5ae427d52c..763f56dfc1 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -836,17 +836,16 @@ class DeviceHandler(DeviceWorkerHandler): user_id, hosts, ) - for host in hosts: - self.federation_sender.send_device_messages( - host, immediate=False - ) - # TODO: when called, this isn't in a logging context. - # This leads to log spam, sentry event spam, and massive - # memory usage. - # See https://github.com/matrix-org/synapse/issues/12552. - # log_kv( - # {"message": "sent device update to host", "host": host} - # ) + await self.federation_sender.send_device_messages( + hosts, immediate=False + ) + # TODO: when called, this isn't in a logging context. + # This leads to log spam, sentry event spam, and massive + # memory usage. + # See https://github.com/matrix-org/synapse/issues/12552. + # log_kv( + # {"message": "sent device update to host", "host": host} + # ) if current_stream_id != stream_id: # Clear the set of hosts we've already sent to as we're @@ -951,8 +950,9 @@ class DeviceHandler(DeviceWorkerHandler): # Notify things that device lists need to be sent out. self.notifier.notify_replication() - for host in potentially_changed_hosts: - self.federation_sender.send_device_messages(host, immediate=False) + await self.federation_sender.send_device_messages( + potentially_changed_hosts, immediate=False + ) def _update_device_from_client_ips( diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index 798c7039f9..1c79f7a61e 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -302,10 +302,9 @@ class DeviceMessageHandler: ) if self.federation_sender: - for destination in remote_messages.keys(): - # Enqueue a new federation transaction to send the new - # device messages to each remote destination. - self.federation_sender.send_device_messages(destination) + # Enqueue a new federation transaction to send the new + # device messages to each remote destination. 
+ await self.federation_sender.send_device_messages(remote_messages.keys()) async def get_events_for_dehydrated_device( self, diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 2f841863ae..f31e18328b 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -354,7 +354,9 @@ class BasePresenceHandler(abc.ABC): ) for destination, host_states in hosts_to_states.items(): - self._federation.send_presence_to_destinations(host_states, [destination]) + await self._federation.send_presence_to_destinations( + host_states, [destination] + ) async def send_full_presence_to_users(self, user_ids: StrCollection) -> None: """ @@ -936,7 +938,7 @@ class PresenceHandler(BasePresenceHandler): ) for destination, states in hosts_to_states.items(): - self._federation_queue.send_presence_to_destinations( + await self._federation_queue.send_presence_to_destinations( states, [destination] ) @@ -1508,7 +1510,7 @@ class PresenceHandler(BasePresenceHandler): or state.status_msg is not None ] - self._federation_queue.send_presence_to_destinations( + await self._federation_queue.send_presence_to_destinations( destinations=newly_joined_remote_hosts, states=states, ) @@ -1519,7 +1521,7 @@ class PresenceHandler(BasePresenceHandler): prev_remote_hosts or newly_joined_remote_hosts ): local_states = await self.current_state_for_users(newly_joined_local_users) - self._federation_queue.send_presence_to_destinations( + await self._federation_queue.send_presence_to_destinations( destinations=prev_remote_hosts | newly_joined_remote_hosts, states=list(local_states.values()), ) @@ -2182,7 +2184,7 @@ class PresenceFederationQueue: index = bisect(self._queue, (clear_before,)) self._queue = self._queue[index:] - def send_presence_to_destinations( + async def send_presence_to_destinations( self, states: Collection[UserPresenceState], destinations: StrCollection ) -> None: """Send the presence states to the given destinations. 
@@ -2202,7 +2204,7 @@ class PresenceFederationQueue: return if self._federation: - self._federation.send_presence_to_destinations( + await self._federation.send_presence_to_destinations( states=states, destinations=destinations, ) @@ -2325,7 +2327,7 @@ class PresenceFederationQueue: for host, user_ids in hosts_to_users.items(): states = await self._presence_handler.current_state_for_users(user_ids) - self._federation.send_presence_to_destinations( + await self._federation.send_presence_to_destinations( states=states.values(), destinations=[host], ) diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 7aeae5319c..4b4227003d 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -26,9 +26,10 @@ from synapse.metrics.background_process_metrics import ( ) from synapse.replication.tcp.streams import TypingStream from synapse.streams import EventSource -from synapse.types import JsonDict, Requester, StreamKeyType, UserID +from synapse.types import JsonDict, Requester, StrCollection, StreamKeyType, UserID from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.metrics import Measure +from synapse.util.retryutils import filter_destinations_by_retry_limiter from synapse.util.wheel_timer import WheelTimer if TYPE_CHECKING: @@ -150,8 +151,15 @@ class FollowerTypingHandler: now=now, obj=member, then=now + FEDERATION_PING_INTERVAL ) - hosts = await self._storage_controllers.state.get_current_hosts_in_room( - member.room_id + hosts: StrCollection = ( + await self._storage_controllers.state.get_current_hosts_in_room( + member.room_id + ) + ) + hosts = await filter_destinations_by_retry_limiter( + hosts, + clock=self.clock, + store=self.store, ) for domain in hosts: if not self.is_mine_server_name(domain): diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 9ad8e038ae..2f00a7ba20 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -1180,7 +1180,7 @@ class ModuleApi: # Send to remote destinations. destination = UserID.from_string(user).domain - presence_handler.get_federation_queue().send_presence_to_destinations( + await presence_handler.get_federation_queue().send_presence_to_destinations( presence_events, [destination] ) diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 3b88dc68ea..51285e6d33 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -422,7 +422,7 @@ class FederationSenderHandler: # The federation stream contains things that we want to send out, e.g. # presence, typing, etc. if stream_name == "federation": - send_queue.process_rows_for_federation(self.federation_sender, rows) + await send_queue.process_rows_for_federation(self.federation_sender, rows) await self.update_token(token) # ... and when new receipts happen @@ -439,16 +439,14 @@ class FederationSenderHandler: for row in rows if not row.entity.startswith("@") and not row.is_signature } - for host in hosts: - self.federation_sender.send_device_messages(host, immediate=False) + await self.federation_sender.send_device_messages(hosts, immediate=False) elif stream_name == ToDeviceStream.NAME: # The to_device stream includes stuff to be pushed to both local # clients and remote servers, so we ignore entities that start with # '@' (since they'll be local users rather than destinations). 
            hosts = {row.entity for row in rows if not row.entity.startswith("@")}
-            for host in hosts:
-                self.federation_sender.send_device_messages(host)
+            await self.federation_sender.send_device_messages(hosts)
 
     async def _on_new_receipts(
         self, rows: Iterable[ReceiptsStream.ReceiptsStreamRow]
diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py
index 860bbf7c0f..efd21b5bfc 100644
--- a/synapse/storage/databases/main/transactions.py
+++ b/synapse/storage/databases/main/transactions.py
@@ -14,7 +14,7 @@
 
 import logging
 from enum import Enum
-from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, cast
+from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, cast
 
 import attr
 from canonicaljson import encode_canonical_json
@@ -28,8 +28,8 @@ from synapse.storage.database import (
     LoggingTransaction,
 )
 from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
-from synapse.types import JsonDict
-from synapse.util.caches.descriptors import cached
+from synapse.types import JsonDict, StrCollection
+from synapse.util.caches.descriptors import cached, cachedList
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -205,6 +205,26 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
         else:
             return None
 
+    @cachedList(
+        cached_method_name="get_destination_retry_timings", list_name="destinations"
+    )
+    async def get_destination_retry_timings_batch(
+        self, destinations: StrCollection
+    ) -> Dict[str, Optional[DestinationRetryTimings]]:
+        rows = await self.db_pool.simple_select_many_batch(
+            table="destinations",
+            iterable=destinations,
+            column="destination",
+            retcols=("destination", "failure_ts", "retry_last_ts", "retry_interval"),
+            desc="get_destination_retry_timings_batch",
+        )
+
+        return {
+            row.pop("destination"): DestinationRetryTimings(**row)
+            for row in rows
+            if row["retry_last_ts"] and row["failure_ts"] and row["retry_interval"]
+        }
+
     async def set_destination_retry_timings(
         self,
         destination: str,
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index 9d2065372c..0e1f907667 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -19,6 +19,7 @@ from typing import TYPE_CHECKING, Any, Optional, Type
 from synapse.api.errors import CodeMessageException
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage import DataStore
+from synapse.types import StrCollection
 from synapse.util import Clock
 
 if TYPE_CHECKING:
@@ -116,6 +117,30 @@ async def get_retry_limiter(
     )
 
 
+async def filter_destinations_by_retry_limiter(
+    destinations: StrCollection,
+    clock: Clock,
+    store: DataStore,
+    retry_due_within_ms: int = 0,
+) -> StrCollection:
+    """Filter down the list of destinations to only those that are either
+    alive or due for a retry (within `retry_due_within_ms`).
+    """
+    if not destinations:
+        return destinations
+
+    retry_timings = await store.get_destination_retry_timings_batch(destinations)
+
+    now = int(clock.time_msec())
+
+    return [
+        destination
+        for destination, timings in retry_timings.items()
+        if timings is None
+        or timings.retry_last_ts + timings.retry_interval <= now + retry_due_within_ms
+    ]
+
+
 class RetryDestinationLimiter:
     def __init__(
         self,
diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py
index 7bd3d06859..caf04b54cb 100644
--- a/tests/federation/test_federation_sender.py
+++ b/tests/federation/test_federation_sender.py
@@ -75,7
+75,7 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase): thread_id=None, data={"ts": 1234}, ) - self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt))) + self.get_success(sender.send_read_receipt(receipt)) self.pump() @@ -111,6 +111,9 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase): # * The same room / user on multiple threads. # * A different user in the same room. sender = self.hs.get_federation_sender() + # Hack so that we have a txn in-flight so we batch up read receipts + # below + sender.wake_destination("host2") for user, thread in ( ("alice", None), ("alice", "thread"), @@ -125,9 +128,7 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase): thread_id=thread, data={"ts": 1234}, ) - self.successResultOf( - defer.ensureDeferred(sender.send_read_receipt(receipt)) - ) + defer.ensureDeferred(sender.send_read_receipt(receipt)) self.pump() @@ -191,7 +192,7 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase): thread_id=None, data={"ts": 1234}, ) - self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt))) + self.get_success(sender.send_read_receipt(receipt)) self.pump() @@ -342,7 +343,9 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): self.reactor.advance(1) # a second call should produce no new device EDUs - self.hs.get_federation_sender().send_device_messages("host2") + self.get_success( + self.hs.get_federation_sender().send_device_messages(["host2"]) + ) self.assertEqual(self.edus, []) # a second device @@ -550,7 +553,9 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): # recover the server mock_send_txn.side_effect = self.record_transaction - self.hs.get_federation_sender().send_device_messages("host2") + self.get_success( + self.hs.get_federation_sender().send_device_messages(["host2"]) + ) # We queue up device list updates to be sent over federation, so we # advance to clear the queue. @@ -601,7 +606,9 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): # recover the server mock_send_txn.side_effect = self.record_transaction - self.hs.get_federation_sender().send_device_messages("host2") + self.get_success( + self.hs.get_federation_sender().send_device_messages(["host2"]) + ) # We queue up device list updates to be sent over federation, so we # advance to clear the queue. @@ -656,7 +663,9 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): # recover the server mock_send_txn.side_effect = self.record_transaction - self.hs.get_federation_sender().send_device_messages("host2") + self.get_success( + self.hs.get_federation_sender().send_device_messages(["host2"]) + ) # We queue up device list updates to be sent over federation, so we # advance to clear the queue. 
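The keep/skip decision in `filter_destinations_by_retry_limiter` above is a single predicate over the stored retry timings. The following is a minimal, self-contained sketch of that predicate; the function name `destination_is_due` and the unbundled arguments are illustrative only, since the real code reads `DestinationRetryTimings` rows in bulk via `get_destination_retry_timings_batch`:

```python
from typing import Optional

def destination_is_due(
    retry_last_ts: Optional[int],  # ms timestamp of the last failure, or None if no backoff is recorded
    retry_interval: int,           # current backoff interval in ms
    now_ms: int,                   # current time in ms
    retry_due_within_ms: int = 0,  # also keep destinations whose retry becomes due this soon
) -> bool:
    """Keep a destination that is alive, or whose next retry is (nearly) due."""
    if retry_last_ts is None:
        # No retry timings recorded: the destination is considered alive.
        return True
    return retry_last_ts + retry_interval <= now_ms + retry_due_within_ms
```

With the default `retry_due_within_ms=0` this keeps exactly the destinations that are alive or already due for a retry, which is what the typing-handler and federation-sender call sites above rely on.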
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index a987267308..88a16193a3 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -909,8 +909,14 @@ class PresenceFederationQueueTestCase(unittest.HomeserverTestCase): prev_token = self.queue.get_current_token(self.instance_name) - self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2")) - self.queue.send_presence_to_destinations((state3,), ("dest3",)) + self.get_success( + self.queue.send_presence_to_destinations( + (state1, state2), ("dest1", "dest2") + ) + ) + self.get_success( + self.queue.send_presence_to_destinations((state3,), ("dest3",)) + ) now_token = self.queue.get_current_token(self.instance_name) @@ -946,11 +952,17 @@ class PresenceFederationQueueTestCase(unittest.HomeserverTestCase): prev_token = self.queue.get_current_token(self.instance_name) - self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2")) + self.get_success( + self.queue.send_presence_to_destinations( + (state1, state2), ("dest1", "dest2") + ) + ) now_token = self.queue.get_current_token(self.instance_name) - self.queue.send_presence_to_destinations((state3,), ("dest3",)) + self.get_success( + self.queue.send_presence_to_destinations((state3,), ("dest3",)) + ) rows, upto_token, limited = self.get_success( self.queue.get_replication_rows("master", prev_token, now_token, 10) @@ -989,8 +1001,14 @@ class PresenceFederationQueueTestCase(unittest.HomeserverTestCase): prev_token = self.queue.get_current_token(self.instance_name) - self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2")) - self.queue.send_presence_to_destinations((state3,), ("dest3",)) + self.get_success( + self.queue.send_presence_to_destinations( + (state1, state2), ("dest1", "dest2") + ) + ) + self.get_success( + self.queue.send_presence_to_destinations((state3,), ("dest3",)) + ) self.reactor.advance(10 * 60 * 1000) @@ -1005,8 +1023,14 @@ class PresenceFederationQueueTestCase(unittest.HomeserverTestCase): prev_token = self.queue.get_current_token(self.instance_name) - self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2")) - self.queue.send_presence_to_destinations((state3,), ("dest3",)) + self.get_success( + self.queue.send_presence_to_destinations( + (state1, state2), ("dest1", "dest2") + ) + ) + self.get_success( + self.queue.send_presence_to_destinations((state3,), ("dest3",)) + ) now_token = self.queue.get_current_token(self.instance_name) @@ -1033,11 +1057,17 @@ class PresenceFederationQueueTestCase(unittest.HomeserverTestCase): prev_token = self.queue.get_current_token(self.instance_name) - self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2")) + self.get_success( + self.queue.send_presence_to_destinations( + (state1, state2), ("dest1", "dest2") + ) + ) self.reactor.advance(2 * 60 * 1000) - self.queue.send_presence_to_destinations((state3,), ("dest3",)) + self.get_success( + self.queue.send_presence_to_destinations((state3,), ("dest3",)) + ) self.reactor.advance(4 * 60 * 1000) @@ -1053,8 +1083,14 @@ class PresenceFederationQueueTestCase(unittest.HomeserverTestCase): prev_token = self.queue.get_current_token(self.instance_name) - self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2")) - self.queue.send_presence_to_destinations((state3,), ("dest3",)) + self.get_success( + self.queue.send_presence_to_destinations( + (state1, state2), ("dest1", "dest2") + ) + ) + self.get_success( + 
self.queue.send_presence_to_destinations((state3,), ("dest3",)) + ) now_token = self.queue.get_current_token(self.instance_name) diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 43c513b157..95106ec8f3 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -120,8 +120,6 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.datastore = hs.get_datastores().main - self.datastore.get_destination_retry_timings = AsyncMock(return_value=None) - self.datastore.get_device_updates_by_remote = AsyncMock( # type: ignore[method-assign] return_value=(0, []) ) From 0425dd28f4c435735759154a5bdb6a793b60f502 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 5 Sep 2023 11:21:54 +0100 Subject: [PATCH 412/562] 1.92.0rc1 --- CHANGES.md | 57 +++++++++++++++++++++++++++++++++++++++ changelog.d/15816.feature | 1 - changelog.d/16008.doc | 1 - changelog.d/16099.misc | 1 - changelog.d/16113.feature | 1 - changelog.d/16121.misc | 1 - changelog.d/16135.misc | 1 - changelog.d/16136.feature | 1 - changelog.d/16155.bugfix | 1 - changelog.d/16168.doc | 1 - changelog.d/16170.misc | 1 - changelog.d/16171.misc | 1 - changelog.d/16172.misc | 1 - changelog.d/16175.misc | 1 - changelog.d/16178.doc | 1 - changelog.d/16179.misc | 1 - changelog.d/16180.misc | 1 - changelog.d/16183.misc | 1 - changelog.d/16184.misc | 1 - changelog.d/16185.bugfix | 1 - changelog.d/16186.misc | 1 - changelog.d/16187.misc | 1 - changelog.d/16188.misc | 1 - changelog.d/16201.misc | 1 - changelog.d/16205.bugfix | 1 - changelog.d/16210.bugfix | 1 - changelog.d/16211.bugfix | 1 - changelog.d/16212.misc | 1 - changelog.d/16213.misc | 1 - changelog.d/16218.feature | 1 - changelog.d/16220.misc | 1 - changelog.d/16221.bugfix | 1 - changelog.d/16223.feature | 1 - changelog.d/16241.misc | 1 - debian/changelog | 6 +++++ pyproject.toml | 2 +- 36 files changed, 64 insertions(+), 34 deletions(-) delete mode 100644 changelog.d/15816.feature delete mode 100644 changelog.d/16008.doc delete mode 100644 changelog.d/16099.misc delete mode 100644 changelog.d/16113.feature delete mode 100644 changelog.d/16121.misc delete mode 100644 changelog.d/16135.misc delete mode 100644 changelog.d/16136.feature delete mode 100644 changelog.d/16155.bugfix delete mode 100644 changelog.d/16168.doc delete mode 100644 changelog.d/16170.misc delete mode 100644 changelog.d/16171.misc delete mode 100644 changelog.d/16172.misc delete mode 100644 changelog.d/16175.misc delete mode 100644 changelog.d/16178.doc delete mode 100644 changelog.d/16179.misc delete mode 100644 changelog.d/16180.misc delete mode 100644 changelog.d/16183.misc delete mode 100644 changelog.d/16184.misc delete mode 100644 changelog.d/16185.bugfix delete mode 100644 changelog.d/16186.misc delete mode 100644 changelog.d/16187.misc delete mode 100644 changelog.d/16188.misc delete mode 100644 changelog.d/16201.misc delete mode 100644 changelog.d/16205.bugfix delete mode 100644 changelog.d/16210.bugfix delete mode 100644 changelog.d/16211.bugfix delete mode 100644 changelog.d/16212.misc delete mode 100644 changelog.d/16213.misc delete mode 100644 changelog.d/16218.feature delete mode 100644 changelog.d/16220.misc delete mode 100644 changelog.d/16221.bugfix delete mode 100644 changelog.d/16223.feature delete mode 100644 changelog.d/16241.misc diff --git a/CHANGES.md b/CHANGES.md index 7bd9d31619..c13083c230 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,60 @@ +# Synapse 1.92.0rc1 (2023-09-05) + +### Features + +- Add configuration setting for 
CAS protocol version. Contributed by Aurélien Grimpard. ([\#15816](https://github.com/matrix-org/synapse/issues/15816)) +- Suppress notifications from message edits per [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958). ([\#16113](https://github.com/matrix-org/synapse/issues/16113)) +- Return a `Retry-After` with `M_LIMIT_EXCEEDED` error responses. ([\#16136](https://github.com/matrix-org/synapse/issues/16136)) +- Add `last_seen_ts` to the admin users API. ([\#16218](https://github.com/matrix-org/synapse/issues/16218)) +- Improve resource usage when sending data to a large number of remote hosts that are marked as "down". ([\#16223](https://github.com/matrix-org/synapse/issues/16223)) + +### Bugfixes + +- Fix IPv6-related bugs on SMTP settings, adding groundwork to fix similar issues. Contributed by @evilham and @telmich (ungleich.ch). ([\#16155](https://github.com/matrix-org/synapse/issues/16155)) +- Fix a spec compliance issue where requests to the `/publicRooms` federation API would specify `include_all_networks` as a string. ([\#16185](https://github.com/matrix-org/synapse/issues/16185)) +- Fix inaccurate error message while attempting to ban or unban a user with the same or higher PL by splitting the conditional statements. Contributed by @leviosacz. ([\#16205](https://github.com/matrix-org/synapse/issues/16205)) +- Fix rare bug that broke looping calls, which could lead to e.g. linearly increasing memory usage. Introduced in v1.90.0. ([\#16210](https://github.com/matrix-org/synapse/issues/16210)) +- Fix a long-standing bug where uploading images would fail if we could not generate thumbnails for them. ([\#16211](https://github.com/matrix-org/synapse/issues/16211)) +- Fix long-standing bug where we did not correctly back off from servers that had "gone" if they returned 4xx series error codes. ([\#16221](https://github.com/matrix-org/synapse/issues/16221)) + +### Improved Documentation + +- Update links to the matrix.org blog. ([\#16008](https://github.com/matrix-org/synapse/issues/16008)) +- Document which admin APIs are disabled when experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support is enabled. ([\#16168](https://github.com/matrix-org/synapse/issues/16168)) +- Document `exclude_rooms_from_sync` configuration option. ([\#16178](https://github.com/matrix-org/synapse/issues/16178)) + +### Internal Changes + +- Prepare unit tests for Python 3.12. ([\#16099](https://github.com/matrix-org/synapse/issues/16099)) +- Attempt to fix the twisted trunk job. ([\#16121](https://github.com/matrix-org/synapse/issues/16121)) +- Describe which rate limiter was hit in logs. ([\#16135](https://github.com/matrix-org/synapse/issues/16135)) +- Simplify presence code when using workers. ([\#16170](https://github.com/matrix-org/synapse/issues/16170)) +- Track per-device information in the presence code. ([\#16171](https://github.com/matrix-org/synapse/issues/16171), [\#16172](https://github.com/matrix-org/synapse/issues/16172)) +- Stop using the `event_txn_id` table. ([\#16175](https://github.com/matrix-org/synapse/issues/16175)) +- Use `AsyncMock` instead of custom code. ([\#16179](https://github.com/matrix-org/synapse/issues/16179), [\#16180](https://github.com/matrix-org/synapse/issues/16180)) +- Improve error reporting of invalid data passed to `/_matrix/key/v2/query`. ([\#16183](https://github.com/matrix-org/synapse/issues/16183)) +- Task scheduler: add replication notify for new task to launch ASAP.
([\#16184](https://github.com/matrix-org/synapse/issues/16184)) +- Improve type hints. ([\#16186](https://github.com/matrix-org/synapse/issues/16186), [\#16188](https://github.com/matrix-org/synapse/issues/16188), [\#16201](https://github.com/matrix-org/synapse/issues/16201)) +- Bump black version to 23.7.0. ([\#16187](https://github.com/matrix-org/synapse/issues/16187)) +- Log the details of background update failures. ([\#16212](https://github.com/matrix-org/synapse/issues/16212)) +- Fix the latest-deps CI job. ([\#16213](https://github.com/matrix-org/synapse/issues/16213)) +- Fix typo where we ended up with multiple `WorkerLocksHandler`. ([\#16220](https://github.com/matrix-org/synapse/issues/16220)) +- Cache device resync requests over replication. ([\#16241](https://github.com/matrix-org/synapse/issues/16241)) + +### Updates to locked dependencies + +* Bump anyhow from 1.0.72 to 1.0.75. ([\#16141](https://github.com/matrix-org/synapse/issues/16141)) +* Bump furo from 2023.7.26 to 2023.8.19. ([\#16238](https://github.com/matrix-org/synapse/issues/16238)) +* Bump phonenumbers from 8.13.18 to 8.13.19. ([\#16237](https://github.com/matrix-org/synapse/issues/16237)) +* Bump psycopg2 from 2.9.6 to 2.9.7. ([\#16196](https://github.com/matrix-org/synapse/issues/16196)) +* Bump regex from 1.9.3 to 1.9.4. ([\#16195](https://github.com/matrix-org/synapse/issues/16195)) +* Bump ruff from 0.0.277 to 0.0.286. ([\#16198](https://github.com/matrix-org/synapse/issues/16198)) +* Bump sentry-sdk from 1.29.2 to 1.30.0. ([\#16236](https://github.com/matrix-org/synapse/issues/16236)) +* Bump serde from 1.0.184 to 1.0.188. ([\#16194](https://github.com/matrix-org/synapse/issues/16194)) +* Bump serde_json from 1.0.104 to 1.0.105. ([\#16140](https://github.com/matrix-org/synapse/issues/16140)) +* Bump types-psycopg2 from 2.9.21.10 to 2.9.21.11. ([\#16200](https://github.com/matrix-org/synapse/issues/16200)) +* Bump types-pyyaml from 6.0.12.10 to 6.0.12.11. ([\#16199](https://github.com/matrix-org/synapse/issues/16199)) + # Synapse 1.91.1 (2023-09-04) ### Bugfixes diff --git a/changelog.d/15816.feature b/changelog.d/15816.feature deleted file mode 100644 index 9248dd6792..0000000000 --- a/changelog.d/15816.feature +++ /dev/null @@ -1 +0,0 @@ -Add configuration setting for CAS protocol version. Contributed by Aurélien Grimpard. diff --git a/changelog.d/16008.doc b/changelog.d/16008.doc deleted file mode 100644 index 1142224951..0000000000 --- a/changelog.d/16008.doc +++ /dev/null @@ -1 +0,0 @@ -Update links to the matrix.org blog. diff --git a/changelog.d/16099.misc b/changelog.d/16099.misc deleted file mode 100644 index d0e2811366..0000000000 --- a/changelog.d/16099.misc +++ /dev/null @@ -1 +0,0 @@ -Prepare unit tests for Python 3.12. diff --git a/changelog.d/16113.feature b/changelog.d/16113.feature deleted file mode 100644 index 69fdaaebac..0000000000 --- a/changelog.d/16113.feature +++ /dev/null @@ -1 +0,0 @@ -Suppress notifications from message edits per [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958). diff --git a/changelog.d/16121.misc b/changelog.d/16121.misc deleted file mode 100644 index f325d2a31d..0000000000 --- a/changelog.d/16121.misc +++ /dev/null @@ -1 +0,0 @@ -Attempt to fix the twisted trunk job. diff --git a/changelog.d/16135.misc b/changelog.d/16135.misc deleted file mode 100644 index cba8733d02..0000000000 --- a/changelog.d/16135.misc +++ /dev/null @@ -1 +0,0 @@ -Describe which rate limiter was hit in logs. 
diff --git a/changelog.d/16136.feature b/changelog.d/16136.feature deleted file mode 100644 index 4ad98a88c3..0000000000 --- a/changelog.d/16136.feature +++ /dev/null @@ -1 +0,0 @@ -Return a `Retry-After` with `M_LIMIT_EXCEEDED` error responses. diff --git a/changelog.d/16155.bugfix b/changelog.d/16155.bugfix deleted file mode 100644 index 8b2dc04006..0000000000 --- a/changelog.d/16155.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix IPv6-related bugs on SMTP settings, adding groundwork to fix similar issues. Contributed by @evilham and @telmich (ungleich.ch). diff --git a/changelog.d/16168.doc b/changelog.d/16168.doc deleted file mode 100644 index 7dadb047be..0000000000 --- a/changelog.d/16168.doc +++ /dev/null @@ -1 +0,0 @@ -Document which admin APIs are disabled when experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support is enabled. diff --git a/changelog.d/16170.misc b/changelog.d/16170.misc deleted file mode 100644 index c950b54367..0000000000 --- a/changelog.d/16170.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify presence code when using workers. diff --git a/changelog.d/16171.misc b/changelog.d/16171.misc deleted file mode 100644 index 4d709cb56e..0000000000 --- a/changelog.d/16171.misc +++ /dev/null @@ -1 +0,0 @@ -Track per-device information in the presence code. diff --git a/changelog.d/16172.misc b/changelog.d/16172.misc deleted file mode 100644 index 4d709cb56e..0000000000 --- a/changelog.d/16172.misc +++ /dev/null @@ -1 +0,0 @@ -Track per-device information in the presence code. diff --git a/changelog.d/16175.misc b/changelog.d/16175.misc deleted file mode 100644 index 308fbc2259..0000000000 --- a/changelog.d/16175.misc +++ /dev/null @@ -1 +0,0 @@ -Stop using the `event_txn_id` table. diff --git a/changelog.d/16178.doc b/changelog.d/16178.doc deleted file mode 100644 index ea21e19240..0000000000 --- a/changelog.d/16178.doc +++ /dev/null @@ -1 +0,0 @@ -Document `exclude_rooms_from_sync` configuration option. diff --git a/changelog.d/16179.misc b/changelog.d/16179.misc deleted file mode 100644 index 8d04954ab9..0000000000 --- a/changelog.d/16179.misc +++ /dev/null @@ -1 +0,0 @@ -Use `AsyncMock` instead of custom code. diff --git a/changelog.d/16180.misc b/changelog.d/16180.misc deleted file mode 100644 index 8d04954ab9..0000000000 --- a/changelog.d/16180.misc +++ /dev/null @@ -1 +0,0 @@ -Use `AsyncMock` instead of custom code. diff --git a/changelog.d/16183.misc b/changelog.d/16183.misc deleted file mode 100644 index 305d5baa6e..0000000000 --- a/changelog.d/16183.misc +++ /dev/null @@ -1 +0,0 @@ -Improve error reporting of invalid data passed to `/_matrix/key/v2/query`. diff --git a/changelog.d/16184.misc b/changelog.d/16184.misc deleted file mode 100644 index 3c0baddfe1..0000000000 --- a/changelog.d/16184.misc +++ /dev/null @@ -1 +0,0 @@ -Task scheduler: add replication notify for new task to launch ASAP. diff --git a/changelog.d/16185.bugfix b/changelog.d/16185.bugfix deleted file mode 100644 index e62c9c7a0d..0000000000 --- a/changelog.d/16185.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a spec compliance issue where requests to the `/publicRooms` federation API would specify `include_all_networks` as a string. diff --git a/changelog.d/16186.misc b/changelog.d/16186.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16186.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. 
diff --git a/changelog.d/16187.misc b/changelog.d/16187.misc deleted file mode 100644 index 989147274a..0000000000 --- a/changelog.d/16187.misc +++ /dev/null @@ -1 +0,0 @@ -Bump black version to 23.7.0. diff --git a/changelog.d/16188.misc b/changelog.d/16188.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16188.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/16201.misc b/changelog.d/16201.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16201.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/16205.bugfix b/changelog.d/16205.bugfix deleted file mode 100644 index 97ac92a148..0000000000 --- a/changelog.d/16205.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix inaccurate error message while attempting to ban or unban a user with the same or higher PL by spliting the conditional statements. Contributed by @leviosacz. \ No newline at end of file diff --git a/changelog.d/16210.bugfix b/changelog.d/16210.bugfix deleted file mode 100644 index 39c35a1fe1..0000000000 --- a/changelog.d/16210.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix rare bug that broke looping calls, which could lead to e.g. linearly increasing memory usage. Introduced in v1.90.0. diff --git a/changelog.d/16211.bugfix b/changelog.d/16211.bugfix deleted file mode 100644 index ab1816386c..0000000000 --- a/changelog.d/16211.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where uploading images would fail if we could not generate thumbnails for them. diff --git a/changelog.d/16212.misc b/changelog.d/16212.misc deleted file mode 100644 index 19cf9b102d..0000000000 --- a/changelog.d/16212.misc +++ /dev/null @@ -1 +0,0 @@ -Log the details of background update failures. diff --git a/changelog.d/16213.misc b/changelog.d/16213.misc deleted file mode 100644 index 8c14f5fd51..0000000000 --- a/changelog.d/16213.misc +++ /dev/null @@ -1 +0,0 @@ -Fix the latest-deps CI job. diff --git a/changelog.d/16218.feature b/changelog.d/16218.feature deleted file mode 100644 index 4afd092e88..0000000000 --- a/changelog.d/16218.feature +++ /dev/null @@ -1 +0,0 @@ -Add `last_seen_ts` to the admin users API. diff --git a/changelog.d/16220.misc b/changelog.d/16220.misc deleted file mode 100644 index 329e9f76f6..0000000000 --- a/changelog.d/16220.misc +++ /dev/null @@ -1 +0,0 @@ -Fix typo where we ended up with multiple `WorkerLocksHandler`. diff --git a/changelog.d/16221.bugfix b/changelog.d/16221.bugfix deleted file mode 100644 index 22678256e4..0000000000 --- a/changelog.d/16221.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix long-standing bug where we did not correctly back off from servers that had "gone" if they returned 4xx series error codes. diff --git a/changelog.d/16223.feature b/changelog.d/16223.feature deleted file mode 100644 index a52d66658b..0000000000 --- a/changelog.d/16223.feature +++ /dev/null @@ -1 +0,0 @@ -Improve resource usage when sending data to a large number of remote hosts that are marked as "down". diff --git a/changelog.d/16241.misc b/changelog.d/16241.misc deleted file mode 100644 index 0fc5f34c5c..0000000000 --- a/changelog.d/16241.misc +++ /dev/null @@ -1 +0,0 @@ -Cache device resync requests over replication. diff --git a/debian/changelog b/debian/changelog index f737041567..2f870074e5 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.92.0~rc1) stable; urgency=medium + + * New Synapse release 1.92.0rc1. 
+ + -- Synapse Packaging team Tue, 05 Sep 2023 11:21:43 +0100 + matrix-synapse-py3 (1.91.1) stable; urgency=medium * New Synapse release 1.91.1. diff --git a/pyproject.toml b/pyproject.toml index 4585584723..c17f4da72d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.91.1" +version = "1.92.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From d77154be01920f0044ef767bd7ecc5e125ce8fcf Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 5 Sep 2023 11:28:49 +0100 Subject: [PATCH 413/562] Update changelog --- CHANGES.md | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index c13083c230..01bec0fe46 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ - Add configuration setting for CAS protocol version. Contributed by Aurélien Grimpard. ([\#15816](https://github.com/matrix-org/synapse/issues/15816)) - Suppress notifications from message edits per [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958). ([\#16113](https://github.com/matrix-org/synapse/issues/16113)) - Return a `Retry-After` with `M_LIMIT_EXCEEDED` error responses. ([\#16136](https://github.com/matrix-org/synapse/issues/16136)) -- Add `last_seen_ts` to the admin users API. ([\#16218](https://github.com/matrix-org/synapse/issues/16218)) +- Add `last_seen_ts` to the [admin users API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html). ([\#16218](https://github.com/matrix-org/synapse/issues/16218)) - Improve resource usage when sending data to a large number of remote hosts that are marked as "down". ([\#16223](https://github.com/matrix-org/synapse/issues/16223)) ### Bugfixes @@ -13,20 +13,20 @@ - Fix IPv6-related bugs on SMTP settings, adding groundwork to fix similar issues. Contributed by @evilham and @telmich (ungleich.ch). ([\#16155](https://github.com/matrix-org/synapse/issues/16155)) - Fix a spec compliance issue where requests to the `/publicRooms` federation API would specify `include_all_networks` as a string. ([\#16185](https://github.com/matrix-org/synapse/issues/16185)) - Fix inaccurate error message while attempting to ban or unban a user with the same or higher PL by splitting the conditional statements. Contributed by @leviosacz. ([\#16205](https://github.com/matrix-org/synapse/issues/16205)) -- Fix rare bug that broke looping calls, which could lead to e.g. linearly increasing memory usage. Introduced in v1.90.0. ([\#16210](https://github.com/matrix-org/synapse/issues/16210)) +- Fix a rare bug that broke looping calls, which could lead to e.g. linearly increasing memory usage. Introduced in v1.90.0. ([\#16210](https://github.com/matrix-org/synapse/issues/16210)) - Fix a long-standing bug where uploading images would fail if we could not generate thumbnails for them. ([\#16211](https://github.com/matrix-org/synapse/issues/16211)) -- Fix long-standing bug where we did not correctly back off from servers that had "gone" if they returned 4xx series error codes. ([\#16221](https://github.com/matrix-org/synapse/issues/16221)) +- Fix a long-standing bug where we did not correctly back off from servers that had "gone" if they returned 4xx series error codes. ([\#16221](https://github.com/matrix-org/synapse/issues/16221)) ### Improved Documentation - Update links to the matrix.org blog.
([\#16008](https://github.com/matrix-org/synapse/issues/16008)) -- Document which admin APIs are disabled when experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support is enabled. ([\#16168](https://github.com/matrix-org/synapse/issues/16168)) -- Document `exclude_rooms_from_sync` configuration option. ([\#16178](https://github.com/matrix-org/synapse/issues/16178)) +- Update links to the [matrix.org blog](https://matrix.org/blog/). ([\#16008](https://github.com/matrix-org/synapse/issues/16008)) +- Document which [admin APIs](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) are disabled when experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support is enabled. ([\#16168](https://github.com/matrix-org/synapse/issues/16168)) +- Document [`exclude_rooms_from_sync`](https://matrix-org.github.io/synapse/v1.92/usage/configuration/config_documentation.html#exclude_rooms_from_sync) configuration option. ([\#16178](https://github.com/matrix-org/synapse/issues/16178)) ### Internal Changes - Prepare unit tests for Python 3.12. ([\#16099](https://github.com/matrix-org/synapse/issues/16099)) -- Attempt to fix the twisted trunk job. ([\#16121](https://github.com/matrix-org/synapse/issues/16121)) +- Fix nightly CI jobs. ([\#16121](https://github.com/matrix-org/synapse/issues/16121), [\#16213](https://github.com/matrix-org/synapse/issues/16213)) - Describe which rate limiter was hit in logs. ([\#16135](https://github.com/matrix-org/synapse/issues/16135)) - Simplify presence code when using workers. ([\#16170](https://github.com/matrix-org/synapse/issues/16170)) - Track per-device information in the presence code. ([\#16171](https://github.com/matrix-org/synapse/issues/16171), [\#16172](https://github.com/matrix-org/synapse/issues/16172)) @@ -37,8 +37,6 @@ - Improve type hints. ([\#16186](https://github.com/matrix-org/synapse/issues/16186), [\#16188](https://github.com/matrix-org/synapse/issues/16188), [\#16201](https://github.com/matrix-org/synapse/issues/16201)) - Bump black version to 23.7.0. ([\#16187](https://github.com/matrix-org/synapse/issues/16187)) - Log the details of background update failures. ([\#16212](https://github.com/matrix-org/synapse/issues/16212)) -- Fix the latest-deps CI job. ([\#16213](https://github.com/matrix-org/synapse/issues/16213)) -- Fix typo where we ended up with multiple `WorkerLocksHandler`. ([\#16220](https://github.com/matrix-org/synapse/issues/16220)) - Cache device resync requests over replication. ([\#16241](https://github.com/matrix-org/synapse/issues/16241)) ### Updates to locked dependencies From 757010905ea85333672289a0ac124d41bd923bb3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 11:14:14 +0000 Subject: [PATCH 414/562] Bump twisted from 22.10.0 to 23.8.0 (#16235) * Bump twisted from 22.10.0 to 23.8.0 Bumps [twisted](https://github.com/twisted/twisted) from 22.10.0 to 23.8.0. - [Release notes](https://github.com/twisted/twisted/releases) - [Changelog](https://github.com/twisted/twisted/blob/trunk/NEWS.rst) - [Commits](https://github.com/twisted/twisted/compare/twisted-22.10.0...twisted-23.8.0) --- updated-dependencies: - dependency-name: twisted dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * Fix types * Fix lint * Newsfile --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Erik Johnston --- changelog.d/16235.misc | 1 + poetry.lock | 37 ++++++++++++++++---------------- synapse/handlers/initial_sync.py | 8 ++----- synapse/logging/context.py | 4 ++-- synapse/util/gai_resolver.py | 2 +- 5 files changed, 24 insertions(+), 28 deletions(-) create mode 100644 changelog.d/16235.misc diff --git a/changelog.d/16235.misc b/changelog.d/16235.misc new file mode 100644 index 0000000000..b1533f93b6 --- /dev/null +++ b/changelog.d/16235.misc @@ -0,0 +1 @@ +Fix type checking when using the new version of Twisted. diff --git a/poetry.lock b/poetry.lock index 1cefabb358..872a863edc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2866,44 +2866,43 @@ urllib3 = ">=1.26.0" [[package]] name = "twisted" -version = "22.10.0" +version = "23.8.0" description = "An asynchronous networking framework written in Python" optional = false python-versions = ">=3.7.1" files = [ - {file = "Twisted-22.10.0-py3-none-any.whl", hash = "sha256:86c55f712cc5ab6f6d64e02503352464f0400f66d4f079096d744080afcccbd0"}, - {file = "Twisted-22.10.0.tar.gz", hash = "sha256:32acbd40a94f5f46e7b42c109bfae2b302250945561783a8b7a059048f2d4d31"}, + {file = "twisted-23.8.0-py3-none-any.whl", hash = "sha256:b8bdba145de120ffb36c20e6e071cce984e89fba798611ed0704216fb7f884cd"}, + {file = "twisted-23.8.0.tar.gz", hash = "sha256:3c73360add17336a622c0d811c2a2ce29866b6e59b1125fd6509b17252098a24"}, ] [package.dependencies] -attrs = ">=19.2.0" -Automat = ">=0.8.0" +attrs = ">=21.3.0" +automat = ">=0.8.0" constantly = ">=15.1" hyperlink = ">=17.1.1" idna = {version = ">=2.4", optional = true, markers = "extra == \"tls\""} -incremental = ">=21.3.0" +incremental = ">=22.10.0" pyopenssl = {version = ">=21.0.0", optional = true, markers = "extra == \"tls\""} service-identity = {version = ">=18.1.0", optional = true, markers = "extra == \"tls\""} twisted-iocpsupport = {version = ">=1.0.2,<2", markers = "platform_system == \"Windows\""} -typing-extensions = ">=3.6.5" -"zope.interface" = ">=4.4.2" +typing-extensions = ">=3.10.0" +zope-interface = ">=5" [package.extras] -all-non-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"] -conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"] -conch-nacl = ["PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"] +all-non-platform = ["twisted[conch,contextvars,http2,serial,test,tls]", "twisted[conch,contextvars,http2,serial,test,tls]"] +conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)"] contextvars = ["contextvars (>=2.4,<3)"] -dev = ["coverage (>=6b1,<7)", "pydoctor (>=22.9.0,<22.10.0)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)", "twistedchecker (>=0.7,<1.0)"] -dev-release = ["pydoctor (>=22.9.0,<22.10.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)"] -gtk-platform = ["PyHamcrest (>=1.9.0)", "appdirs 
(>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pygobject", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"] +dev = ["coverage (>=6b1,<7)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "twisted[dev-release]", "twistedchecker (>=0.7,<1.0)"] +dev-release = ["pydoctor (>=23.4.0,<23.5.0)", "pydoctor (>=23.4.0,<23.5.0)", "readthedocs-sphinx-ext (>=2.2,<3.0)", "readthedocs-sphinx-ext (>=2.2,<3.0)", "sphinx (>=5,<7)", "sphinx (>=5,<7)", "sphinx-rtd-theme (>=1.2,<2.0)", "sphinx-rtd-theme (>=1.2,<2.0)", "towncrier (>=22.12,<23.0)", "towncrier (>=22.12,<23.0)", "urllib3 (<2)", "urllib3 (<2)"] +gtk-platform = ["pygobject", "pygobject", "twisted[all-non-platform]", "twisted[all-non-platform]"] http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"] -macos-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"] -mypy = ["PyHamcrest (>=1.9.0)", "PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "coverage (>=6b1,<7)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "mypy (==0.930)", "mypy-zope (==0.3.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pydoctor (>=22.9.0,<22.10.0)", "pyflakes (>=2.2,<3.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "service-identity (>=18.1.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)", "twistedchecker (>=0.7,<1.0)", "types-pyOpenSSL", "types-setuptools"] -osx-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"] +macos-platform = ["pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "twisted[all-non-platform]", "twisted[all-non-platform]"] +mypy = ["mypy (==0.981)", "mypy-extensions (==0.4.3)", "mypy-zope (==0.3.11)", "twisted[all-non-platform,dev]", "types-pyopenssl", "types-setuptools"] +osx-platform = ["twisted[macos-platform]", "twisted[macos-platform]"] serial = ["pyserial (>=3.0)", "pywin32 (!=226)"] -test = ["PyHamcrest (>=1.9.0)", "cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.0,<7.0)"] +test = ["cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pyhamcrest (>=2)"] tls = ["idna (>=2.4)", "pyopenssl (>=21.0.0)", "service-identity (>=18.1.0)"] -windows-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", 
"hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)"] +windows-platform = ["pywin32 (!=226)", "pywin32 (!=226)", "twisted[all-non-platform]", "twisted[all-non-platform]"] [[package]] name = "twisted-iocpsupport" diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index b3be7a86f0..5dc76ef588 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -13,7 +13,7 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, List, Optional, Tuple, cast +from typing import TYPE_CHECKING, List, Optional, Tuple from synapse.api.constants import ( AccountDataTypes, @@ -23,7 +23,6 @@ from synapse.api.constants import ( Membership, ) from synapse.api.errors import SynapseError -from synapse.events import EventBase from synapse.events.utils import SerializeEventConfig from synapse.events.validator import EventValidator from synapse.handlers.presence import format_user_presence_state @@ -35,7 +34,6 @@ from synapse.types import ( JsonDict, Requester, RoomStreamToken, - StateMap, StreamKeyType, StreamToken, UserID, @@ -199,9 +197,7 @@ class InitialSyncHandler: deferred_room_state = run_in_background( self._state_storage_controller.get_state_for_events, [event.event_id], - ).addCallback( - lambda states: cast(StateMap[EventBase], states[event.event_id]) - ) + ).addCallback(lambda states: states[event.event_id]) (messages, token), current_state = await make_deferred_yieldable( gather_results( diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 64c6ae4512..bf7e311026 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -728,7 +728,7 @@ async def _unwrap_awaitable(awaitable: Awaitable[R]) -> R: @overload -def preserve_fn( # type: ignore[misc] +def preserve_fn( f: Callable[P, Awaitable[R]], ) -> Callable[P, "defer.Deferred[R]"]: # The `type: ignore[misc]` above suppresses @@ -756,7 +756,7 @@ def preserve_fn( @overload -def run_in_background( # type: ignore[misc] +def run_in_background( f: Callable[P, Awaitable[R]], *args: P.args, **kwargs: P.kwargs ) -> "defer.Deferred[R]": # The `type: ignore[misc]` above suppresses diff --git a/synapse/util/gai_resolver.py b/synapse/util/gai_resolver.py index 214eb17fbc..fecf829ade 100644 --- a/synapse/util/gai_resolver.py +++ b/synapse/util/gai_resolver.py @@ -136,7 +136,7 @@ class GAIResolver: # The types on IHostnameResolver is incorrect in Twisted, see # https://twistedmatrix.com/trac/ticket/10276 - def resolveHostName( # type: ignore[override] + def resolveHostName( self, resolutionReceiver: IResolutionReceiver, hostName: str, From dfcfa9f0eda11b339d51d73cd12167ed6e3c01f0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 5 Sep 2023 13:12:50 +0100 Subject: [PATCH 415/562] Bump minimum supported Rust version to 1.61.0 (#16248) --- .github/workflows/tests.yml | 18 +++++++++--------- changelog.d/16248.misc | 1 + docs/upgrade.md | 8 ++++++++ rust/Cargo.toml | 9 +++++++-- 4 files changed, 25 insertions(+), 11 deletions(-) create mode 100644 changelog.d/16248.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 0a01e82984..fb117380d0 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -35,7 +35,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 - uses: 
Swatinem/rust-cache@v2 - uses: matrix-org/setup-python-poetry@v1 with: @@ -93,7 +93,7 @@ jobs: uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 - name: Setup Poetry @@ -150,7 +150,7 @@ jobs: with: ref: ${{ github.event.pull_request.head.sha }} - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 - uses: matrix-org/setup-python-poetry@v1 with: @@ -167,7 +167,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 with: components: clippy - uses: Swatinem/rust-cache@v2 @@ -268,7 +268,7 @@ jobs: postgres:${{ matrix.job.postgres-version }} - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 - uses: matrix-org/setup-python-poetry@v1 @@ -308,7 +308,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 # There aren't wheels for some of the older deps, so we need to install @@ -416,7 +416,7 @@ jobs: run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 - name: Run SyTest @@ -556,7 +556,7 @@ jobs: path: synapse - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 - uses: actions/setup-go@v4 @@ -584,7 +584,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@1.60.0 + uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 - run: cargo test diff --git a/changelog.d/16248.misc b/changelog.d/16248.misc new file mode 100644 index 0000000000..0a5ed6dccb --- /dev/null +++ b/changelog.d/16248.misc @@ -0,0 +1 @@ +Bump minimum supported Rust version to 1.61.0. diff --git a/docs/upgrade.md b/docs/upgrade.md index f50a279e98..2f888b6f12 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -88,6 +88,14 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.93.0 + +## Minimum supported Rust version +The minimum supported Rust version has been increased from v1.60.0 to v1.61.0. +Users building from source will need to ensure their `rustc` version is up to +date. 
+ + # Upgrading to v1.90.0 ## App service query parameter authorization is now a configuration option diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 3ead01c052..16917136db 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -7,7 +7,7 @@ name = "synapse" version = "0.1.0" edition = "2021" -rust-version = "1.60.0" +rust-version = "1.61.0" [lib] name = "synapse" @@ -23,7 +23,12 @@ name = "synapse.synapse_rust" anyhow = "1.0.63" lazy_static = "1.4.0" log = "0.4.17" -pyo3 = { version = "0.17.1", features = ["macros", "anyhow", "abi3", "abi3-py37"] } +pyo3 = { version = "0.17.1", features = [ + "macros", + "anyhow", + "abi3", + "abi3-py37", +] } pyo3-log = "0.8.1" pythonize = "0.17.0" regex = "1.6.0" From 36ae8611fe98977153387308311f7d38b67e39c8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 13:14:00 +0000 Subject: [PATCH 416/562] Bump regex from 1.9.4 to 1.9.5 (#16233) Bumps [regex](https://github.com/rust-lang/regex) from 1.9.4 to 1.9.5. - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.9.4...1.9.5) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4d60f8dcb6..95a713e437 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -138,9 +138,9 @@ checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" [[package]] name = "memoffset" @@ -291,9 +291,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" +checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" dependencies = [ "aho-corasick", "memchr", @@ -303,9 +303,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" +checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" dependencies = [ "aho-corasick", "memchr", From ea75346f6af8c182a42d1ca29119a10361693a7b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 5 Sep 2023 09:58:51 -0400 Subject: [PATCH 417/562] Track presence state per-device and combine to a user state. (#16066) Tracks presence on an individual per-device basis and combines the per-device state into a per-user state. This should help in situations where a user has multiple devices with conflicting status (e.g. one is syncing with unavailable and one is syncing with online).
The tie-breaking is done by priority: BUSY > ONLINE > UNAVAILABLE > OFFLINE --- changelog.d/16066.bugfix | 1 + changelog.d/16170.bugfix | 1 + changelog.d/16170.misc | 1 - changelog.d/16171.bugfix | 1 + changelog.d/16171.misc | 1 - changelog.d/16172.bugfix | 1 + changelog.d/16172.misc | 1 - synapse/api/presence.py | 43 ++- synapse/handlers/presence.py | 277 +++++++++++++++--- tests/handlers/test_presence.py | 500 +++++++++++++++++++++++++++++++- 10 files changed, 764 insertions(+), 63 deletions(-) create mode 100644 changelog.d/16066.bugfix create mode 100644 changelog.d/16170.bugfix delete mode 100644 changelog.d/16170.misc create mode 100644 changelog.d/16171.bugfix delete mode 100644 changelog.d/16171.misc create mode 100644 changelog.d/16172.bugfix delete mode 100644 changelog.d/16172.misc diff --git a/changelog.d/16066.bugfix b/changelog.d/16066.bugfix new file mode 100644 index 0000000000..83649cf42a --- /dev/null +++ b/changelog.d/16066.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16170.bugfix b/changelog.d/16170.bugfix new file mode 100644 index 0000000000..83649cf42a --- /dev/null +++ b/changelog.d/16170.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16170.misc b/changelog.d/16170.misc deleted file mode 100644 index c950b54367..0000000000 --- a/changelog.d/16170.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify presence code when using workers. diff --git a/changelog.d/16171.bugfix b/changelog.d/16171.bugfix new file mode 100644 index 0000000000..83649cf42a --- /dev/null +++ b/changelog.d/16171.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16171.misc b/changelog.d/16171.misc deleted file mode 100644 index 4d709cb56e..0000000000 --- a/changelog.d/16171.misc +++ /dev/null @@ -1 +0,0 @@ -Track per-device information in the presence code. diff --git a/changelog.d/16172.bugfix b/changelog.d/16172.bugfix new file mode 100644 index 0000000000..83649cf42a --- /dev/null +++ b/changelog.d/16172.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16172.misc b/changelog.d/16172.misc deleted file mode 100644 index 4d709cb56e..0000000000 --- a/changelog.d/16172.misc +++ /dev/null @@ -1 +0,0 @@ -Track per-device information in the presence code. diff --git a/synapse/api/presence.py b/synapse/api/presence.py index b80aa83cb3..b78f419994 100644 --- a/synapse/api/presence.py +++ b/synapse/api/presence.py @@ -20,18 +20,53 @@ from synapse.api.constants import PresenceState from synapse.types import JsonDict +@attr.s(slots=True, auto_attribs=True) +class UserDevicePresenceState: + """ + Represents the current presence state of a user's device. + + user_id: The user ID. + device_id: The user's device ID. + state: The presence state, see PresenceState. + last_active_ts: Time in msec that the device last interacted with server. + last_sync_ts: Time in msec that the device last *completed* a sync + (or event stream). 
+ """ + + user_id: str + device_id: Optional[str] + state: str + last_active_ts: int + last_sync_ts: int + + @classmethod + def default( + cls, user_id: str, device_id: Optional[str] + ) -> "UserDevicePresenceState": + """Returns a default presence state.""" + return cls( + user_id=user_id, + device_id=device_id, + state=PresenceState.OFFLINE, + last_active_ts=0, + last_sync_ts=0, + ) + + @attr.s(slots=True, frozen=True, auto_attribs=True) class UserPresenceState: """Represents the current presence state of the user. - user_id - last_active: Time in msec that the user last interacted with server. - last_federation_update: Time in msec since either a) we sent a presence + user_id: The user ID. + state: The presence state, see PresenceState. + last_active_ts: Time in msec that the user last interacted with server. + last_federation_update_ts: Time in msec since either a) we sent a presence update to other servers or b) we received a presence update, depending on if is a local user or not. - last_user_sync: Time in msec that the user last *completed* a sync + last_user_sync_ts: Time in msec that the user last *completed* a sync (or event stream). status_msg: User set status message. + currently_active: True if the user is currently syncing. """ user_id: str diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index f31e18328b..80190838b7 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -13,13 +13,56 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""This module is responsible for keeping track of presence status of local +""" +This module is responsible for keeping track of presence status of local and remote users. The methods that define policy are: - PresenceHandler._update_states - PresenceHandler._handle_timeouts - should_notify + +# Tracking local presence + +For local users, presence is tracked on a per-device basis. When a user has multiple +devices the user presence state is derived by coalescing the presence from each +device: + + BUSY > ONLINE > UNAVAILABLE > OFFLINE + +The time that each device was last active and last synced is tracked in order to +automatically downgrade a device's presence state: + + A device may move from ONLINE -> UNAVAILABLE, if it has not been active for + a period of time. + + A device may go from any state -> OFFLINE, if it is not active and has not + synced for a period of time. + +The timeouts are handled using a wheel timer, which has coarse buckets. Timings +do not need to be exact. + +Generally a device's presence state is updated whenever a user syncs (via the +set_presence parameter), when the presence API is called, or if "pro-active" +events occur, including: + +* Sending an event, receipt, read marker. +* Updating typing status. + +The busy state has special status that it cannot is not downgraded by a call to +sync with a lower priority state *and* it takes a long period of time to transition +to offline. + +# Persisting (and restoring) presence + +For all users, presence is persisted on a per-user basis. Data is kept in-memory +and persisted periodically. When Synapse starts each worker loads the current +presence state and then tracks the presence stream to keep itself up-to-date. + +When restoring presence for local users a pseudo-device is created to match the +user state; this device follows the normal timeout logic (see above) and will +automatically be replaced with any information from currently available devices. 
+ """ import abc import contextlib @@ -30,6 +73,7 @@ from contextlib import contextmanager from types import TracebackType from typing import ( TYPE_CHECKING, + AbstractSet, Any, Callable, Collection, @@ -49,7 +93,7 @@ from prometheus_client import Counter import synapse.metrics from synapse.api.constants import EduTypes, EventTypes, Membership, PresenceState from synapse.api.errors import SynapseError -from synapse.api.presence import UserPresenceState +from synapse.api.presence import UserDevicePresenceState, UserPresenceState from synapse.appservice import ApplicationService from synapse.events.presence_router import PresenceRouter from synapse.logging.context import run_in_background @@ -162,6 +206,7 @@ class BasePresenceHandler(abc.ABC): self.VALID_PRESENCE += (PresenceState.BUSY,) active_presence = self.store.take_presence_startup_info() + # The combined status across all user devices. self.user_to_current_state = {state.user_id: state for state in active_presence} @abc.abstractmethod @@ -708,9 +753,27 @@ class PresenceHandler(BasePresenceHandler): lambda: len(self.user_to_current_state), ) + # The per-device presence state, maps user to devices to per-device presence state. + self._user_to_device_to_current_state: Dict[ + str, Dict[Optional[str], UserDevicePresenceState] + ] = {} + now = self.clock.time_msec() if self._presence_enabled: for state in self.user_to_current_state.values(): + # Create a psuedo-device to properly handle time outs. This will + # be overridden by any "real" devices within SYNC_ONLINE_TIMEOUT. + pseudo_device_id = None + self._user_to_device_to_current_state[state.user_id] = { + pseudo_device_id: UserDevicePresenceState( + user_id=state.user_id, + device_id=pseudo_device_id, + state=state.state, + last_active_ts=state.last_active_ts, + last_sync_ts=state.last_user_sync_ts, + ) + } + self.wheel_timer.insert( now=now, obj=state.user_id, then=state.last_active_ts + IDLE_TIMER ) @@ -752,7 +815,7 @@ class PresenceHandler(BasePresenceHandler): # Keeps track of the number of *ongoing* syncs on other processes. # - # While any sync is ongoing on another process the user will never + # While any sync is ongoing on another process the user's device will never # go offline. # # Each process has a unique identifier and an update frequency. If @@ -981,22 +1044,21 @@ class PresenceHandler(BasePresenceHandler): timers_fired_counter.inc(len(states)) - syncing_user_ids = { - user_id - for (user_id, _), count in self._user_device_to_num_current_syncs.items() + # Set of user ID & device IDs which are currently syncing. 
+ syncing_user_devices = { + user_id_device_id + for user_id_device_id, count in self._user_device_to_num_current_syncs.items() if count } - syncing_user_ids.update( - user_id - for user_id, _ in itertools.chain( - *self.external_process_to_current_syncs.values() - ) + syncing_user_devices.update( + itertools.chain(*self.external_process_to_current_syncs.values()) ) changes = handle_timeouts( states, is_mine_fn=self.is_mine_id, - syncing_user_ids=syncing_user_ids, + syncing_user_devices=syncing_user_devices, + user_to_devices=self._user_to_device_to_current_state, now=now, ) @@ -1016,11 +1078,26 @@ class PresenceHandler(BasePresenceHandler): bump_active_time_counter.inc() - prev_state = await self.current_state_for_user(user_id) + now = self.clock.time_msec() - new_fields: Dict[str, Any] = {"last_active_ts": self.clock.time_msec()} - if prev_state.state == PresenceState.UNAVAILABLE: - new_fields["state"] = PresenceState.ONLINE + # Update the device information & mark the device as online if it was + # unavailable. + devices = self._user_to_device_to_current_state.setdefault(user_id, {}) + device_state = devices.setdefault( + device_id, + UserDevicePresenceState.default(user_id, device_id), + ) + device_state.last_active_ts = now + if device_state.state == PresenceState.UNAVAILABLE: + device_state.state = PresenceState.ONLINE + + # Update the user state, this will always update last_active_ts and + # might update the presence state. + prev_state = await self.current_state_for_user(user_id) + new_fields: Dict[str, Any] = { + "last_active_ts": now, + "state": _combine_device_states(devices.values()), + } await self._update_states([prev_state.copy_and_replace(**new_fields)]) @@ -1132,6 +1209,12 @@ class PresenceHandler(BasePresenceHandler): if is_syncing and (user_id, device_id) not in process_presence: process_presence.add((user_id, device_id)) elif not is_syncing and (user_id, device_id) in process_presence: + devices = self._user_to_device_to_current_state.setdefault(user_id, {}) + device_state = devices.setdefault( + device_id, UserDevicePresenceState.default(user_id, device_id) + ) + device_state.last_sync_ts = sync_time_msec + new_state = prev_state.copy_and_replace( last_user_sync_ts=sync_time_msec ) @@ -1151,11 +1234,24 @@ class PresenceHandler(BasePresenceHandler): process_presence = self.external_process_to_current_syncs.pop( process_id, set() ) - prev_states = await self.current_state_for_users( - {user_id for user_id, device_id in process_presence} - ) + time_now_ms = self.clock.time_msec() + # Mark each device as having a last sync time. + updated_users = set() + for user_id, device_id in process_presence: + device_state = self._user_to_device_to_current_state.setdefault( + user_id, {} + ).setdefault( + device_id, UserDevicePresenceState.default(user_id, device_id) + ) + + device_state.last_sync_ts = time_now_ms + updated_users.add(user_id) + + # Update each user (and insert into the appropriate timers to check if + # they've gone offline). + prev_states = await self.current_state_for_users(updated_users) await self._update_states( [ prev_state.copy_and_replace(last_user_sync_ts=time_now_ms) @@ -1277,6 +1373,20 @@ class PresenceHandler(BasePresenceHandler): if prev_state.state == PresenceState.BUSY and is_sync: presence = PresenceState.BUSY + # Update the device specific information. 
+ devices = self._user_to_device_to_current_state.setdefault(user_id, {}) + device_state = devices.setdefault( + device_id, + UserDevicePresenceState.default(user_id, device_id), + ) + device_state.state = presence + device_state.last_active_ts = now + if is_sync: + device_state.last_sync_ts = now + + # Based on the state of each user's device calculate the new presence state. + presence = _combine_device_states(devices.values()) + new_fields = {"state": presence} if presence == PresenceState.ONLINE or presence == PresenceState.BUSY: @@ -1873,7 +1983,8 @@ class PresenceEventSource(EventSource[int, UserPresenceState]): def handle_timeouts( user_states: List[UserPresenceState], is_mine_fn: Callable[[str], bool], - syncing_user_ids: Set[str], + syncing_user_devices: AbstractSet[Tuple[str, Optional[str]]], + user_to_devices: Dict[str, Dict[Optional[str], UserDevicePresenceState]], now: int, ) -> List[UserPresenceState]: """Checks the presence of users that have timed out and updates as @@ -1882,7 +1993,8 @@ def handle_timeouts( Args: user_states: List of UserPresenceState's to check. is_mine_fn: Function that returns if a user_id is ours - syncing_user_ids: Set of user_ids with active syncs. + syncing_user_devices: A set of (user ID, device ID) tuples with active syncs.. + user_to_devices: A map of user ID to device ID to UserDevicePresenceState. now: Current time in ms. Returns: @@ -1891,9 +2003,16 @@ def handle_timeouts( changes = {} # Actual changes we need to notify people about for state in user_states: - is_mine = is_mine_fn(state.user_id) + user_id = state.user_id + is_mine = is_mine_fn(user_id) - new_state = handle_timeout(state, is_mine, syncing_user_ids, now) + new_state = handle_timeout( + state, + is_mine, + syncing_user_devices, + user_to_devices.get(user_id, {}), + now, + ) if new_state: changes[state.user_id] = new_state @@ -1901,14 +2020,19 @@ def handle_timeouts( def handle_timeout( - state: UserPresenceState, is_mine: bool, syncing_user_ids: Set[str], now: int + state: UserPresenceState, + is_mine: bool, + syncing_device_ids: AbstractSet[Tuple[str, Optional[str]]], + user_devices: Dict[Optional[str], UserDevicePresenceState], + now: int, ) -> Optional[UserPresenceState]: """Checks the presence of the user to see if any of the timers have elapsed Args: - state + state: UserPresenceState to check. is_mine: Whether the user is ours - syncing_user_ids: Set of user_ids with active syncs. + syncing_user_devices: A set of (user ID, device ID) tuples with active syncs.. + user_devices: A map of device ID to UserDevicePresenceState. now: Current time in ms. Returns: @@ -1919,34 +2043,55 @@ def handle_timeout( return None changed = False - user_id = state.user_id if is_mine: - if state.state == PresenceState.ONLINE: - if now - state.last_active_ts > IDLE_TIMER: - # Currently online, but last activity ages ago so auto - # idle - state = state.copy_and_replace(state=PresenceState.UNAVAILABLE) - changed = True - elif now - state.last_active_ts > LAST_ACTIVE_GRANULARITY: - # So that we send down a notification that we've - # stopped updating. + # Check per-device whether the device should be considered idle or offline + # due to timeouts. 
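+ # For scale: with the module constants used here, IDLE_TIMER is
+ # 5 * 60 * 1000 ms and SYNC_ONLINE_TIMEOUT is 30 * 1000 ms, so an online
+ # device goes idle after five minutes without activity and is dropped as
+ # offline about thirty seconds after its last sync or activity.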
+ device_changed = False + offline_devices = [] + for device_id, device_state in user_devices.items(): + if device_state.state == PresenceState.ONLINE: + if now - device_state.last_active_ts > IDLE_TIMER: + # Currently online, but last activity ages ago so auto + # idle + device_state.state = PresenceState.UNAVAILABLE + device_changed = True + + # If there are have been no sync for a while (and none ongoing), + # set presence to offline. + if (state.user_id, device_id) not in syncing_device_ids: + # If the user has done something recently but hasn't synced, + # don't set them as offline. + sync_or_active = max( + device_state.last_sync_ts, device_state.last_active_ts + ) + + if now - sync_or_active > SYNC_ONLINE_TIMEOUT: + # Mark the device as going offline. + offline_devices.append(device_id) + device_changed = True + + # Offline devices are not needed and do not add information. + for device_id in offline_devices: + user_devices.pop(device_id) + + # If the presence state of the devices changed, then (maybe) update + # the user's overall presence state. + if device_changed: + new_presence = _combine_device_states(user_devices.values()) + if new_presence != state.state: + state = state.copy_and_replace(state=new_presence) changed = True + if now - state.last_active_ts > LAST_ACTIVE_GRANULARITY: + # So that we send down a notification that we've + # stopped updating. + changed = True + if now - state.last_federation_update_ts > FEDERATION_PING_INTERVAL: # Need to send ping to other servers to ensure they don't # timeout and set us to offline changed = True - - # If there are have been no sync for a while (and none ongoing), - # set presence to offline - if user_id not in syncing_user_ids: - # If the user has done something recently but hasn't synced, - # don't set them as offline. - sync_or_active = max(state.last_user_sync_ts, state.last_active_ts) - if now - sync_or_active > SYNC_ONLINE_TIMEOUT: - state = state.copy_and_replace(state=PresenceState.OFFLINE) - changed = True else: # We expect to be poked occasionally by the other side. # This is to protect against forgetful/buggy servers, so that @@ -2036,6 +2181,46 @@ def handle_update( return new_state, persist_and_notify, federation_ping +PRESENCE_BY_PRIORITY = { + PresenceState.BUSY: 4, + PresenceState.ONLINE: 3, + PresenceState.UNAVAILABLE: 2, + PresenceState.OFFLINE: 1, +} + + +def _combine_device_states( + device_states: Iterable[UserDevicePresenceState], +) -> str: + """ + Find the device to use presence information from. + + Orders devices by priority, then last_active_ts. + + Args: + device_states: An iterable of device presence states + + Return: + The combined presence state. + """ + + # Based on (all) the user's devices calculate the new presence state. + presence = PresenceState.OFFLINE + last_active_ts = -1 + + # Find the device to use the presence state of based on the presence priority, + # but tie-break with how recently the device has been seen. 
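+ # A worked example: for devices (ONLINE, last_active_ts=100) and
+ # (UNAVAILABLE, last_active_ts=999) the tuples compared below are
+ # (3, 100) and (2, 999); since 3 > 2 the ONLINE device wins even though
+ # it was seen less recently. last_active_ts therefore only breaks ties
+ # between devices in the same state.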
+ for device_state in device_states: + if (PRESENCE_BY_PRIORITY[device_state.state], device_state.last_active_ts) > ( + PRESENCE_BY_PRIORITY[presence], + last_active_ts, + ): + presence = device_state.state + last_active_ts = device_state.last_active_ts + + return presence + + async def get_interested_parties( store: DataStore, presence_router: PresenceRouter, states: List[UserPresenceState] ) -> Tuple[Dict[str, List[UserPresenceState]], Dict[str, List[UserPresenceState]]]: diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 88a16193a3..914415740a 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -21,7 +21,7 @@ from signedjson.key import generate_signing_key from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes, Membership, PresenceState -from synapse.api.presence import UserPresenceState +from synapse.api.presence import UserDevicePresenceState, UserPresenceState from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events.builder import EventBuilder from synapse.federation.sender import FederationSender @@ -352,6 +352,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): def test_idle_timer(self) -> None: user_id = "@foo:bar" + device_id = "dev-1" status_msg = "I'm here!" now = 5000000 @@ -362,8 +363,21 @@ class PresenceTimeoutTestCase(unittest.TestCase): last_user_sync_ts=now, status_msg=status_msg, ) + device_state = UserDevicePresenceState( + user_id=user_id, + device_id=device_id, + state=state.state, + last_active_ts=state.last_active_ts, + last_sync_ts=state.last_user_sync_ts, + ) - new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) + new_state = handle_timeout( + state, + is_mine=True, + syncing_device_ids=set(), + user_devices={device_id: device_state}, + now=now, + ) self.assertIsNotNone(new_state) assert new_state is not None @@ -376,6 +390,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): presence state into unavailable. """ user_id = "@foo:bar" + device_id = "dev-1" status_msg = "I'm here!" now = 5000000 @@ -386,8 +401,21 @@ class PresenceTimeoutTestCase(unittest.TestCase): last_user_sync_ts=now, status_msg=status_msg, ) + device_state = UserDevicePresenceState( + user_id=user_id, + device_id=device_id, + state=state.state, + last_active_ts=state.last_active_ts, + last_sync_ts=state.last_user_sync_ts, + ) - new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) + new_state = handle_timeout( + state, + is_mine=True, + syncing_device_ids=set(), + user_devices={device_id: device_state}, + now=now, + ) self.assertIsNotNone(new_state) assert new_state is not None @@ -396,6 +424,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): def test_sync_timeout(self) -> None: user_id = "@foo:bar" + device_id = "dev-1" status_msg = "I'm here!" 
now = 5000000 @@ -406,8 +435,21 @@ class PresenceTimeoutTestCase(unittest.TestCase): last_user_sync_ts=now - SYNC_ONLINE_TIMEOUT - 1, status_msg=status_msg, ) + device_state = UserDevicePresenceState( + user_id=user_id, + device_id=device_id, + state=state.state, + last_active_ts=state.last_active_ts, + last_sync_ts=state.last_user_sync_ts, + ) - new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) + new_state = handle_timeout( + state, + is_mine=True, + syncing_device_ids=set(), + user_devices={device_id: device_state}, + now=now, + ) self.assertIsNotNone(new_state) assert new_state is not None @@ -416,6 +458,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): def test_sync_online(self) -> None: user_id = "@foo:bar" + device_id = "dev-1" status_msg = "I'm here!" now = 5000000 @@ -426,9 +469,20 @@ class PresenceTimeoutTestCase(unittest.TestCase): last_user_sync_ts=now - SYNC_ONLINE_TIMEOUT - 1, status_msg=status_msg, ) + device_state = UserDevicePresenceState( + user_id=user_id, + device_id=device_id, + state=state.state, + last_active_ts=state.last_active_ts, + last_sync_ts=state.last_user_sync_ts, + ) new_state = handle_timeout( - state, is_mine=True, syncing_user_ids={user_id}, now=now + state, + is_mine=True, + syncing_device_ids={(user_id, device_id)}, + user_devices={device_id: device_state}, + now=now, ) self.assertIsNotNone(new_state) @@ -438,6 +492,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): def test_federation_ping(self) -> None: user_id = "@foo:bar" + device_id = "dev-1" status_msg = "I'm here!" now = 5000000 @@ -449,14 +504,28 @@ class PresenceTimeoutTestCase(unittest.TestCase): last_federation_update_ts=now - FEDERATION_PING_INTERVAL - 1, status_msg=status_msg, ) + device_state = UserDevicePresenceState( + user_id=user_id, + device_id=device_id, + state=state.state, + last_active_ts=state.last_active_ts, + last_sync_ts=state.last_user_sync_ts, + ) - new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) + new_state = handle_timeout( + state, + is_mine=True, + syncing_device_ids=set(), + user_devices={device_id: device_state}, + now=now, + ) self.assertIsNotNone(new_state) self.assertEqual(state, new_state) def test_no_timeout(self) -> None: user_id = "@foo:bar" + device_id = "dev-1" now = 5000000 state = UserPresenceState.default(user_id) @@ -466,8 +535,21 @@ class PresenceTimeoutTestCase(unittest.TestCase): last_user_sync_ts=now, last_federation_update_ts=now, ) + device_state = UserDevicePresenceState( + user_id=user_id, + device_id=device_id, + state=state.state, + last_active_ts=state.last_active_ts, + last_sync_ts=state.last_user_sync_ts, + ) - new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) + new_state = handle_timeout( + state, + is_mine=True, + syncing_device_ids=set(), + user_devices={device_id: device_state}, + now=now, + ) self.assertIsNone(new_state) @@ -485,8 +567,9 @@ class PresenceTimeoutTestCase(unittest.TestCase): status_msg=status_msg, ) + # Note that this is a remote user so we do not have their device information. new_state = handle_timeout( - state, is_mine=False, syncing_user_ids=set(), now=now + state, is_mine=False, syncing_device_ids=set(), user_devices={}, now=now ) self.assertIsNotNone(new_state) @@ -496,6 +579,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): def test_last_active(self) -> None: user_id = "@foo:bar" + device_id = "dev-1" status_msg = "I'm here!" 
now = 5000000 @@ -507,8 +591,21 @@ class PresenceTimeoutTestCase(unittest.TestCase): last_federation_update_ts=now, status_msg=status_msg, ) + device_state = UserDevicePresenceState( + user_id=user_id, + device_id=device_id, + state=state.state, + last_active_ts=state.last_active_ts, + last_sync_ts=state.last_user_sync_ts, + ) - new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) + new_state = handle_timeout( + state, + is_mine=True, + syncing_device_ids=set(), + user_devices={device_id: device_state}, + now=now, + ) self.assertIsNotNone(new_state) self.assertEqual(state, new_state) @@ -579,7 +676,7 @@ class PresenceHandlerInitTestCase(unittest.HomeserverTestCase): [ (PresenceState.BUSY, PresenceState.BUSY), (PresenceState.ONLINE, PresenceState.ONLINE), - (PresenceState.UNAVAILABLE, PresenceState.UNAVAILABLE), + (PresenceState.UNAVAILABLE, PresenceState.ONLINE), # Offline syncs don't update the state. (PresenceState.OFFLINE, PresenceState.ONLINE), ] @@ -800,6 +897,389 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): # we should now be online self.assertEqual(state.state, PresenceState.ONLINE) + @parameterized.expand( + # A list of tuples of 4 strings: + # + # * The presence state of device 1. + # * The presence state of device 2. + # * The expected user presence state after both devices have synced. + # * The expected user presence state after device 1 has idled. + # * The expected user presence state after device 2 has idled. + # * True to use workers, False a monolith. + [ + (*cases, workers) + for workers in (False, True) + for cases in [ + # If both devices have the same state, online should eventually idle. + # Otherwise, the state doesn't change. + ( + PresenceState.ONLINE, + PresenceState.ONLINE, + PresenceState.ONLINE, + PresenceState.ONLINE, + PresenceState.UNAVAILABLE, + ), + ( + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + ), + ( + PresenceState.OFFLINE, + PresenceState.OFFLINE, + PresenceState.OFFLINE, + PresenceState.OFFLINE, + PresenceState.OFFLINE, + ), + # If the second device has a "lower" state it should fallback to it. + ( + PresenceState.ONLINE, + PresenceState.UNAVAILABLE, + PresenceState.ONLINE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + ), + ( + PresenceState.ONLINE, + PresenceState.OFFLINE, + PresenceState.ONLINE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + ), + ( + PresenceState.UNAVAILABLE, + PresenceState.OFFLINE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + ), + # If the second device has a "higher" state it should override. 
+ ( + PresenceState.UNAVAILABLE, + PresenceState.ONLINE, + PresenceState.ONLINE, + PresenceState.ONLINE, + PresenceState.UNAVAILABLE, + ), + ( + PresenceState.OFFLINE, + PresenceState.ONLINE, + PresenceState.ONLINE, + PresenceState.ONLINE, + PresenceState.UNAVAILABLE, + ), + ( + PresenceState.OFFLINE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + ), + ] + ], + name_func=lambda testcase_func, param_num, params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[5] else 'monolith'}", + ) + @unittest.override_config({"experimental_features": {"msc3026_enabled": True}}) + def test_set_presence_from_syncing_multi_device( + self, + dev_1_state: str, + dev_2_state: str, + expected_state_1: str, + expected_state_2: str, + expected_state_3: str, + test_with_workers: bool, + ) -> None: + """ + Test the behaviour of multiple devices syncing at the same time. + + Roughly the user's presence state should be set to the "highest" priority + of all the devices. When a device then goes offline its state should be + discarded and the next highest should win. + + Note that these tests use the idle timer (and don't close the syncs), it + is unlikely that a *single* sync would last this long, but is close enough + to continually syncing with that current state. + """ + user_id = f"@test:{self.hs.config.server.server_name}" + + # By default, we call /sync against the main process. + worker_presence_handler = self.presence_handler + if test_with_workers: + # Create a worker and use it to handle /sync traffic instead. + # This is used to test that presence changes get replicated from workers + # to the main process correctly. + worker_to_sync_against = self.make_worker_hs( + "synapse.app.generic_worker", {"worker_name": "synchrotron"} + ) + worker_presence_handler = worker_to_sync_against.get_presence_handler() + + # 1. Sync with the first device. + self.get_success( + worker_presence_handler.user_syncing( + user_id, + "dev-1", + affect_presence=dev_1_state != PresenceState.OFFLINE, + presence_state=dev_1_state, + ), + by=0.01, + ) + + # 2. Wait half the idle timer. + self.reactor.advance(IDLE_TIMER / 1000 / 2) + self.reactor.pump([0.1]) + + # 3. Sync with the second device. + self.get_success( + worker_presence_handler.user_syncing( + user_id, + "dev-2", + affect_presence=dev_2_state != PresenceState.OFFLINE, + presence_state=dev_2_state, + ), + by=0.01, + ) + + # 4. Assert the expected presence state. + state = self.get_success( + self.presence_handler.get_state(UserID.from_string(user_id)) + ) + self.assertEqual(state.state, expected_state_1) + if test_with_workers: + state = self.get_success( + worker_presence_handler.get_state(UserID.from_string(user_id)) + ) + self.assertEqual(state.state, expected_state_1) + + # When testing with workers, make another random sync (with any *different* + # user) to keep the process information from expiring. + # + # This is due to EXTERNAL_PROCESS_EXPIRY being equivalent to IDLE_TIMER. + if test_with_workers: + with self.get_success( + worker_presence_handler.user_syncing( + f"@other-user:{self.hs.config.server.server_name}", + "dev-3", + affect_presence=True, + presence_state=PresenceState.ONLINE, + ), + by=0.01, + ): + pass + + # 5. Advance such that the first device should be discarded (the idle timer), + # then pump so _handle_timeouts function to called. + self.reactor.advance(IDLE_TIMER / 1000 / 2) + self.reactor.pump([0.01]) + + # 6. Assert the expected presence state. 
+ state = self.get_success( + self.presence_handler.get_state(UserID.from_string(user_id)) + ) + self.assertEqual(state.state, expected_state_2) + if test_with_workers: + state = self.get_success( + worker_presence_handler.get_state(UserID.from_string(user_id)) + ) + self.assertEqual(state.state, expected_state_2) + + # 7. Advance such that the second device should be discarded (half the idle timer), + # then pump so _handle_timeouts function to called. + self.reactor.advance(IDLE_TIMER / 1000 / 2) + self.reactor.pump([0.1]) + + # 8. The devices are still "syncing" (the sync context managers were never + # closed), so might idle. + state = self.get_success( + self.presence_handler.get_state(UserID.from_string(user_id)) + ) + self.assertEqual(state.state, expected_state_3) + if test_with_workers: + state = self.get_success( + worker_presence_handler.get_state(UserID.from_string(user_id)) + ) + self.assertEqual(state.state, expected_state_3) + + @parameterized.expand( + # A list of tuples of 4 strings: + # + # * The presence state of device 1. + # * The presence state of device 2. + # * The expected user presence state after both devices have synced. + # * The expected user presence state after device 1 has stopped syncing. + # * True to use workers, False a monolith. + [ + (*cases, workers) + for workers in (False, True) + for cases in [ + # If both devices have the same state, nothing exciting should happen. + ( + PresenceState.ONLINE, + PresenceState.ONLINE, + PresenceState.ONLINE, + PresenceState.ONLINE, + ), + ( + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + ), + ( + PresenceState.OFFLINE, + PresenceState.OFFLINE, + PresenceState.OFFLINE, + PresenceState.OFFLINE, + ), + # If the second device has a "lower" state it should fallback to it. + ( + PresenceState.ONLINE, + PresenceState.UNAVAILABLE, + PresenceState.ONLINE, + PresenceState.UNAVAILABLE, + ), + ( + PresenceState.ONLINE, + PresenceState.OFFLINE, + PresenceState.ONLINE, + PresenceState.OFFLINE, + ), + ( + PresenceState.UNAVAILABLE, + PresenceState.OFFLINE, + PresenceState.UNAVAILABLE, + PresenceState.OFFLINE, + ), + # If the second device has a "higher" state it should override. + ( + PresenceState.UNAVAILABLE, + PresenceState.ONLINE, + PresenceState.ONLINE, + PresenceState.ONLINE, + ), + ( + PresenceState.OFFLINE, + PresenceState.ONLINE, + PresenceState.ONLINE, + PresenceState.ONLINE, + ), + ( + PresenceState.OFFLINE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + PresenceState.UNAVAILABLE, + ), + ] + ], + name_func=lambda testcase_func, param_num, params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[4] else 'monolith'}", + ) + @unittest.override_config({"experimental_features": {"msc3026_enabled": True}}) + def test_set_presence_from_non_syncing_multi_device( + self, + dev_1_state: str, + dev_2_state: str, + expected_state_1: str, + expected_state_2: str, + test_with_workers: bool, + ) -> None: + """ + Test the behaviour of multiple devices syncing at the same time. + + Roughly the user's presence state should be set to the "highest" priority + of all the devices. When a device then goes offline its state should be + discarded and the next highest should win. + + Note that these tests use the idle timer (and don't close the syncs), it + is unlikely that a *single* sync would last this long, but is close enough + to continually syncing with that current state. 
+ """ + user_id = f"@test:{self.hs.config.server.server_name}" + + # By default, we call /sync against the main process. + worker_presence_handler = self.presence_handler + if test_with_workers: + # Create a worker and use it to handle /sync traffic instead. + # This is used to test that presence changes get replicated from workers + # to the main process correctly. + worker_to_sync_against = self.make_worker_hs( + "synapse.app.generic_worker", {"worker_name": "synchrotron"} + ) + worker_presence_handler = worker_to_sync_against.get_presence_handler() + + # 1. Sync with the first device. + sync_1 = self.get_success( + worker_presence_handler.user_syncing( + user_id, + "dev-1", + affect_presence=dev_1_state != PresenceState.OFFLINE, + presence_state=dev_1_state, + ), + by=0.1, + ) + + # 2. Sync with the second device. + sync_2 = self.get_success( + worker_presence_handler.user_syncing( + user_id, + "dev-2", + affect_presence=dev_2_state != PresenceState.OFFLINE, + presence_state=dev_2_state, + ), + by=0.1, + ) + + # 3. Assert the expected presence state. + state = self.get_success( + self.presence_handler.get_state(UserID.from_string(user_id)) + ) + self.assertEqual(state.state, expected_state_1) + if test_with_workers: + state = self.get_success( + worker_presence_handler.get_state(UserID.from_string(user_id)) + ) + self.assertEqual(state.state, expected_state_1) + + # 4. Disconnect the first device. + with sync_1: + pass + + # 5. Advance such that the first device should be discarded (the sync timeout), + # then pump so _handle_timeouts function to called. + self.reactor.advance(SYNC_ONLINE_TIMEOUT / 1000) + self.reactor.pump([5]) + + # 6. Assert the expected presence state. + state = self.get_success( + self.presence_handler.get_state(UserID.from_string(user_id)) + ) + self.assertEqual(state.state, expected_state_2) + if test_with_workers: + state = self.get_success( + worker_presence_handler.get_state(UserID.from_string(user_id)) + ) + self.assertEqual(state.state, expected_state_2) + + # 7. Disconnect the second device. + with sync_2: + pass + + # 8. Advance such that the second device should be discarded (the sync timeout), + # then pump so _handle_timeouts function to called. + self.reactor.advance(SYNC_ONLINE_TIMEOUT / 1000) + self.reactor.pump([5]) + + # 9. There are no more devices, should be offline. + state = self.get_success( + self.presence_handler.get_state(UserID.from_string(user_id)) + ) + self.assertEqual(state.state, PresenceState.OFFLINE) + if test_with_workers: + state = self.get_success( + worker_presence_handler.get_state(UserID.from_string(user_id)) + ) + self.assertEqual(state.state, PresenceState.OFFLINE) + def test_set_presence_from_syncing_keeps_status(self) -> None: """Test that presence set by syncing retains status message""" status_msg = "I'm here!" From 8b5013dcbc5db16f0f771898da493e812be6fc8a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 5 Sep 2023 10:39:38 -0400 Subject: [PATCH 418/562] Time out busy presence status & test multi-device busy (#16174) Add a (long) timeout to when a "busy" device is considered not online. This does *not* match MSC3026, but is a reasonable thing for an implementation to do. Expands tests for the (unstable) busy presence with multiple devices. 
--- changelog.d/16174.bugfix | 1 + synapse/handlers/presence.py | 19 +++++- tests/handlers/test_presence.py | 104 +++++++++++++++++++++++++++++++- 3 files changed, 120 insertions(+), 4 deletions(-) create mode 100644 changelog.d/16174.bugfix diff --git a/changelog.d/16174.bugfix b/changelog.d/16174.bugfix new file mode 100644 index 0000000000..83649cf42a --- /dev/null +++ b/changelog.d/16174.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 80190838b7..a4b05b72e7 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -155,6 +155,8 @@ LAST_ACTIVE_GRANULARITY = 60 * 1000 # How long to wait until a new /events or /sync request before assuming # the client has gone. SYNC_ONLINE_TIMEOUT = 30 * 1000 +# Busy status waits longer, but does eventually go offline. +BUSY_ONLINE_TIMEOUT = 60 * 60 * 1000 # How long to wait before marking the user as idle. Compared against last active IDLE_TIMER = 5 * 60 * 1000 @@ -2066,7 +2068,15 @@ def handle_timeout( device_state.last_sync_ts, device_state.last_active_ts ) - if now - sync_or_active > SYNC_ONLINE_TIMEOUT: + # Implementations aren't meant to timeout a device with a busy + # state, but it needs to timeout *eventually* or else the user + # will be stuck in that state. + online_timeout = ( + BUSY_ONLINE_TIMEOUT + if device_state.state == PresenceState.BUSY + else SYNC_ONLINE_TIMEOUT + ) + if now - sync_or_active > online_timeout: # Mark the device as going offline. offline_devices.append(device_id) device_changed = True @@ -2166,6 +2176,13 @@ def handle_update( new_state = new_state.copy_and_replace(last_federation_update_ts=now) federation_ping = True + if new_state.state == PresenceState.BUSY: + wheel_timer.insert( + now=now, + obj=user_id, + then=new_state.last_user_sync_ts + BUSY_ONLINE_TIMEOUT, + ) + else: wheel_timer.insert( now=now, diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 914415740a..638787b029 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -26,6 +26,7 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events.builder import EventBuilder from synapse.federation.sender import FederationSender from synapse.handlers.presence import ( + BUSY_ONLINE_TIMEOUT, EXTERNAL_PROCESS_EXPIRY, FEDERATION_PING_INTERVAL, FEDERATION_TIMEOUT, @@ -912,6 +913,13 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): for cases in [ # If both devices have the same state, online should eventually idle. # Otherwise, the state doesn't change. + ( + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + ), ( PresenceState.ONLINE, PresenceState.ONLINE, @@ -933,7 +941,29 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): PresenceState.OFFLINE, PresenceState.OFFLINE, ), - # If the second device has a "lower" state it should fallback to it. + # If the second device has a "lower" state it should fallback to it, + # except for "busy" which overrides. 
+ ( + PresenceState.BUSY, + PresenceState.ONLINE, + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + ), + ( + PresenceState.BUSY, + PresenceState.UNAVAILABLE, + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + ), + ( + PresenceState.BUSY, + PresenceState.OFFLINE, + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + ), ( PresenceState.ONLINE, PresenceState.UNAVAILABLE, @@ -956,6 +986,27 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): PresenceState.UNAVAILABLE, ), # If the second device has a "higher" state it should override. + ( + PresenceState.ONLINE, + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + ), + ( + PresenceState.UNAVAILABLE, + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + ), + ( + PresenceState.OFFLINE, + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + ), ( PresenceState.UNAVAILABLE, PresenceState.ONLINE, @@ -1114,6 +1165,12 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): for workers in (False, True) for cases in [ # If both devices have the same state, nothing exciting should happen. + ( + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + ), ( PresenceState.ONLINE, PresenceState.ONLINE, @@ -1132,7 +1189,26 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): PresenceState.OFFLINE, PresenceState.OFFLINE, ), - # If the second device has a "lower" state it should fallback to it. + # If the second device has a "lower" state it should fallback to it, + # except for "busy" which overrides. + ( + PresenceState.BUSY, + PresenceState.ONLINE, + PresenceState.BUSY, + PresenceState.BUSY, + ), + ( + PresenceState.BUSY, + PresenceState.UNAVAILABLE, + PresenceState.BUSY, + PresenceState.BUSY, + ), + ( + PresenceState.BUSY, + PresenceState.OFFLINE, + PresenceState.BUSY, + PresenceState.BUSY, + ), ( PresenceState.ONLINE, PresenceState.UNAVAILABLE, @@ -1152,6 +1228,24 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): PresenceState.OFFLINE, ), # If the second device has a "higher" state it should override. + ( + PresenceState.ONLINE, + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + ), + ( + PresenceState.UNAVAILABLE, + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + ), + ( + PresenceState.OFFLINE, + PresenceState.BUSY, + PresenceState.BUSY, + PresenceState.BUSY, + ), ( PresenceState.UNAVAILABLE, PresenceState.ONLINE, @@ -1266,7 +1360,11 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): # 8. Advance such that the second device should be discarded (the sync timeout), # then pump so _handle_timeouts function to called. - self.reactor.advance(SYNC_ONLINE_TIMEOUT / 1000) + if dev_1_state == PresenceState.BUSY or dev_2_state == PresenceState.BUSY: + timeout = BUSY_ONLINE_TIMEOUT + else: + timeout = SYNC_ONLINE_TIMEOUT + self.reactor.advance(timeout / 1000) self.reactor.pump([5]) # 9. There are no more devices, should be offline. From 79aa26936f87356e192093bd4e598ea1b6fc65e4 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 5 Sep 2023 16:45:20 +0100 Subject: [PATCH 419/562] Amend changelog for MSC4041 --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 01bec0fe46..fcadcd5210 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ - Add configuration setting for CAS protocol version. Contributed by Aurélien Grimpard. 
([\#15816](https://github.com/matrix-org/synapse/issues/15816)) - Suppress notifications from message edits per [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958). ([\#16113](https://github.com/matrix-org/synapse/issues/16113)) -- Return a `Retry-After` with `M_LIMIT_EXCEEDED` error responses. ([\#16136](https://github.com/matrix-org/synapse/issues/16136)) +- Experimental support for [MSC4041](https://github.com/matrix-org/matrix-spec-proposals/pull/4041): return a `Retry-After` header with `M_LIMIT_EXCEEDED` error responses. ([\#16136](https://github.com/matrix-org/synapse/issues/16136)) - Add `last_seen_ts` to the [admin users API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html). ([\#16218](https://github.com/matrix-org/synapse/issues/16218)) - Improve resource usage when sending data to a large number of remote hosts that are marked as "down". ([\#16223](https://github.com/matrix-org/synapse/issues/16223)) From a2b8814d64714e00acee662d81206944a9a6a56d Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 5 Sep 2023 12:11:05 -0400 Subject: [PATCH 420/562] Fix incorrect docstring for Ratelimiter. (#16255) --- changelog.d/16255.misc | 1 + synapse/api/ratelimiting.py | 7 ++++--- 2 files changed, 5 insertions(+), 3 deletions(-) create mode 100644 changelog.d/16255.misc diff --git a/changelog.d/16255.misc b/changelog.d/16255.misc new file mode 100644 index 0000000000..94d6aff1d6 --- /dev/null +++ b/changelog.d/16255.misc @@ -0,0 +1 @@ +Fix incorrect docstring for `Ratelimiter`. diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index 887b214d64..02ae45e8b3 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -40,7 +40,7 @@ class Ratelimiter: - the cost C of this request in tokens. Then, if there is room in the bucket for C tokens (T + C <= `burst_count`), the request is permitted and `cost` tokens are added to the bucket. - Otherwise the request is denied, and the bucket continues to hold T tokens. + Otherwise, the request is denied, and the bucket continues to hold T tokens. This means that the limiter enforces an average request frequency of `rate_hz`, while accumulating a buffer of up to `burst_count` requests which can be consumed @@ -55,9 +55,10 @@ class Ratelimiter: request. Args: + store: The datastore providing get_ratelimit_for_user. clock: A homeserver clock, for retrieving the current time - rate_hz: The long term number of actions that can be performed in a second. - burst_count: How many actions that can be performed before being limited. + cfg: The ratelimit configuration for this rate limiter including the + allowed rate and burst count. """ def __init__( From c9cec2daed00406b5337a8ce7064e3394ceaf656 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 5 Sep 2023 20:27:41 +0100 Subject: [PATCH 421/562] Fix bug where we kept re-requesting a remote server's key repeatedly. (#16257) * Correctly handle multiple rows per server/key * Newsfile --- changelog.d/16257.bugfix | 1 + synapse/storage/databases/main/keys.py | 17 +++++++++++------ 2 files changed, 12 insertions(+), 6 deletions(-) create mode 100644 changelog.d/16257.bugfix diff --git a/changelog.d/16257.bugfix b/changelog.d/16257.bugfix new file mode 100644 index 0000000000..28a5319749 --- /dev/null +++ b/changelog.d/16257.bugfix @@ -0,0 +1 @@ +Fix long-standing bug where we kept re-requesting a remote server's key repeatedly, potentially causing delays in receiving events over federation. 
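For context on the fix below: `server_keys_json` can hold several rows per
(server, key ID), and the results are presumably folded into a dict keyed
on those columns, so whichever row is processed last wins. A hedged sketch
of that pattern (the rows and the loop are illustrative; the real row
handling is outside this hunk):

    rows = [
        # (server_name, key_id, key_json, ts_added_ms)
        ("remote.example", "ed25519:a", '{"stale": true}', 1000),
        ("remote.example", "ed25519:a", '{"fresh": true}', 2000),
    ]
    keys = {}
    for server_name, key_id, key_json, ts_added_ms in rows:
        # Rows arrive ordered by ts_added_ms, so the most recently
        # fetched copy of a key overwrites any older one.
        keys[(server_name, key_id)] = key_json
    assert keys[("remote.example", "ed25519:a")] == '{"fresh": true}'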
diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index a3b4744855..57aa4921e1 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -221,12 +221,17 @@ class KeyStore(CacheInvalidationWorkerStore): """Processes a batch of keys to fetch, and adds the result to `keys`.""" # batch_iter always returns tuples so it's safe to do len(batch) - sql = """ - SELECT server_name, key_id, key_json, ts_valid_until_ms - FROM server_keys_json WHERE 1=0 - """ + " OR (server_name=? AND key_id=?)" * len( - batch - ) + where_clause = " OR (server_name=? AND key_id=?)" * len(batch) + + # `server_keys_json` can have multiple entries per server (one per + # remote server we fetched from, if using perspectives). Order by + # `ts_added_ms` so the most recently fetched one always wins. + sql = f""" + SELECT server_name, key_id, key_json, ts_valid_until_ms + FROM server_keys_json WHERE 1=0 + {where_clause} + ORDER BY ts_added_ms + """ txn.execute(sql, tuple(itertools.chain.from_iterable(batch))) From b1d71c687ae55ce67e4cfc82c475e61f959dfeb0 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Tue, 5 Sep 2023 13:45:39 -0600 Subject: [PATCH 422/562] Add MSC4040 `matrix-fed` service lookups (#16137) --- changelog.d/16137.feature | 1 + scripts-dev/federation_client.py | 12 + .../federation/matrix_federation_agent.py | 29 +- .../test_matrix_federation_agent.py | 323 ++++++++++++++++-- 4 files changed, 331 insertions(+), 34 deletions(-) create mode 100644 changelog.d/16137.feature diff --git a/changelog.d/16137.feature b/changelog.d/16137.feature new file mode 100644 index 0000000000..bba6f161cd --- /dev/null +++ b/changelog.d/16137.feature @@ -0,0 +1 @@ +Support resolving homeservers using `matrix-fed` DNS SRV records from [MSC4040](https://github.com/matrix-org/matrix-spec-proposals/pull/4040). diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index 5ad334b4d8..e8baeac5e2 100755 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -329,6 +329,17 @@ class MatrixConnectionAdapter(HTTPAdapter): raise ValueError("Invalid host:port '%s'" % (server_name,)) return out[0], port, out[0] + # Look up SRV for Matrix 1.8 `matrix-fed` service first + try: + srv = srvlookup.lookup("matrix-fed", "tcp", server_name)[0] + print( + f"SRV lookup on _matrix-fed._tcp.{server_name} gave {srv}", + file=sys.stderr, + ) + return srv.host, srv.port, server_name + except Exception: + pass + # Fall back to deprecated `matrix` service try: srv = srvlookup.lookup("matrix", "tcp", server_name)[0] print( @@ -337,6 +348,7 @@ class MatrixConnectionAdapter(HTTPAdapter): ) return srv.host, srv.port, server_name except Exception: + # Fall even further back to just port 8448 return server_name, 8448, server_name @staticmethod diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 91a24efcd0..a3a396bb37 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -399,15 +399,34 @@ class MatrixHostnameEndpoint: if port or _is_ip_literal(host): return [Server(host, port or 8448)] + # Check _matrix-fed._tcp SRV record. logger.debug("Looking up SRV record for %s", host.decode(errors="replace")) + server_list = await self._srv_resolver.resolve_service( + b"_matrix-fed._tcp." 
+ host + ) + + if server_list: + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + "Got %s from SRV lookup for %s", + ", ".join(map(str, server_list)), + host.decode(errors="replace"), + ) + return server_list + + # No _matrix-fed._tcp SRV record, fallback to legacy _matrix._tcp SRV record. + logger.debug( + "Looking up deprecated SRV record for %s", host.decode(errors="replace") + ) server_list = await self._srv_resolver.resolve_service(b"_matrix._tcp." + host) if server_list: - logger.debug( - "Got %s from SRV lookup for %s", - ", ".join(map(str, server_list)), - host.decode(errors="replace"), - ) + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + "Got %s from deprecated SRV lookup for %s", + ", ".join(map(str, server_list)), + host.decode(errors="replace"), + ) return server_list # No SRV records, so we fallback to host and 8448 diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 0d17f2fe5b..9f63fa6fa8 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -15,7 +15,7 @@ import base64 import logging import os from typing import Generator, List, Optional, cast -from unittest.mock import AsyncMock, patch +from unittest.mock import AsyncMock, call, patch import treq from netaddr import IPSet @@ -651,9 +651,9 @@ class MatrixFederationAgentTests(unittest.TestCase): # .well-known request fails. self.reactor.pump((0.4,)) - # now there should be a SRV lookup - self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.testserv1" + # now there should be two SRV lookups + self.mock_resolver.resolve_service.assert_has_calls( + [call(b"_matrix-fed._tcp.testserv1"), call(b"_matrix._tcp.testserv1")] ) # we should fall back to a direct connection @@ -737,9 +737,9 @@ class MatrixFederationAgentTests(unittest.TestCase): # .well-known request fails. 
self.reactor.pump((0.4,)) - # now there should be a SRV lookup - self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.testserv" + # now there should be two SRV lookups + self.mock_resolver.resolve_service.assert_has_calls( + [call(b"_matrix-fed._tcp.testserv"), call(b"_matrix._tcp.testserv")] ) # we should fall back to a direct connection @@ -788,9 +788,12 @@ class MatrixFederationAgentTests(unittest.TestCase): content=b'{ "m.server": "target-server" }', ) - # there should be a SRV lookup - self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.target-server" + # there should be two SRV lookups + self.mock_resolver.resolve_service.assert_has_calls( + [ + call(b"_matrix-fed._tcp.target-server"), + call(b"_matrix._tcp.target-server"), + ] ) # now we should get a connection to the target server @@ -878,9 +881,12 @@ class MatrixFederationAgentTests(unittest.TestCase): self.reactor.pump((0.1,)) - # there should be a SRV lookup - self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.target-server" + # there should be two SRV lookups + self.mock_resolver.resolve_service.assert_has_calls( + [ + call(b"_matrix-fed._tcp.target-server"), + call(b"_matrix._tcp.target-server"), + ] ) # now we should get a connection to the target server @@ -942,9 +948,9 @@ class MatrixFederationAgentTests(unittest.TestCase): client_factory, expected_sni=b"testserv", content=b"NOT JSON" ) - # now there should be a SRV lookup - self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.testserv" + # now there should be two SRV lookups + self.mock_resolver.resolve_service.assert_has_calls( + [call(b"_matrix-fed._tcp.testserv"), call(b"_matrix._tcp.testserv")] ) # we should fall back to a direct connection @@ -1016,14 +1022,14 @@ class MatrixFederationAgentTests(unittest.TestCase): # there should be no requests self.assertEqual(len(http_proto.requests), 0) - # and there should be a SRV lookup instead - self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.testserv" + # and there should be two SRV lookups instead + self.mock_resolver.resolve_service.assert_has_calls( + [call(b"_matrix-fed._tcp.testserv"), call(b"_matrix._tcp.testserv")] ) def test_get_hostname_srv(self) -> None: """ - Test the behaviour when there is a single SRV record + Test the behaviour when there is a single SRV record for _matrix-fed. """ self.agent = self._make_agent() @@ -1039,7 +1045,51 @@ class MatrixFederationAgentTests(unittest.TestCase): # the request for a .well-known will have failed with a DNS lookup error. 
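+ # Resolution order under MSC4040: _matrix-fed._tcp.<host> is tried
+ # first, the deprecated _matrix._tcp.<host> is only consulted when the
+ # new record returns no servers, and <host>:8448 is the final fallback.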
self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.testserv" + b"_matrix-fed._tcp.testserv" + ) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8443) + + # make a test server, and wire up the client + http_server = self._make_connection(client_factory, expected_sni=b"testserv") + + self.assertEqual(len(http_server.requests), 1) + request = http_server.requests[0] + self.assertEqual(request.method, b"GET") + self.assertEqual(request.path, b"/foo/bar") + self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"testserv"]) + + # finish the request + request.finish() + self.reactor.pump((0.1,)) + self.successResultOf(test_d) + + def test_get_hostname_srv_legacy(self) -> None: + """ + Test the behaviour when there is a single SRV record for _matrix. + """ + self.agent = self._make_agent() + + # Return no entries for the _matrix-fed lookup, and a response for _matrix. + self.mock_resolver.resolve_service.side_effect = [ + [], + [Server(host=b"srvtarget", port=8443)], + ] + self.reactor.lookups["srvtarget"] = "1.2.3.4" + + test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") + + # Nothing happened yet + self.assertNoResult(test_d) + + # the request for a .well-known will have failed with a DNS lookup error. + self.mock_resolver.resolve_service.assert_has_calls( + [call(b"_matrix-fed._tcp.testserv"), call(b"_matrix._tcp.testserv")] ) # Make sure treq is trying to connect @@ -1065,7 +1115,7 @@ class MatrixFederationAgentTests(unittest.TestCase): def test_get_well_known_srv(self) -> None: """Test the behaviour when the .well-known redirects to a place where there - is a SRV. + is a _matrix-fed SRV record. """ self.agent = self._make_agent() @@ -1096,7 +1146,72 @@ class MatrixFederationAgentTests(unittest.TestCase): # there should be a SRV lookup self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.target-server" + b"_matrix-fed._tcp.target-server" + ) + + # now we should get a connection to the target of the SRV record + self.assertEqual(len(clients), 2) + (host, port, client_factory, _timeout, _bindAddress) = clients[1] + self.assertEqual(host, "5.6.7.8") + self.assertEqual(port, 8443) + + # make a test server, and wire up the client + http_server = self._make_connection( + client_factory, expected_sni=b"target-server" + ) + + self.assertEqual(len(http_server.requests), 1) + request = http_server.requests[0] + self.assertEqual(request.method, b"GET") + self.assertEqual(request.path, b"/foo/bar") + self.assertEqual( + request.requestHeaders.getRawHeaders(b"host"), [b"target-server"] + ) + + # finish the request + request.finish() + self.reactor.pump((0.1,)) + self.successResultOf(test_d) + + def test_get_well_known_srv_legacy(self) -> None: + """Test the behaviour when the .well-known redirects to a place where there + is a _matrix SRV record. 
+ """ + self.agent = self._make_agent() + + self.reactor.lookups["testserv"] = "1.2.3.4" + self.reactor.lookups["srvtarget"] = "5.6.7.8" + + test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") + + # Nothing happened yet + self.assertNoResult(test_d) + + # there should be an attempt to connect on port 443 for the .well-known + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 443) + + # Return no entries for the _matrix-fed lookup, and a response for _matrix. + self.mock_resolver.resolve_service.side_effect = [ + [], + [Server(host=b"srvtarget", port=8443)], + ] + + self._handle_well_known_connection( + client_factory, + expected_sni=b"testserv", + content=b'{ "m.server": "target-server" }', + ) + + # there should be two SRV lookups + self.mock_resolver.resolve_service.assert_has_calls( + [ + call(b"_matrix-fed._tcp.target-server"), + call(b"_matrix._tcp.target-server"), + ] ) # now we should get a connection to the target of the SRV record @@ -1158,8 +1273,11 @@ class MatrixFederationAgentTests(unittest.TestCase): self.reactor.pump((0.4,)) # now there should have been a SRV lookup - self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.xn--bcher-kva.com" + self.mock_resolver.resolve_service.assert_has_calls( + [ + call(b"_matrix-fed._tcp.xn--bcher-kva.com"), + call(b"_matrix._tcp.xn--bcher-kva.com"), + ] ) # We should fall back to port 8448 @@ -1188,7 +1306,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self.successResultOf(test_d) def test_idna_srv_target(self) -> None: - """test the behaviour when the target of a SRV record has idna chars""" + """test the behaviour when the target of a _matrix-fed SRV record has idna chars""" self.agent = self._make_agent() self.mock_resolver.resolve_service.return_value = [ @@ -1204,7 +1322,57 @@ class MatrixFederationAgentTests(unittest.TestCase): self.assertNoResult(test_d) self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.xn--bcher-kva.com" + b"_matrix-fed._tcp.xn--bcher-kva.com" + ) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8443) + + # make a test server, and wire up the client + http_server = self._make_connection( + client_factory, expected_sni=b"xn--bcher-kva.com" + ) + + self.assertEqual(len(http_server.requests), 1) + request = http_server.requests[0] + self.assertEqual(request.method, b"GET") + self.assertEqual(request.path, b"/foo/bar") + self.assertEqual( + request.requestHeaders.getRawHeaders(b"host"), [b"xn--bcher-kva.com"] + ) + + # finish the request + request.finish() + self.reactor.pump((0.1,)) + self.successResultOf(test_d) + + def test_idna_srv_target_legacy(self) -> None: + """test the behaviour when the target of a _matrix SRV record has idna chars""" + self.agent = self._make_agent() + + # Return no entries for the _matrix-fed lookup, and a response for _matrix. 
+ self.mock_resolver.resolve_service.side_effect = [ + [], + [Server(host=b"xn--trget-3qa.com", port=8443)], + ] # târget.com + self.reactor.lookups["xn--trget-3qa.com"] = "1.2.3.4" + + test_d = self._make_get_request( + b"matrix-federation://xn--bcher-kva.com/foo/bar" + ) + + # Nothing happened yet + self.assertNoResult(test_d) + + self.mock_resolver.resolve_service.assert_has_calls( + [ + call(b"_matrix-fed._tcp.xn--bcher-kva.com"), + call(b"_matrix._tcp.xn--bcher-kva.com"), + ] ) # Make sure treq is trying to connect @@ -1394,7 +1562,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self.assertIsNone(r.delegated_server) def test_srv_fallbacks(self) -> None: - """Test that other SRV results are tried if the first one fails.""" + """Test that other SRV results are tried if the first one fails for _matrix-fed SRV.""" self.agent = self._make_agent() self.mock_resolver.resolve_service.return_value = [ @@ -1409,7 +1577,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self.assertNoResult(test_d) self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.testserv" + b"_matrix-fed._tcp.testserv" ) # We should see an attempt to connect to the first server @@ -1449,6 +1617,103 @@ class MatrixFederationAgentTests(unittest.TestCase): self.reactor.pump((0.1,)) self.successResultOf(test_d) + def test_srv_fallbacks_legacy(self) -> None: + """Test that other SRV results are tried if the first one fails for _matrix SRV.""" + self.agent = self._make_agent() + + # Return no entries for the _matrix-fed lookup, and a response for _matrix. + self.mock_resolver.resolve_service.side_effect = [ + [], + [ + Server(host=b"target.com", port=8443), + Server(host=b"target.com", port=8444), + ], + ] + self.reactor.lookups["target.com"] = "1.2.3.4" + + test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") + + # Nothing happened yet + self.assertNoResult(test_d) + + self.mock_resolver.resolve_service.assert_has_calls( + [call(b"_matrix-fed._tcp.testserv"), call(b"_matrix._tcp.testserv")] + ) + + # We should see an attempt to connect to the first server + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0) + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8443) + + # Fonx the connection + client_factory.clientConnectionFailed(None, Exception("nope")) + + # There's a 300ms delay in HostnameEndpoint + self.reactor.pump((0.4,)) + + # Hasn't failed yet + self.assertNoResult(test_d) + + # We shouldnow see an attempt to connect to the second server + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0) + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8444) + + # make a test server, and wire up the client + http_server = self._make_connection(client_factory, expected_sni=b"testserv") + + self.assertEqual(len(http_server.requests), 1) + request = http_server.requests[0] + self.assertEqual(request.method, b"GET") + self.assertEqual(request.path, b"/foo/bar") + self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"testserv"]) + + # finish the request + request.finish() + self.reactor.pump((0.1,)) + self.successResultOf(test_d) + + def test_srv_no_fallback_to_legacy(self) -> None: + """Test that _matrix SRV results are not tried if the _matrix-fed one fails.""" + self.agent = self._make_agent() + + # Return a failing entry for _matrix-fed. 
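+ # More precisely, the _matrix-fed lookup itself succeeds and returns
+ # target.com:8443; it is the TCP connection made below that is failed,
+ # which shows the agent does not then retry the legacy _matrix record.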
+ self.mock_resolver.resolve_service.side_effect = [ + [Server(host=b"target.com", port=8443)], + [], + ] + self.reactor.lookups["target.com"] = "1.2.3.4" + + test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar") + + # Nothing happened yet + self.assertNoResult(test_d) + + # Only the _matrix-fed is checked, _matrix is ignored. + self.mock_resolver.resolve_service.assert_called_once_with( + b"_matrix-fed._tcp.testserv" + ) + + # We should see an attempt to connect to the first server + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0) + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8443) + + # Fonx the connection + client_factory.clientConnectionFailed(None, Exception("nope")) + + # There's a 300ms delay in HostnameEndpoint + self.reactor.pump((0.4,)) + + # Failed to resolve a server. + self.assertFailure(test_d, Exception) + class TestCachePeriodFromHeaders(unittest.TestCase): def test_cache_control(self) -> None: From 1e571cd66437ea2455c203dafb94c20ba48cdcc1 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Tue, 5 Sep 2023 20:46:57 +0100 Subject: [PATCH 423/562] Fix appservices being unable to handle to_device messages for multiple users (#16251) --- changelog.d/16251.bugfix | 1 + synapse/storage/databases/main/deviceinbox.py | 2 +- tests/handlers/test_appservice.py | 125 ++++++++++++++++++ 3 files changed, 127 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16251.bugfix diff --git a/changelog.d/16251.bugfix b/changelog.d/16251.bugfix new file mode 100644 index 0000000000..6d3157c7aa --- /dev/null +++ b/changelog.d/16251.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where appservices using MSC2409 to receive to_device messages, would only get messages for one user. \ No newline at end of file diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index b471fcb064..271cdf923c 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -349,7 +349,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): table="devices", column="user_id", iterable=user_ids_to_query, - keyvalues={"user_id": user_id, "hidden": False}, + keyvalues={"hidden": False}, retcols=("device_id",), ) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 46d022092e..a7e6cdd66a 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -422,6 +422,18 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase): "exclusive_as_user", "password", self.exclusive_as_user_device_id ) + self.exclusive_as_user_2_device_id = "exclusive_as_device_2" + self.exclusive_as_user_2 = self.register_user("exclusive_as_user_2", "password") + self.exclusive_as_user_2_token = self.login( + "exclusive_as_user_2", "password", self.exclusive_as_user_2_device_id + ) + + self.exclusive_as_user_3_device_id = "exclusive_as_device_3" + self.exclusive_as_user_3 = self.register_user("exclusive_as_user_3", "password") + self.exclusive_as_user_3_token = self.login( + "exclusive_as_user_3", "password", self.exclusive_as_user_3_device_id + ) + def _notify_interested_services(self) -> None: # This is normally set in `notify_interested_services` but we need to call the # internal async version so the reactor gets pushed to completion. 
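For context on the deviceinbox change above: the batched select helper it
calls builds an IN clause from `column`/`iterable` and ANDs on every
`keyvalues` equality, so the queries before and after look roughly like
this (an illustrative reconstruction, not the literal generated SQL):

    # Before: the stray user_id equality collapsed the IN list down to
    # the single user bound to that parameter.
    #   SELECT device_id FROM devices
    #   WHERE user_id IN (?, ?, ...) AND user_id = ? AND hidden = ?
    # After: only the IN list and the hidden flag constrain the rows.
    #   SELECT device_id FROM devices
    #   WHERE user_id IN (?, ?, ...) AND hidden = ?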
@@ -849,6 +861,119 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase):
         for count in service_id_to_message_count.values():
             self.assertEqual(count, number_of_messages)

+    @unittest.override_config(
+        {"experimental_features": {"msc2409_to_device_messages_enabled": True}}
+    )
+    def test_application_services_receive_local_to_device_for_many_users(self) -> None:
+        """
+        Test that when a user sends a to-device message to many users
+        in an application service's user namespace, the
+        application service will receive all of them.
+        """
+        interested_appservice = self._register_application_service(
+            namespaces={
+                ApplicationService.NS_USERS: [
+                    {
+                        "regex": "@exclusive_as_user:.+",
+                        "exclusive": True,
+                    },
+                    {
+                        "regex": "@exclusive_as_user_2:.+",
+                        "exclusive": True,
+                    },
+                    {
+                        "regex": "@exclusive_as_user_3:.+",
+                        "exclusive": True,
+                    },
+                ],
+            },
+        )
+
+        # Have local_user send a to-device message to exclusive_as_users
+        message_content = {"some_key": "some really interesting value"}
+        chan = self.make_request(
+            "PUT",
+            "/_matrix/client/r0/sendToDevice/m.room_key_request/3",
+            content={
+                "messages": {
+                    self.exclusive_as_user: {
+                        self.exclusive_as_user_device_id: message_content
+                    },
+                    self.exclusive_as_user_2: {
+                        self.exclusive_as_user_2_device_id: message_content
+                    },
+                    self.exclusive_as_user_3: {
+                        self.exclusive_as_user_3_device_id: message_content
+                    },
+                }
+            },
+            access_token=self.local_user_token,
+        )
+        self.assertEqual(chan.code, 200, chan.result)
+
+        # Have each exclusive_as_user send a to-device message to local_user
+        for user_token in [
+            self.exclusive_as_user_token,
+            self.exclusive_as_user_2_token,
+            self.exclusive_as_user_3_token,
+        ]:
+            chan = self.make_request(
+                "PUT",
+                "/_matrix/client/r0/sendToDevice/m.room_key_request/4",
+                content={
+                    "messages": {
+                        self.local_user: {self.local_user_device_id: message_content}
+                    }
+                },
+                access_token=user_token,
+            )
+            self.assertEqual(chan.code, 200, chan.result)
+
+        # Check that our application service - which is interested in the
+        # exclusive_as_user accounts - received the to-device messages as part of an
+        # AS transaction: all three local_user -> exclusive_as_user* messages should
+        # have been forwarded to the AS.
+        #
+        # An uninterested application service would not have been notified at all. 
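+        # `send_mock` captures the single transaction pushed to the application
+        # service; unpack its positional arguments so the to-device messages can
+        # be inspected below.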
+        self.send_mock.assert_called_once()
+        (
+            service,
+            _events,
+            _ephemeral,
+            to_device_messages,
+            _otks,
+            _fbks,
+            _device_list_summary,
+        ) = self.send_mock.call_args[0]
+
+        # Assert that the transaction was sent to our interested application service
+        self.assertEqual(service, interested_appservice)
+
+        # Assert expected number of messages
+        self.assertEqual(len(to_device_messages), 3)
+
+        for device_msg in to_device_messages:
+            self.assertEqual(device_msg["type"], "m.room_key_request")
+            self.assertEqual(device_msg["sender"], self.local_user)
+            self.assertEqual(device_msg["content"], message_content)
+
+        self.assertEqual(to_device_messages[0]["to_user_id"], self.exclusive_as_user)
+        self.assertEqual(
+            to_device_messages[0]["to_device_id"],
+            self.exclusive_as_user_device_id,
+        )
+
+        self.assertEqual(to_device_messages[1]["to_user_id"], self.exclusive_as_user_2)
+        self.assertEqual(
+            to_device_messages[1]["to_device_id"],
+            self.exclusive_as_user_2_device_id,
+        )
+
+        self.assertEqual(to_device_messages[2]["to_user_id"], self.exclusive_as_user_3)
+        self.assertEqual(
+            to_device_messages[2]["to_device_id"],
+            self.exclusive_as_user_3_device_id,
+        )
+
     def _register_application_service(
         self,
         namespaces: Optional[Dict[str, Iterable[Dict]]] = None,

From 4f1840a88ad3a93244fc23149c56245704eab824 Mon Sep 17 00:00:00 2001
From: Mathieu Velten
Date: Wed, 6 Sep 2023 09:30:53 +0200
Subject: [PATCH 424/562] Delete device messages asynchronously and in staged
 batches (#16240)

---
 changelog.d/16240.misc                        |  1 +
 synapse/handlers/device.py                    | 48 +++++++++++++++++++
 synapse/handlers/presence.py                  |  4 +-
 synapse/handlers/sync.py                      | 16 +++++--
 synapse/storage/databases/main/deviceinbox.py | 26 +++++++---
 synapse/storage/databases/main/devices.py     |  8 ----
 synapse/storage/databases/main/receipts.py    |  6 +--
 synapse/storage/engines/_base.py              |  6 +++
 synapse/storage/engines/postgres.py           |  4 ++
 synapse/storage/engines/sqlite.py             |  4 ++
 .../main/delta/48/group_unique_indexes.py     |  4 +-
 synapse/util/task_scheduler.py                | 17 +++----
 tests/handlers/test_device.py                 | 47 ++++++++++++++++++
 13 files changed, 154 insertions(+), 37 deletions(-)
 create mode 100644 changelog.d/16240.misc

diff --git a/changelog.d/16240.misc b/changelog.d/16240.misc
new file mode 100644
index 0000000000..4f266c1fb0
--- /dev/null
+++ b/changelog.d/16240.misc
@@ -0,0 +1 @@
+Delete device messages asynchronously and in staged batches using the task scheduler. 
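The handler change below relies on the task scheduler's resumption contract: an
action registered with `register_action` is re-invoked on later scheduler
passes for as long as it returns `TaskStatus.ACTIVE`, and is retired once it
returns `TaskStatus.COMPLETE`. The following is a minimal sketch of that
contract, not code from this patch: `delete_one_batch` is a hypothetical
stand-in for the storage call, while `TaskStatus`, `ScheduledTask` and the
three-tuple return shape are taken from the diff below.

    from typing import Optional, Tuple

    from synapse.types import JsonMapping, ScheduledTask, TaskStatus

    BATCH_LIMIT = 100

    async def delete_one_batch(params: Optional[JsonMapping], limit: int) -> int:
        """Hypothetical storage helper: delete up to `limit` rows, return the count."""
        ...

    async def delete_messages_action(
        task: ScheduledTask,
    ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
        # Delete at most one batch per scheduler pass.
        deleted = await delete_one_batch(task.params, limit=BATCH_LIMIT)
        if deleted < BATCH_LIMIT:
            # A short batch means nothing is left to delete.
            return TaskStatus.COMPLETE, None, None
        # Reporting ACTIVE keeps the task alive; the scheduler will run it
        # again on a subsequent pass.
        return TaskStatus.ACTIVE, None, None

Because completion is judged purely by batch size, a run that deletes exactly
`BATCH_LIMIT` rows schedules one extra (empty) pass; the same trade-off shows
up in the storage layer below, which skips its cache update whenever a full
batch is deleted.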
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 763f56dfc1..9e52af5f13 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -43,9 +43,12 @@ from synapse.metrics.background_process_metrics import ( ) from synapse.types import ( JsonDict, + JsonMapping, + ScheduledTask, StrCollection, StreamKeyType, StreamToken, + TaskStatus, UserID, get_domain_from_id, get_verify_key_from_cross_signing_key, @@ -62,6 +65,7 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +DELETE_DEVICE_MSGS_TASK_NAME = "delete_device_messages" MAX_DEVICE_DISPLAY_NAME_LEN = 100 DELETE_STALE_DEVICES_INTERVAL_MS = 24 * 60 * 60 * 1000 @@ -78,6 +82,7 @@ class DeviceWorkerHandler: self._appservice_handler = hs.get_application_service_handler() self._state_storage = hs.get_storage_controllers().state self._auth_handler = hs.get_auth_handler() + self._event_sources = hs.get_event_sources() self.server_name = hs.hostname self._msc3852_enabled = hs.config.experimental.msc3852_enabled self._query_appservices_for_keys = ( @@ -386,6 +391,7 @@ class DeviceHandler(DeviceWorkerHandler): self._account_data_handler = hs.get_account_data_handler() self._storage_controllers = hs.get_storage_controllers() self.db_pool = hs.get_datastores().main.db_pool + self._task_scheduler = hs.get_task_scheduler() self.device_list_updater = DeviceListUpdater(hs, self) @@ -419,6 +425,10 @@ class DeviceHandler(DeviceWorkerHandler): self._delete_stale_devices, ) + self._task_scheduler.register_action( + self._delete_device_messages, DELETE_DEVICE_MSGS_TASK_NAME + ) + def _check_device_name_length(self, name: Optional[str]) -> None: """ Checks whether a device name is longer than the maximum allowed length. @@ -530,6 +540,7 @@ class DeviceHandler(DeviceWorkerHandler): user_id: The user to delete devices from. device_ids: The list of device IDs to delete """ + to_device_stream_id = self._event_sources.get_current_token().to_device_key try: await self.store.delete_devices(user_id, device_ids) @@ -559,12 +570,49 @@ class DeviceHandler(DeviceWorkerHandler): f"org.matrix.msc3890.local_notification_settings.{device_id}", ) + # Delete device messages asynchronously and in batches using the task scheduler + await self._task_scheduler.schedule_task( + DELETE_DEVICE_MSGS_TASK_NAME, + resource_id=device_id, + params={ + "user_id": user_id, + "device_id": device_id, + "up_to_stream_id": to_device_stream_id, + }, + ) + # Pushers are deleted after `delete_access_tokens_for_user` is called so that # modules using `on_logged_out` hook can use them if needed. 
await self.hs.get_pusherpool().remove_pushers_by_devices(user_id, device_ids)

         await self.notify_device_update(user_id, device_ids)

+    DEVICE_MSGS_DELETE_BATCH_LIMIT = 100
+
+    async def _delete_device_messages(
+        self,
+        task: ScheduledTask,
+    ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
+        """Scheduler task to delete device messages in batches of `DEVICE_MSGS_DELETE_BATCH_LIMIT`."""
+        assert task.params is not None
+        user_id = task.params["user_id"]
+        device_id = task.params["device_id"]
+        up_to_stream_id = task.params["up_to_stream_id"]
+
+        res = await self.store.delete_messages_for_device(
+            user_id=user_id,
+            device_id=device_id,
+            up_to_stream_id=up_to_stream_id,
+            limit=DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT,
+        )
+
+        if res < DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT:
+            return TaskStatus.COMPLETE, None, None
+        else:
+            # There are probably still device messages to be deleted, so keep the task active and it will be run
+            # again in a subsequent scheduler loop run (probably the next one, if not too many tasks are running).
+            return TaskStatus.ACTIVE, None, None
+
     async def update_device(self, user_id: str, device_id: str, content: dict) -> None:
         """Update the given device

diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index a4b05b72e7..375c7d0901 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -183,6 +183,7 @@ class BasePresenceHandler(abc.ABC):
     writer"""

     def __init__(self, hs: "HomeServer"):
+        self.hs = hs
         self.clock = hs.get_clock()
         self.store = hs.get_datastores().main
         self._storage_controllers = hs.get_storage_controllers()
@@ -473,8 +474,6 @@ class _NullContextManager(ContextManager[None]):
 class WorkerPresenceHandler(BasePresenceHandler):
     def __init__(self, hs: "HomeServer"):
         super().__init__(hs)
-        self.hs = hs
-
         self._presence_writer_instance = hs.config.worker.writers.presence[0]

         # Route presence EDUs to the right worker
@@ -738,7 +737,6 @@ class WorkerPresenceHandler(BasePresenceHandler):
 class PresenceHandler(BasePresenceHandler):
     def __init__(self, hs: "HomeServer"):
         super().__init__(hs)
-        self.hs = hs
         self.wheel_timer: WheelTimer[str] = WheelTimer()
         self.notifier = hs.get_notifier()

diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 60a9f341b5..0ccd7d250c 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -40,6 +40,7 @@ from synapse.api.filtering import FilterCollection
 from synapse.api.presence import UserPresenceState
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.events import EventBase
+from synapse.handlers.device import DELETE_DEVICE_MSGS_TASK_NAME
 from synapse.handlers.relations import BundledAggregations
 from synapse.logging import issue9533_logger
 from synapse.logging.context import current_context
@@ -268,6 +269,7 @@ class SyncHandler:
         self._storage_controllers = hs.get_storage_controllers()
         self._state_storage_controller = self._storage_controllers.state
         self._device_handler = hs.get_device_handler()
+        self._task_scheduler = hs.get_task_scheduler()

         self.should_calculate_push_rules = hs.config.push.enable_push

@@ -360,11 +362,19 @@ class SyncHandler:
         # (since we now know that the device has received them)
         if since_token is not None:
             since_stream_id = since_token.to_device_key
-            deleted = await self.store.delete_messages_for_device(
-                sync_config.user.to_string(), sync_config.device_id, since_stream_id
+            # Delete device messages asynchronously and in batches using the task scheduler
+            await 
self._task_scheduler.schedule_task( + DELETE_DEVICE_MSGS_TASK_NAME, + resource_id=sync_config.device_id, + params={ + "user_id": sync_config.user.to_string(), + "device_id": sync_config.device_id, + "up_to_stream_id": since_stream_id, + }, ) logger.debug( - "Deleted %d to-device messages up to %d", deleted, since_stream_id + "Deletion of to-device messages up to %d scheduled", + since_stream_id, ) if timeout == 0 or since_token is None or full_state: diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 271cdf923c..744e98c6d0 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -445,13 +445,18 @@ class DeviceInboxWorkerStore(SQLBaseStore): @trace async def delete_messages_for_device( - self, user_id: str, device_id: Optional[str], up_to_stream_id: int + self, + user_id: str, + device_id: Optional[str], + up_to_stream_id: int, + limit: int, ) -> int: """ Args: user_id: The recipient user_id. device_id: The recipient device_id. up_to_stream_id: Where to delete messages up to. + limit: maximum number of messages to delete Returns: The number of messages deleted. @@ -472,12 +477,16 @@ class DeviceInboxWorkerStore(SQLBaseStore): log_kv({"message": "No changes in cache since last check"}) return 0 + ROW_ID_NAME = self.database_engine.row_id_name + def delete_messages_for_device_txn(txn: LoggingTransaction) -> int: - sql = ( - "DELETE FROM device_inbox" - " WHERE user_id = ? AND device_id = ?" - " AND stream_id <= ?" - ) + sql = f""" + DELETE FROM device_inbox WHERE {ROW_ID_NAME} IN ( + SELECT {ROW_ID_NAME} FROM device_inbox + WHERE user_id = ? AND device_id = ? AND stream_id <= ? + LIMIT {limit} + ) + """ txn.execute(sql, (user_id, device_id, up_to_stream_id)) return txn.rowcount @@ -487,6 +496,11 @@ class DeviceInboxWorkerStore(SQLBaseStore): log_kv({"message": f"deleted {count} messages for device", "count": count}) + # In this case we don't know if we hit the limit or the delete is complete + # so let's not update the cache. + if count == limit: + return count + # Update the cache, ensuring that we only ever increase the value updated_last_deleted_stream_id = self._last_device_delete_cache.get( (user_id, device_id), 0 diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index fa69a4a298..7208fc8b33 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1766,14 +1766,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): keyvalues={"user_id": user_id, "hidden": False}, ) - self.db_pool.simple_delete_many_txn( - txn, - table="device_inbox", - column="device_id", - values=device_ids, - keyvalues={"user_id": user_id}, - ) - self.db_pool.simple_delete_many_txn( txn, table="device_auth_providers", diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 5ee5c7ad9f..e4d10ff250 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -939,11 +939,7 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore): receipts.""" def _remote_duplicate_receipts_txn(txn: LoggingTransaction) -> None: - if isinstance(self.database_engine, PostgresEngine): - ROW_ID_NAME = "ctid" - else: - ROW_ID_NAME = "rowid" - + ROW_ID_NAME = self.database_engine.row_id_name # Identify any duplicate receipts arising from # https://github.com/matrix-org/synapse/issues/14406. 
# The following query takes less than a minute on matrix.org. diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py index 0b5b3bf03e..b1a2418cbd 100644 --- a/synapse/storage/engines/_base.py +++ b/synapse/storage/engines/_base.py @@ -100,6 +100,12 @@ class BaseDatabaseEngine(Generic[ConnectionType, CursorType], metaclass=abc.ABCM """Gets a string giving the server version. For example: '3.22.0'""" ... + @property + @abc.abstractmethod + def row_id_name(self) -> str: + """Gets the literal name representing a row id for this engine.""" + ... + @abc.abstractmethod def in_transaction(self, conn: ConnectionType) -> bool: """Whether the connection is currently in a transaction.""" diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 05a72dc554..6309363217 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -211,6 +211,10 @@ class PostgresEngine( else: return "%i.%i.%i" % (numver / 10000, (numver % 10000) / 100, numver % 100) + @property + def row_id_name(self) -> str: + return "ctid" + def in_transaction(self, conn: psycopg2.extensions.connection) -> bool: return conn.status != psycopg2.extensions.STATUS_READY diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index ca8c59297c..802069e1e1 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -123,6 +123,10 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection, sqlite3.Cursor]): """Gets a string giving the server version. For example: '3.22.0'.""" return "%i.%i.%i" % sqlite3.sqlite_version_info + @property + def row_id_name(self) -> str: + return "rowid" + def in_transaction(self, conn: sqlite3.Connection) -> bool: return conn.in_transaction diff --git a/synapse/storage/schema/main/delta/48/group_unique_indexes.py b/synapse/storage/schema/main/delta/48/group_unique_indexes.py index ad2da4c8af..622686d28f 100644 --- a/synapse/storage/schema/main/delta/48/group_unique_indexes.py +++ b/synapse/storage/schema/main/delta/48/group_unique_indexes.py @@ -14,7 +14,7 @@ from synapse.storage.database import LoggingTransaction -from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine +from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.prepare_database import get_statements FIX_INDEXES = """ @@ -37,7 +37,7 @@ CREATE INDEX group_rooms_r_idx ON group_rooms(room_id); def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None: - rowid = "ctid" if isinstance(database_engine, PostgresEngine) else "rowid" + rowid = database_engine.row_id_name # remove duplicates from group_users & group_invites tables cur.execute( diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py index 9e89aeb748..9b2581e51a 100644 --- a/synapse/util/task_scheduler.py +++ b/synapse/util/task_scheduler.py @@ -77,6 +77,7 @@ class TaskScheduler: LAST_UPDATE_BEFORE_WARNING_MS = 24 * 60 * 60 * 1000 # 24hrs def __init__(self, hs: "HomeServer"): + self._hs = hs self._store = hs.get_datastores().main self._clock = hs.get_clock() self._running_tasks: Set[str] = set() @@ -97,8 +98,6 @@ class TaskScheduler: "handle_scheduled_tasks", self._handle_scheduled_tasks, ) - else: - self.replication_client = hs.get_replication_command_handler() def register_action( self, @@ -133,7 +132,7 @@ class TaskScheduler: params: Optional[JsonMapping] = None, ) -> str: """Schedule a new potentially resumable task. 
A function matching the specified
-        `action` should have been previously registered with `register_action`.
+        `action` should have been registered with `register_action` before the task is run.

         Args:
             action: the name of a previously registered action
@@ -149,11 +148,6 @@ class TaskScheduler:
         Returns:
             The id of the scheduled task
         """
-        if action not in self._actions:
-            raise Exception(
-                f"No function associated with action {action} of the scheduled task"
-            )
-
         status = TaskStatus.SCHEDULED
         if timestamp is None or timestamp < self._clock.time_msec():
             timestamp = self._clock.time_msec()
@@ -175,7 +169,7 @@ class TaskScheduler:
         if self._run_background_tasks:
             await self._launch_task(task)
         else:
-            self.replication_client.send_new_active_task(task.id)
+            self._hs.get_replication_command_handler().send_new_active_task(task.id)

         return task.id

@@ -315,7 +309,10 @@ class TaskScheduler:
         """
         assert self._run_background_tasks

-        assert task.action in self._actions
+        if task.action not in self._actions:
+            raise Exception(
+                f"No function associated with action {task.action} of the scheduled task {task.id}"
+            )
         function = self._actions[task.action]

         async def wrapper() -> None:
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
index 55a4f95ef3..9659a4a355 100644
--- a/tests/handlers/test_device.py
+++ b/tests/handlers/test_device.py
@@ -30,6 +30,7 @@ from synapse.server import HomeServer
 from synapse.storage.databases.main.appservice import _make_exclusive_regex
 from synapse.types import JsonDict, create_requester
 from synapse.util import Clock
+from synapse.util.task_scheduler import TaskScheduler

 from tests import unittest
 from tests.unittest import override_config
@@ -49,6 +50,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
         assert isinstance(handler, DeviceHandler)
         self.handler = handler
         self.store = hs.get_datastores().main
+        self.device_message_handler = hs.get_device_message_handler()
         return hs

     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
@@ -211,6 +213,51 @@ class DeviceTestCase(unittest.HomeserverTestCase):
         )
         self.assertIsNone(res)

+    def test_delete_device_and_big_device_inbox(self) -> None:
+        """Check that deleting a big device inbox is staged and batched asynchronously."""
+        DEVICE_ID = "abc"
+        sender = "@sender:" + self.hs.hostname
+        receiver = "@receiver:" + self.hs.hostname
+        self._record_user(sender, DEVICE_ID, DEVICE_ID)
+        self._record_user(receiver, DEVICE_ID, DEVICE_ID)
+
+        # queue a bunch of messages in the inbox
+        requester = create_requester(sender, device_id=DEVICE_ID)
+        for i in range(0, DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT + 10):
+            self.get_success(
+                self.device_message_handler.send_device_message(
+                    requester, "message_type", {receiver: {"*": {"val": i}}}
+                )
+            )
+
+        # delete the device
+        self.get_success(self.handler.delete_devices(receiver, [DEVICE_ID]))
+
+        # messages should be deleted up to DEVICE_MSGS_DELETE_BATCH_LIMIT straight away
+        res = self.get_success(
+            self.store.db_pool.simple_select_list(
+                table="device_inbox",
+                keyvalues={"user_id": receiver},
+                retcols=("user_id", 
"device_id", "stream_id"), + desc="get_device_id_from_device_inbox", + ) + ) + self.assertEqual(0, len(res)) + def test_update_device(self) -> None: self._record_users() From 698f6fa2508dbff1a4353d57da60be5d13bbd61d Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 6 Sep 2023 10:50:07 +0000 Subject: [PATCH 425/562] Allow modules to delete rooms. (#15997) * Allow user_id to be optional for room deletion * Add module API method to delete a room * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) * Don't worry about the case block=True && requester_user_id is None --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/15997.misc | 1 + synapse/handlers/pagination.py | 12 ++++++++++-- synapse/handlers/room.py | 10 +++++++++- synapse/module_api/__init__.py | 13 +++++++++++++ .../callbacks/third_party_event_rules_callbacks.py | 11 ++++++++--- 5 files changed, 41 insertions(+), 6 deletions(-) create mode 100644 changelog.d/15997.misc diff --git a/changelog.d/15997.misc b/changelog.d/15997.misc new file mode 100644 index 0000000000..94768c3cb8 --- /dev/null +++ b/changelog.d/15997.misc @@ -0,0 +1 @@ +Allow modules to delete rooms. \ No newline at end of file diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index e5ac9096cc..19cf5a2b43 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -713,7 +713,7 @@ class PaginationHandler: self, delete_id: str, room_id: str, - requester_user_id: str, + requester_user_id: Optional[str], new_room_user_id: Optional[str] = None, new_room_name: Optional[str] = None, message: Optional[str] = None, @@ -732,6 +732,10 @@ class PaginationHandler: requester_user_id: User who requested the action. Will be recorded as putting the room on the blocking list. + If None, the action was not manually requested but instead + triggered automatically, e.g. through a Synapse module + or some other policy. + MUST NOT be None if block=True. new_room_user_id: If set, a new room will be created with this user ID as the creator and admin, and all users in the old room will be @@ -818,7 +822,7 @@ class PaginationHandler: def start_shutdown_and_purge_room( self, room_id: str, - requester_user_id: str, + requester_user_id: Optional[str], new_room_user_id: Optional[str] = None, new_room_name: Optional[str] = None, message: Optional[str] = None, @@ -833,6 +837,10 @@ class PaginationHandler: requester_user_id: User who requested the action and put the room on the blocking list. + If None, the action was not manually requested but instead + triggered automatically, e.g. through a Synapse module + or some other policy. + MUST NOT be None if block=True. new_room_user_id: If set, a new room will be created with this user ID as the creator and admin, and all users in the old room will be diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 0513e28aab..7a762c8511 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1787,7 +1787,7 @@ class RoomShutdownHandler: async def shutdown_room( self, room_id: str, - requester_user_id: str, + requester_user_id: Optional[str], new_room_user_id: Optional[str] = None, new_room_name: Optional[str] = None, message: Optional[str] = None, @@ -1811,6 +1811,10 @@ class RoomShutdownHandler: requester_user_id: User who requested the action and put the room on the blocking list. + If None, the action was not manually requested but instead + triggered automatically, e.g. through a Synapse module + or some other policy. + MUST NOT be None if block=True. 
new_room_user_id: If set, a new room will be created with this user ID as the creator and admin, and all users in the old room will be @@ -1863,6 +1867,10 @@ class RoomShutdownHandler: # Action the block first (even if the room doesn't exist yet) if block: + if requester_user_id is None: + raise ValueError( + "shutdown_room: block=True not allowed when requester_user_id is None." + ) # This will work even if the room is already blocked, but that is # desirable in case the first attempt at blocking the room failed below. await self.store.block_room(room_id, requester_user_id) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 2f00a7ba20..d6efe10a28 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -1730,6 +1730,19 @@ class ModuleApi: room_alias_str = room_alias.to_string() if room_alias else None return room_id, room_alias_str + async def delete_room(self, room_id: str) -> None: + """ + Schedules the deletion of a room from Synapse's database. + + If the room is already being deleted, this method does nothing. + This method does not wait for the room to be deleted. + + Added in Synapse v1.89.0. + """ + # Future extensions to this method might want to e.g. allow use of `force_purge`. + # TODO In the future we should make sure this is persistent. + self._hs.get_pagination_handler().start_shutdown_and_purge_room(room_id, None) + async def set_displayname( self, user_id: UserID, diff --git a/synapse/module_api/callbacks/third_party_event_rules_callbacks.py b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py index 911f37ba42..ecaeef3511 100644 --- a/synapse/module_api/callbacks/third_party_event_rules_callbacks.py +++ b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py @@ -40,7 +40,7 @@ CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK = Callable[ [str, StateMap[EventBase], str], Awaitable[bool] ] ON_NEW_EVENT_CALLBACK = Callable[[EventBase, StateMap[EventBase]], Awaitable] -CHECK_CAN_SHUTDOWN_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]] +CHECK_CAN_SHUTDOWN_ROOM_CALLBACK = Callable[[Optional[str], str], Awaitable[bool]] CHECK_CAN_DEACTIVATE_USER_CALLBACK = Callable[[str, bool], Awaitable[bool]] ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable] ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable] @@ -429,12 +429,17 @@ class ThirdPartyEventRulesModuleApiCallbacks: "Failed to run module API callback %s: %s", callback, e ) - async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool: + async def check_can_shutdown_room( + self, user_id: Optional[str], room_id: str + ) -> bool: """Intercept requests to shutdown a room. If `False` is returned, the room must not be shut down. Args: - requester: The ID of the user requesting the shutdown. + user_id: The ID of the user requesting the shutdown. + If no user ID is supplied, then the room is being shut down through + some mechanism other than a user's request, e.g. through a module's + request. room_id: The ID of the room. """ for callback in self._check_can_shutdown_room_callbacks: From e937e2111a45d0cb3ecc973f95dafafecb6e9c36 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 6 Sep 2023 13:01:10 +0000 Subject: [PATCH 426/562] Add the ability to use `G` (GiB) and `T` (TiB) suffixes in configuration options that refer to numbers of bytes. 
(#16219) * Add more suffixes to `parse_size` * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/16219.feature | 1 + docs/usage/configuration/config_documentation.md | 4 +++- synapse/config/_base.py | 7 ++++--- 3 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 changelog.d/16219.feature diff --git a/changelog.d/16219.feature b/changelog.d/16219.feature new file mode 100644 index 0000000000..c789f2abb7 --- /dev/null +++ b/changelog.d/16219.feature @@ -0,0 +1 @@ +Add the ability to use `G` (GiB) and `T` (TiB) suffixes in configuration options that refer to numbers of bytes. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 0b1725816e..97fd1beb39 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -25,8 +25,10 @@ messages from the database after 5 minutes, rather than 5 months. In addition, configuration options referring to size use the following suffixes: -* `M` = MiB, or 1,048,576 bytes * `K` = KiB, or 1024 bytes +* `M` = MiB, or 1,048,576 bytes +* `G` = GiB, or 1,073,741,824 bytes +* `T` = TiB, or 1,099,511,627,776 bytes For example, setting `max_avatar_size: 10M` means that Synapse will not accept files larger than 10,485,760 bytes for a user avatar. diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 69a8318127..58856839e1 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -179,8 +179,9 @@ class Config: If an integer is provided it is treated as bytes and is unchanged. - String byte sizes can have a suffix of 'K' or `M`, representing kibibytes and - mebibytes respectively. No suffix is understood as a plain byte count. + String byte sizes can have a suffix of 'K', `M`, `G` or `T`, + representing kibibytes, mebibytes, gibibytes and tebibytes respectively. + No suffix is understood as a plain byte count. Raises: TypeError, if given something other than an integer or a string @@ -189,7 +190,7 @@ class Config: if type(value) is int: # noqa: E721 return value elif isinstance(value, str): - sizes = {"K": 1024, "M": 1024 * 1024} + sizes = {"K": 1024, "M": 1024 * 1024, "G": 1024**3, "T": 1024**4} size = 1 suffix = value[-1] if suffix in sizes: From ffe4ea130279d10bdb988f60ebee6669ceeddbe7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 6 Sep 2023 14:34:01 +0100 Subject: [PATCH 427/562] Update rust in flake.nix: 1.70.0 -> 1.71.1 to address CVE-2023-38497 (#16260) --- changelog.d/16260.misc | 1 + flake.lock | 6 +++--- flake.nix | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/16260.misc diff --git a/changelog.d/16260.misc b/changelog.d/16260.misc new file mode 100644 index 0000000000..9f3289d7d4 --- /dev/null +++ b/changelog.d/16260.misc @@ -0,0 +1 @@ +Update rust to version 1.71.1 in the nix development environment. 
\ No newline at end of file diff --git a/flake.lock b/flake.lock index d53be767a7..9b360fa33e 100644 --- a/flake.lock +++ b/flake.lock @@ -258,11 +258,11 @@ "nixpkgs": "nixpkgs_3" }, "locked": { - "lastModified": 1690510705, - "narHash": "sha256-6mjs3Gl9/xrseFh9iNcNq1u5yJ/MIoAmjoaG7SXZDIE=", + "lastModified": 1693966243, + "narHash": "sha256-a2CA1aMIPE67JWSVIGoGtD3EGlFdK9+OlJQs0FOWCKY=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "851ae4c128905a62834d53ce7704ebc1ba481bea", + "rev": "a8b4bb4cbb744baaabc3e69099f352f99164e2c1", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index b89b6d9218..dc7ab5b3fe 100644 --- a/flake.nix +++ b/flake.nix @@ -82,7 +82,7 @@ # # NOTE: We currently need to set the Rust version unnecessarily high # in order to work around https://github.com/matrix-org/synapse/issues/15939 - (rust-bin.stable."1.70.0".default.override { + (rust-bin.stable."1.71.1".default.override { # Additionally install the "rust-src" extension to allow diving into the # Rust source code in an IDE (rust-analyzer will also make use of it). extensions = [ "rust-src" ]; From 35934b02a98cbb44ba310707a72e55bc4a5c7f0a Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 6 Sep 2023 13:35:02 +0000 Subject: [PATCH 428/562] Add GCC and GNU Make to the Nix flake development environment so that `ruff` can be compiled. (#16090) * Add gcc and GNU make to the Nix flake * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) * unset LD_LIBRARY_PATH --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/16090.misc | 1 + flake.nix | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) create mode 100644 changelog.d/16090.misc diff --git a/changelog.d/16090.misc b/changelog.d/16090.misc new file mode 100644 index 0000000000..d54ef936c7 --- /dev/null +++ b/changelog.d/16090.misc @@ -0,0 +1 @@ +Add GCC and GNU Make to the Nix flake development environment so that `ruff` can be compiled. \ No newline at end of file diff --git a/flake.nix b/flake.nix index dc7ab5b3fe..69c9c19f89 100644 --- a/flake.nix +++ b/flake.nix @@ -89,6 +89,10 @@ }) # The rust-analyzer language server implementation. rust-analyzer + # GCC includes a linker; needed for building `ruff` + gcc + # Needed for building `ruff` + gnumake # Native dependencies for running Synapse. icu @@ -236,6 +240,19 @@ URI YAMLLibYAML ]}"; + + # Clear the LD_LIBRARY_PATH environment variable on shell init. + # + # By default, devenv will set LD_LIBRARY_PATH to point to .devenv/profile/lib. This causes + # issues when we include `gcc` as a dependency to build C libraries, as the version of glibc + # that the development environment's cc compiler uses may differ from that of the system. + # + # When LD_LIBRARY_PATH is set, system tools will attempt to use the development environment's + # libraries. Which, when built against an different glibc version lead, to "version 'GLIBC_X.YY' not + # found" errors. 
+ enterShell = '' + unset LD_LIBRARY_PATH + ''; } ]; }; From 1940d990a345b44839039b3f6a9ee3f26757eb0e Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Wed, 6 Sep 2023 16:19:51 +0200 Subject: [PATCH 429/562] Revert MSC3861 introspection cache, admin impersonation and account lock (#16258) --- changelog.d/16258.bugfix | 1 + synapse/api/auth/msc3861_delegated.py | 91 +---------- synapse/replication/tcp/client.py | 12 -- synapse/rest/admin/__init__.py | 3 - synapse/rest/admin/oidc.py | 55 ------- synapse/storage/databases/main/cache.py | 13 -- synapse/storage/databases/main/devices.py | 9 - synapse/util/caches/expiringcache.py | 22 --- tests/handlers/test_oauth_delegation.py | 154 +++--------------- .../test_intro_token_invalidation.py | 62 ------- 10 files changed, 31 insertions(+), 391 deletions(-) create mode 100644 changelog.d/16258.bugfix delete mode 100644 synapse/rest/admin/oidc.py delete mode 100644 tests/replication/test_intro_token_invalidation.py diff --git a/changelog.d/16258.bugfix b/changelog.d/16258.bugfix new file mode 100644 index 0000000000..02ba9598a2 --- /dev/null +++ b/changelog.d/16258.bugfix @@ -0,0 +1 @@ +Revert MSC3861 introspection cache, admin impersonation and account lock. \ No newline at end of file diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index 14cba50c90..ef5d3f9b81 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -28,7 +28,6 @@ from twisted.web.http_headers import Headers from synapse.api.auth.base import BaseAuth from synapse.api.errors import ( AuthError, - Codes, HttpResponseException, InvalidClientTokenError, OAuthInsufficientScopeError, @@ -40,7 +39,6 @@ from synapse.logging.context import make_deferred_yieldable from synapse.types import Requester, UserID, create_requester from synapse.util import json_decoder from synapse.util.caches.cached_call import RetryOnExceptionCachedCall -from synapse.util.caches.expiringcache import ExpiringCache if TYPE_CHECKING: from synapse.server import HomeServer @@ -109,20 +107,13 @@ class MSC3861DelegatedAuth(BaseAuth): assert self._config.client_id, "No client_id provided" assert auth_method is not None, "Invalid client_auth_method provided" + self._clock = hs.get_clock() self._http_client = hs.get_proxied_http_client() self._hostname = hs.hostname self._admin_token = self._config.admin_token self._issuer_metadata = RetryOnExceptionCachedCall(self._load_metadata) - self._clock = hs.get_clock() - self._token_cache: ExpiringCache[str, IntrospectionToken] = ExpiringCache( - cache_name="introspection_token_cache", - clock=self._clock, - max_len=10000, - expiry_ms=5 * 60 * 1000, - ) - if isinstance(auth_method, PrivateKeyJWTWithKid): # Use the JWK as the client secret when using the private_key_jwt method assert self._config.jwk, "No JWK provided" @@ -161,20 +152,6 @@ class MSC3861DelegatedAuth(BaseAuth): Returns: The introspection response """ - # check the cache before doing a request - introspection_token = self._token_cache.get(token, None) - - if introspection_token: - # check the expiration field of the token (if it exists) - exp = introspection_token.get("exp", None) - if exp: - time_now = self._clock.time() - expired = time_now > exp - if not expired: - return introspection_token - else: - return introspection_token - metadata = await self._issuer_metadata.get() introspection_endpoint = metadata.get("introspection_endpoint") raw_headers: Dict[str, str] = { @@ -188,10 +165,7 @@ class MSC3861DelegatedAuth(BaseAuth): # 
Fill the body/headers with credentials uri, raw_headers, body = self._client_auth.prepare( - method="POST", - uri=introspection_endpoint, - headers=raw_headers, - body=body, + method="POST", uri=introspection_endpoint, headers=raw_headers, body=body ) headers = Headers({k: [v] for (k, v) in raw_headers.items()}) @@ -233,20 +207,10 @@ class MSC3861DelegatedAuth(BaseAuth): "The introspection endpoint returned an invalid JSON response." ) - expiration = resp.get("exp", None) - if expiration: - if self._clock.time() > expiration: - raise InvalidClientTokenError("Token is expired.") - - introspection_token = IntrospectionToken(**resp) - - # add token to cache - self._token_cache[token] = introspection_token - - return introspection_token + return IntrospectionToken(**resp) async def is_server_admin(self, requester: Requester) -> bool: - return SCOPE_SYNAPSE_ADMIN in requester.scope + return "urn:synapse:admin:*" in requester.scope async def get_user_by_req( self, @@ -263,36 +227,6 @@ class MSC3861DelegatedAuth(BaseAuth): # so that we don't provision the user if they don't have enough permission: requester = await self.get_user_by_access_token(access_token, allow_expired) - # Allow impersonation by an admin user using `_oidc_admin_impersonate_user_id` query parameter - if request.args is not None: - user_id_params = request.args.get(b"_oidc_admin_impersonate_user_id") - if user_id_params: - if await self.is_server_admin(requester): - user_id_str = user_id_params[0].decode("ascii") - impersonated_user_id = UserID.from_string(user_id_str) - logging.info(f"Admin impersonation of user {user_id_str}") - requester = create_requester( - user_id=impersonated_user_id, - scope=[SCOPE_MATRIX_API], - authenticated_entity=requester.user.to_string(), - ) - else: - raise AuthError( - 401, - "Impersonation not possible by a non admin user", - ) - - # Deny the request if the user account is locked. - if not allow_locked and await self.store.get_user_locked_status( - requester.user.to_string() - ): - raise AuthError( - 401, - "User account has been locked", - errcode=Codes.USER_LOCKED, - additional_fields={"soft_logout": True}, - ) - if not allow_guest and requester.is_guest: raise OAuthInsufficientScopeError([SCOPE_MATRIX_API]) @@ -309,14 +243,14 @@ class MSC3861DelegatedAuth(BaseAuth): # XXX: This is a temporary solution so that the admin API can be called by # the OIDC provider. This will be removed once we have OIDC client # credentials grant support in matrix-authentication-service. - logging.info("Admin token used") + logging.info("Admin toked used") # XXX: that user doesn't exist and won't be provisioned. # This is mostly fine for admin calls, but we should also think about doing # requesters without a user_id. admin_user = UserID("__oidc_admin", self._hostname) return create_requester( user_id=admin_user, - scope=[SCOPE_SYNAPSE_ADMIN], + scope=["urn:synapse:admin:*"], ) try: @@ -438,16 +372,3 @@ class MSC3861DelegatedAuth(BaseAuth): scope=scope, is_guest=(has_guest_scope and not has_user_scope), ) - - def invalidate_cached_tokens(self, keys: List[str]) -> None: - """ - Invalidate the entry(s) in the introspection token cache corresponding to the given key - """ - for key in keys: - self._token_cache.invalidate(key) - - def invalidate_token_cache(self) -> None: - """ - Invalidate the entire token cache. 
- """ - self._token_cache.invalidate_all() diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 3b88dc68ea..078c8d7074 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -28,7 +28,6 @@ from synapse.logging.context import PreserveLoggingContext, make_deferred_yielda from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.tcp.streams import ( AccountDataStream, - CachesStream, DeviceListsStream, PushersStream, PushRulesStream, @@ -76,7 +75,6 @@ class ReplicationDataHandler: self._instance_name = hs.get_instance_name() self._typing_handler = hs.get_typing_handler() self._state_storage_controller = hs.get_storage_controllers().state - self.auth = hs.get_auth() self._notify_pushers = hs.config.worker.start_pushers self._pusher_pool = hs.get_pusherpool() @@ -224,16 +222,6 @@ class ReplicationDataHandler: self._state_storage_controller.notify_event_un_partial_stated( row.event_id ) - # invalidate the introspection token cache - elif stream_name == CachesStream.NAME: - for row in rows: - if row.cache_func == "introspection_token_invalidation": - if row.keys[0] is None: - # invalidate the whole cache - # mypy ignore - the token cache is defined on MSC3861DelegatedAuth - self.auth.invalidate_token_cache() # type: ignore[attr-defined] - else: - self.auth.invalidate_cached_tokens(row.keys) # type: ignore[attr-defined] await self._presence_handler.process_replication_rows( stream_name, instance_name, token, rows diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 55e752fda8..fe8177ed4d 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -47,7 +47,6 @@ from synapse.rest.admin.federation import ( ListDestinationsRestServlet, ) from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo -from synapse.rest.admin.oidc import OIDCTokenRevocationRestServlet from synapse.rest.admin.registration_tokens import ( ListRegistrationTokensRestServlet, NewRegistrationTokenRestServlet, @@ -298,8 +297,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: BackgroundUpdateRestServlet(hs).register(http_server) BackgroundUpdateStartJobRestServlet(hs).register(http_server) ExperimentalFeaturesRestServlet(hs).register(http_server) - if hs.config.experimental.msc3861.enabled: - OIDCTokenRevocationRestServlet(hs).register(http_server) def register_servlets_for_client_rest_resource( diff --git a/synapse/rest/admin/oidc.py b/synapse/rest/admin/oidc.py deleted file mode 100644 index 64d2d40550..0000000000 --- a/synapse/rest/admin/oidc.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2023 The Matrix.org Foundation C.I.C -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from http import HTTPStatus -from typing import TYPE_CHECKING, Dict, Tuple - -from synapse.http.servlet import RestServlet -from synapse.http.site import SynapseRequest -from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin - -if TYPE_CHECKING: - from synapse.server import HomeServer - - -class OIDCTokenRevocationRestServlet(RestServlet): - """ - Delete a given token introspection response - identified by the `jti` field - from the - introspection token cache when a token is revoked at the authorizing server - """ - - PATTERNS = admin_patterns("/OIDC_token_revocation/(?P[^/]*)") - - def __init__(self, hs: "HomeServer"): - super().__init__() - auth = hs.get_auth() - - # If this endpoint is loaded then we must have enabled delegated auth. - from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth - - assert isinstance(auth, MSC3861DelegatedAuth) - - self.auth = auth - self.store = hs.get_datastores().main - - async def on_DELETE( - self, request: SynapseRequest, token_id: str - ) -> Tuple[HTTPStatus, Dict]: - await assert_requester_is_admin(self.auth, request) - - self.auth._token_cache.invalidate(token_id) - - # make sure we invalidate the cache on any workers - await self.store.stream_introspection_token_invalidation((token_id,)) - - return HTTPStatus.OK, {} diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 18905e07b6..2fbd389c71 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -584,19 +584,6 @@ class CacheInvalidationWorkerStore(SQLBaseStore): else: return 0 - async def stream_introspection_token_invalidation( - self, key: Tuple[Optional[str]] - ) -> None: - """ - Stream an invalidation request for the introspection token cache to workers - - Args: - key: token_id of the introspection token to remove from the cache - """ - await self.send_invalidation_to_replication( - "introspection_token_invalidation", key - ) - @wrap_as_background_process("clean_up_old_cache_invalidations") async def _clean_up_cache_invalidation_wrapper(self) -> None: """ diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index fa69a4a298..e4162f846b 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -33,7 +33,6 @@ from typing_extensions import Literal from synapse.api.constants import EduTypes from synapse.api.errors import Codes, StoreError -from synapse.config.homeserver import HomeServerConfig from synapse.logging.opentracing import ( get_active_span_text_map, set_tag, @@ -1664,7 +1663,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): self.device_id_exists_cache: LruCache[ Tuple[str, str], Literal[True] ] = LruCache(cache_name="device_id_exists", max_size=10000) - self.config: HomeServerConfig = hs.config async def store_device( self, @@ -1786,13 +1784,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): for device_id in device_ids: self.device_id_exists_cache.invalidate((user_id, device_id)) - # TODO: don't nuke the entire cache once there is a way to associate - # device_id -> introspection_token - if self.config.experimental.msc3861.enabled: - # mypy ignore - the token cache is defined on MSC3861DelegatedAuth - self.auth._token_cache.invalidate_all() # type: ignore[attr-defined] - await self.stream_introspection_token_invalidation((None,)) - async def update_device( self, user_id: str, device_id: str, new_display_name: Optional[str] 
= None ) -> None: diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 9a3e10ddee..01ad02af67 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -140,20 +140,6 @@ class ExpiringCache(Generic[KT, VT]): return value.value - def invalidate(self, key: KT) -> None: - """ - Remove the given key from the cache. - """ - - value = self._cache.pop(key, None) - if value: - if self.iterable: - self.metrics.inc_evictions( - EvictionReason.invalidation, len(value.value) - ) - else: - self.metrics.inc_evictions(EvictionReason.invalidation) - def __contains__(self, key: KT) -> bool: return key in self._cache @@ -207,14 +193,6 @@ class ExpiringCache(Generic[KT, VT]): len(self), ) - def invalidate_all(self) -> None: - """ - Remove all items from the cache. - """ - keys = set(self._cache.keys()) - for key in keys: - self._cache.pop(key) - def __len__(self) -> int: if self.iterable: return sum(len(entry.value) for entry in self._cache.values()) diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index b891e84690..503277cdff 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -14,7 +14,7 @@ from http import HTTPStatus from typing import Any, Dict, Union -from unittest.mock import ANY, AsyncMock, Mock +from unittest.mock import ANY, Mock from urllib.parse import parse_qs from signedjson.key import ( @@ -122,6 +122,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): "client_id": CLIENT_ID, "client_auth_method": "client_secret_post", "client_secret": CLIENT_SECRET, + "admin_token": "admin_token_value", } } return config @@ -340,41 +341,6 @@ class MSC3861OAuthDelegation(HomeserverTestCase): get_awaitable_result(self.auth.is_server_admin(requester)), False ) - def test_active_user_admin_impersonation(self) -> None: - """The handler should return a requester with normal user rights - and an user ID matching the one specified in query param `user_id`""" - - self.http_client.request = simple_async_mock( - return_value=FakeResponse.json( - code=200, - payload={ - "active": True, - "sub": SUBJECT, - "scope": " ".join([SYNAPSE_ADMIN_SCOPE, MATRIX_USER_SCOPE]), - "username": USERNAME, - }, - ) - ) - request = Mock(args={}) - request.args[b"access_token"] = [b"mockAccessToken"] - impersonated_user_id = f"@{USERNAME}:{SERVER_NAME}" - request.args[b"_oidc_admin_impersonate_user_id"] = [ - impersonated_user_id.encode("ascii") - ] - request.requestHeaders.getRawHeaders = mock_getRawHeaders() - requester = self.get_success(self.auth.get_user_by_req(request)) - self.http_client.get_json.assert_called_once_with(WELL_KNOWN) - self.http_client.request.assert_called_once_with( - method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY - ) - self._assertParams() - self.assertEqual(requester.user.to_string(), impersonated_user_id) - self.assertEqual(requester.is_guest, False) - self.assertEqual(requester.device_id, None) - self.assertEqual( - get_awaitable_result(self.auth.is_server_admin(requester)), False - ) - def test_active_user_with_device(self) -> None: """The handler should return a requester with normal user rights and a device ID.""" @@ -526,100 +492,6 @@ class MSC3861OAuthDelegation(HomeserverTestCase): error = self.get_failure(self.auth.get_user_by_req(request), SynapseError) self.assertEqual(error.value.code, 503) - def test_introspection_token_cache(self) -> None: - access_token = "open_sesame" - self.http_client.request = 
simple_async_mock( - return_value=FakeResponse.json( - code=200, - payload={"active": "true", "scope": "guest", "jti": access_token}, - ) - ) - - # first call should cache response - # Mpyp ignores below are due to mypy not understanding the dynamic substitution of msc3861 auth code - # for regular auth code via the config - self.get_success( - self.auth._introspect_token(access_token) # type: ignore[attr-defined] - ) - introspection_token = self.auth._token_cache.get(access_token) # type: ignore[attr-defined] - self.assertEqual(introspection_token["jti"], access_token) - # there's been one http request - self.http_client.request.assert_called_once() - - # second call should pull from cache, there should still be only one http request - token = self.get_success(self.auth._introspect_token(access_token)) # type: ignore[attr-defined] - self.http_client.request.assert_called_once() - self.assertEqual(token["jti"], access_token) - - # advance past five minutes and check that cache expired - there should be more than one http call now - self.reactor.advance(360) - token_2 = self.get_success(self.auth._introspect_token(access_token)) # type: ignore[attr-defined] - self.assertEqual(self.http_client.request.call_count, 2) - self.assertEqual(token_2["jti"], access_token) - - # test that if a cached token is expired, a fresh token will be pulled from authorizing server - first add a - # token with a soon-to-expire `exp` field to the cache - self.http_client.request = simple_async_mock( - return_value=FakeResponse.json( - code=200, - payload={ - "active": "true", - "scope": "guest", - "jti": "stale", - "exp": self.clock.time() + 100, - }, - ) - ) - self.get_success( - self.auth._introspect_token("stale") # type: ignore[attr-defined] - ) - introspection_token = self.auth._token_cache.get("stale") # type: ignore[attr-defined] - self.assertEqual(introspection_token["jti"], "stale") - self.assertEqual(self.http_client.request.call_count, 1) - - # advance the reactor past the token expiry but less than the cache expiry - self.reactor.advance(120) - self.assertEqual(self.auth._token_cache.get("stale"), introspection_token) # type: ignore[attr-defined] - - # check that the next call causes another http request (which will fail because the token is technically expired - # but the important thing is we discard the token from the cache and try the network) - self.get_failure( - self.auth._introspect_token("stale"), InvalidClientTokenError # type: ignore[attr-defined] - ) - self.assertEqual(self.http_client.request.call_count, 2) - - def test_revocation_endpoint(self) -> None: - # mock introspection response and then admin verification response - self.http_client.request = AsyncMock( - side_effect=[ - FakeResponse.json( - code=200, payload={"active": True, "jti": "open_sesame"} - ), - FakeResponse.json( - code=200, - payload={ - "active": True, - "sub": SUBJECT, - "scope": " ".join([SYNAPSE_ADMIN_SCOPE, MATRIX_USER_SCOPE]), - "username": USERNAME, - }, - ), - ] - ) - - # cache a token to delete - introspection_token = self.get_success( - self.auth._introspect_token("open_sesame") # type: ignore[attr-defined] - ) - self.assertEqual(self.auth._token_cache.get("open_sesame"), introspection_token) # type: ignore[attr-defined] - - # delete the revoked token - introspection_token_id = "open_sesame" - url = f"/_synapse/admin/v1/OIDC_token_revocation/{introspection_token_id}" - channel = self.make_request("DELETE", url, access_token="mockAccessToken") - self.assertEqual(channel.code, 200) - 
self.assertEqual(self.auth._token_cache.get("open_sesame"), None) # type: ignore[attr-defined] - def make_device_keys(self, user_id: str, device_id: str) -> JsonDict: # We only generate a master key to simplify the test. master_signing_key = generate_signing_key(device_id) @@ -791,3 +663,25 @@ class MSC3861OAuthDelegation(HomeserverTestCase): self.expect_unrecognized("GET", "/_synapse/admin/v1/users/foo/admin") self.expect_unrecognized("PUT", "/_synapse/admin/v1/users/foo/admin") self.expect_unrecognized("POST", "/_synapse/admin/v1/account_validity/validity") + + def test_admin_token(self) -> None: + """The handler should return a requester with admin rights when admin_token is used.""" + self.http_client.request = simple_async_mock( + return_value=FakeResponse.json(code=200, payload={"active": False}), + ) + + request = Mock(args={}) + request.args[b"access_token"] = [b"admin_token_value"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + requester = self.get_success(self.auth.get_user_by_req(request)) + self.assertEqual( + requester.user.to_string(), "@%s:%s" % ("__oidc_admin", SERVER_NAME) + ) + self.assertEqual(requester.is_guest, False) + self.assertEqual(requester.device_id, None) + self.assertEqual( + get_awaitable_result(self.auth.is_server_admin(requester)), True + ) + + # There should be no call to the introspection endpoint + self.http_client.request.assert_not_called() diff --git a/tests/replication/test_intro_token_invalidation.py b/tests/replication/test_intro_token_invalidation.py deleted file mode 100644 index f90678b6b1..0000000000 --- a/tests/replication/test_intro_token_invalidation.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2023 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-from typing import Any, Dict
-
-import synapse.rest.admin._base
-
-from tests.replication._base import BaseMultiWorkerStreamTestCase
-
-
-class IntrospectionTokenCacheInvalidationTestCase(BaseMultiWorkerStreamTestCase):
-    servlets = [synapse.rest.admin.register_servlets]
-
-    def default_config(self) -> Dict[str, Any]:
-        config = super().default_config()
-        config["disable_registration"] = True
-        config["experimental_features"] = {
-            "msc3861": {
-                "enabled": True,
-                "issuer": "some_dude",
-                "client_id": "ID",
-                "client_auth_method": "client_secret_post",
-                "client_secret": "secret",
-            }
-        }
-        return config
-
-    def test_stream_introspection_token_invalidation(self) -> None:
-        worker_hs = self.make_worker_hs("synapse.app.generic_worker")
-        auth = worker_hs.get_auth()
-        store = self.hs.get_datastores().main
-
-        # add a token to the cache on the worker
-        auth._token_cache["open_sesame"] = "intro_token"  # type: ignore[attr-defined]
-
-        # stream the invalidation from the master
-        self.get_success(
-            store.stream_introspection_token_invalidation(("open_sesame",))
-        )
-
-        # check that the cache on the worker was invalidated
-        self.assertEqual(auth._token_cache.get("open_sesame"), None)  # type: ignore[attr-defined]
-
-        # test invalidating whole cache
-        for i in range(0, 5):
-            auth._token_cache[f"open_sesame_{i}"] = f"intro_token_{i}"  # type: ignore[attr-defined]
-        self.assertEqual(len(auth._token_cache), 5)  # type: ignore[attr-defined]
-
-        self.get_success(store.stream_introspection_token_invalidation((None,)))
-
-        self.assertEqual(len(auth._token_cache), 0)  # type: ignore[attr-defined]

From c9282baf033aed085ecf909ade9069d1c88308b4 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Wed, 6 Sep 2023 11:01:56 -0400
Subject: [PATCH 430/562] 1.91.2

---
 CHANGES.md               | 7 +++++++
 changelog.d/16258.bugfix | 1 -
 debian/changelog         | 6 ++++++
 pyproject.toml           | 2 +-
 4 files changed, 14 insertions(+), 2 deletions(-)
 delete mode 100644 changelog.d/16258.bugfix

diff --git a/CHANGES.md b/CHANGES.md
index 7bd9d31619..591c60c77e 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,10 @@
+# Synapse 1.91.2 (2023-09-06)
+
+### Bugfixes
+
+- Revert MSC3861 introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258))
+
+
 # Synapse 1.91.1 (2023-09-04)
 
 ### Bugfixes
diff --git a/changelog.d/16258.bugfix b/changelog.d/16258.bugfix
deleted file mode 100644
index 02ba9598a2..0000000000
--- a/changelog.d/16258.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Revert MSC3861 introspection cache, admin impersonation and account lock.
\ No newline at end of file
diff --git a/debian/changelog b/debian/changelog
index f737041567..cef3ca7b23 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.91.2) stable; urgency=medium
+
+  * New synapse release 1.91.2.
+
+ -- Synapse Packaging team  Wed, 06 Sep 2023 14:59:30 +0000
+
 matrix-synapse-py3 (1.91.1) stable; urgency=medium
 
   * New Synapse release 1.91.1.
diff --git a/pyproject.toml b/pyproject.toml
index 409b27d902..0591e265c9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.91.1"
+version = "1.91.2"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors "]
 license = "Apache-2.0"

From 9de615b3aa4f20cab182cf3822943b9465a30643 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Wed, 6 Sep 2023 11:10:57 -0400
Subject: [PATCH 431/562] Link to MSC in changelog.

---
 CHANGES.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index 591c60c77e..b112d0b2ad 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -2,7 +2,7 @@
 
 ### Bugfixes
 
-- Revert MSC3861 introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258))
+- Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258))
 
 
 # Synapse 1.91.1 (2023-09-04)

From 51303035f2366d60772473f42c64ae6cad6684d0 Mon Sep 17 00:00:00 2001
From: reivilibre
Date: Wed, 6 Sep 2023 15:15:56 +0000
Subject: [PATCH 432/562] Apply missed suggestions from the review of #16090.
 (#16263)

* Suggestions from PR

* Newsfile

Signed-off-by: Olivier Wilkinson (reivilibre)

---------

Signed-off-by: Olivier Wilkinson (reivilibre)
---
 changelog.d/16263.misc | 1 +
 flake.nix              | 5 +++--
 2 files changed, 4 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/16263.misc

diff --git a/changelog.d/16263.misc b/changelog.d/16263.misc
new file mode 100644
index 0000000000..d54ef936c7
--- /dev/null
+++ b/changelog.d/16263.misc
@@ -0,0 +1 @@
+Add GCC and GNU Make to the Nix flake development environment so that `ruff` can be compiled.
\ No newline at end of file
diff --git a/flake.nix b/flake.nix
index 69c9c19f89..31f2832939 100644
--- a/flake.nix
+++ b/flake.nix
@@ -89,6 +89,7 @@
           })
           # The rust-analyzer language server implementation.
           rust-analyzer
+          # GCC includes a linker; needed for building `ruff`
           gcc
           # Needed for building `ruff`
@@ -248,8 +249,8 @@
         # that the development environment's cc compiler uses may differ from that of the system.
         #
         # When LD_LIBRARY_PATH is set, system tools will attempt to use the development environment's
-        # libraries. Which, when built against an different glibc version lead, to "version 'GLIBC_X.YY' not
-        # found" errors.
+        # libraries. Which, when built against a different glibc version, lead to "version 'GLIBC_X.YY'
+        # not found" errors.
         enterShell = ''
           unset LD_LIBRARY_PATH
         '';

From fd50a9b47cf700024ad4eb19411016a5131e2d20 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Wed, 6 Sep 2023 13:06:33 -0400
Subject: [PATCH 433/562] Add back newsfile from #16258.

---
 changelog.d/16258.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/16258.bugfix

diff --git a/changelog.d/16258.bugfix b/changelog.d/16258.bugfix
new file mode 100644
index 0000000000..d5ae2399e6
--- /dev/null
+++ b/changelog.d/16258.bugfix
@@ -0,0 +1 @@
+Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock.
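
[Editor's note: the introspection-cache behaviour exercised by the tests in the revert above — responses cached per access token, with an entry treated as stale after a fixed lifetime or once the token's own `exp` has passed — can be illustrated with a short sketch. This is not Synapse's implementation; the names, the five-minute lifetime, and the folding of both expiry checks into one structure are assumptions for illustration only.]

```python
import time
from typing import Dict, Optional, Tuple

DEFAULT_TTL = 5 * 60  # assumed cache lifetime, mirroring the ~five minutes the tests advance past


class IntrospectionCache:
    """Toy cache of token-introspection responses, keyed by access token."""

    def __init__(self) -> None:
        self._entries: Dict[str, Tuple[float, dict]] = {}

    def set(self, token: str, response: dict) -> None:
        # Cache for DEFAULT_TTL, but never serve a response past the token's own `exp`.
        expiry = time.time() + DEFAULT_TTL
        if "exp" in response:
            expiry = min(expiry, float(response["exp"]))
        self._entries[token] = (expiry, response)

    def get(self, token: str) -> Optional[dict]:
        entry = self._entries.get(token)
        if entry is None:
            return None
        expiry, response = entry
        if time.time() >= expiry:
            # Stale entry: evict it so the caller re-introspects over the network.
            del self._entries[token]
            return None
        return response
```

[A stale hit is treated the same as a miss, which is why the tests above expect a second HTTP request once the reactor has been advanced past the expiry.]
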
From fe69e7f617199f51eb97f510a0a934fdcf02fbad Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Aur=C3=A9lien=20Grimpard?=
Date: Wed, 6 Sep 2023 20:32:24 +0200
Subject: [PATCH 434/562] Handle "registration_enabled" parameter for CAS
 (#16262)

Similar to OIDC, CAS providers can now disable registration
such that only existing users are able to log in via SSO.
---
 changelog.d/16262.feature                        |  1 +
 .../usage/configuration/config_documentation.md  |  7 +++++++
 synapse/config/cas.py                            |  3 +++
 synapse/handlers/cas.py                          |  2 ++
 tests/handlers/test_cas.py                       | 17 +++++++++++++++++
 5 files changed, 30 insertions(+)
 create mode 100644 changelog.d/16262.feature

diff --git a/changelog.d/16262.feature b/changelog.d/16262.feature
new file mode 100644
index 0000000000..7c8e7e349b
--- /dev/null
+++ b/changelog.d/16262.feature
@@ -0,0 +1 @@
+Add the ability to enable/disable registrations when in the CAS flow. Contributed by Aurélien Grimpard.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 97fd1beb39..42df53d52b 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -3430,6 +3430,12 @@ Has the following sub-options:
    and the values must match the given value. Alternately if the given value
    is `None` then any value is allowed (the attribute just must exist).
    All of the listed attributes must match for the login to be permitted.
+* `enable_registration`: set to 'false' to disable automatic registration of new
+   users. This allows the CAS SSO flow to be limited to sign in only, rather than
+   automatically registering users that have a valid SSO login but do not have
+   a pre-registered account. Defaults to true.
+
+   *Added in Synapse 1.93.0.*
 
 Example configuration:
 ```yaml
@@ -3441,6 +3447,7 @@ cas_config:
   required_attributes:
     userGroup: "staff"
     department: None
+  enable_registration: true
 ```
 ---
 ### `sso`
diff --git a/synapse/config/cas.py b/synapse/config/cas.py
index 6e2d9addbf..bbc8f43073 100644
--- a/synapse/config/cas.py
+++ b/synapse/config/cas.py
@@ -57,6 +57,8 @@ class CasConfig(Config):
                 required_attributes
             )
 
+        self.cas_enable_registration = cas_config.get("enable_registration", True)
+
         self.idp_name = cas_config.get("idp_name", "CAS")
         self.idp_icon = cas_config.get("idp_icon")
         self.idp_brand = cas_config.get("idp_brand")
@@ -67,6 +69,7 @@ class CasConfig(Config):
             self.cas_protocol_version = None
             self.cas_displayname_attribute = None
             self.cas_required_attributes = []
+            self.cas_enable_registration = False
 
         # CAS uses a legacy required attributes mapping, not the one provided by
diff --git a/synapse/handlers/cas.py b/synapse/handlers/cas.py
index a850545453..b5b8b9bd35 100644
--- a/synapse/handlers/cas.py
+++ b/synapse/handlers/cas.py
@@ -70,6 +70,7 @@ class CasHandler:
         self._cas_protocol_version = hs.config.cas.cas_protocol_version
         self._cas_displayname_attribute = hs.config.cas.cas_displayname_attribute
         self._cas_required_attributes = hs.config.cas.cas_required_attributes
+        self._cas_enable_registration = hs.config.cas.cas_enable_registration
 
         self._http_client = hs.get_proxied_http_client()
 
@@ -395,4 +396,5 @@ class CasHandler:
             client_redirect_url,
             cas_response_to_user_attributes,
             grandfather_existing_users,
+            registration_enabled=self._cas_enable_registration,
         )
diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py
index 8582b1cd1e..13e2cd153a 100644
--- a/tests/handlers/test_cas.py
+++ b/tests/handlers/test_cas.py
@@ -197,6 +197,23 @@ class CasHandlerTestCase(HomeserverTestCase):
             auth_provider_session_id=None,
         )
 
+    @override_config({"cas_config": {"enable_registration": False}})
+    def test_map_cas_user_does_not_register_new_user(self) -> None:
+        """Ensures new users are not registered if the enabled registration flag is disabled."""
+
+        # stub out the auth handler
+        auth_handler = self.hs.get_auth_handler()
+        auth_handler.complete_sso_login = AsyncMock()  # type: ignore[method-assign]
+
+        cas_response = CasResponse("test_user", {})
+        request = _mock_request()
+        self.get_success(
+            self.handler._handle_cas_response(request, cas_response, "redirect_uri", "")
+        )
+
+        # check that the auth handler was not called as expected
+        auth_handler.complete_sso_login.assert_not_called()
+
 
 def _mock_request() -> Mock:
     """Returns a mock which will stand in as a SynapseRequest"""

From 13e9cad537a16108b0cb544ccdc24e7dc2ca33ae Mon Sep 17 00:00:00 2001
From: Marcel
Date: Wed, 6 Sep 2023 21:19:17 +0200
Subject: [PATCH 435/562] Send the opentracing span information to appservices
 (#16227)

---
 changelog.d/16227.feature    |  1 +
 synapse/appservice/api.py    | 32 ++++++++++++++++++++++++--------
 tests/appservice/test_api.py | 18 ++++++++++++------
 3 files changed, 37 insertions(+), 14 deletions(-)
 create mode 100644 changelog.d/16227.feature

diff --git a/changelog.d/16227.feature b/changelog.d/16227.feature
new file mode 100644
index 0000000000..510062b622
--- /dev/null
+++ b/changelog.d/16227.feature
@@ -0,0 +1 @@
+Add span information to requests sent to appservices. Contributed by MTRNord.
\ No newline at end of file
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index de7a94bf26..b1523be208 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -40,6 +40,7 @@ from synapse.appservice import (
 from synapse.events import EventBase
 from synapse.events.utils import SerializeEventConfig, serialize_event
 from synapse.http.client import SimpleHttpClient, is_unknown_endpoint
+from synapse.logging import opentracing
 from synapse.types import DeviceListUpdates, JsonDict, ThirdPartyInstanceID
 from synapse.util.caches.response_cache import ResponseCache
 
@@ -125,6 +126,17 @@ class ApplicationServiceApi(SimpleHttpClient):
             hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
         )
 
+    def _get_headers(self, service: "ApplicationService") -> Dict[bytes, List[bytes]]:
+        """This makes sure we always have the auth header and opentracing headers set."""
+
+        # This is also ensured before in the functions. However this is needed to please
+        # the typechecker.
+        assert service.hs_token is not None
+
+        headers = {b"Authorization": [b"Bearer " + service.hs_token.encode("ascii")]}
+        opentracing.inject_header_dict(headers, check_destination=False)
+        return headers
+
     async def query_user(self, service: "ApplicationService", user_id: str) -> bool:
         if service.url is None:
             return False
@@ -136,10 +148,11 @@ class ApplicationServiceApi(SimpleHttpClient):
         args = None
         if self.config.use_appservice_legacy_authorization:
             args = {"access_token": service.hs_token}
+
         response = await self.get_json(
             f"{service.url}{APP_SERVICE_PREFIX}/users/{urllib.parse.quote(user_id)}",
             args,
-            headers={"Authorization": [f"Bearer {service.hs_token}"]},
+            headers=self._get_headers(service),
         )
         if response is not None:  # just an empty json object
             return True
@@ -162,10 +175,11 @@ class ApplicationServiceApi(SimpleHttpClient):
         args = None
         if self.config.use_appservice_legacy_authorization:
             args = {"access_token": service.hs_token}
+
         response = await self.get_json(
             f"{service.url}{APP_SERVICE_PREFIX}/rooms/{urllib.parse.quote(alias)}",
             args,
-            headers={"Authorization": [f"Bearer {service.hs_token}"]},
+            headers=self._get_headers(service),
         )
         if response is not None:  # just an empty json object
             return True
@@ -203,10 +217,11 @@ class ApplicationServiceApi(SimpleHttpClient):
                     **fields,
                     b"access_token": service.hs_token,
                 }
+
             response = await self.get_json(
                 f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/{kind}/{urllib.parse.quote(protocol)}",
                 args=args,
-                headers={"Authorization": [f"Bearer {service.hs_token}"]},
+                headers=self._get_headers(service),
             )
             if not isinstance(response, list):
                 logger.warning(
@@ -243,10 +258,11 @@ class ApplicationServiceApi(SimpleHttpClient):
             args = None
             if self.config.use_appservice_legacy_authorization:
                 args = {"access_token": service.hs_token}
+
             info = await self.get_json(
                 f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/protocol/{urllib.parse.quote(protocol)}",
                 args,
-                headers={"Authorization": [f"Bearer {service.hs_token}"]},
+                headers=self._get_headers(service),
             )
 
             if not _is_valid_3pe_metadata(info):
@@ -283,7 +299,7 @@ class ApplicationServiceApi(SimpleHttpClient):
         await self.post_json_get_json(
             uri=f"{service.url}{APP_SERVICE_PREFIX}/ping",
             post_json={"transaction_id": txn_id},
-            headers={"Authorization": [f"Bearer {service.hs_token}"]},
+            headers=self._get_headers(service),
         )
 
     async def push_bulk(
@@ -364,7 +380,7 @@ class ApplicationServiceApi(SimpleHttpClient):
                 f"{service.url}{APP_SERVICE_PREFIX}/transactions/{urllib.parse.quote(str(txn_id))}",
                 json_body=body,
                 args=args,
-                headers={"Authorization": [f"Bearer {service.hs_token}"]},
+                headers=self._get_headers(service),
             )
             if logger.isEnabledFor(logging.DEBUG):
                 logger.debug(
@@ -437,7 +453,7 @@ class ApplicationServiceApi(SimpleHttpClient):
             response = await self.post_json_get_json(
                 uri,
                 body,
-                headers={"Authorization": [f"Bearer {service.hs_token}"]},
+                headers=self._get_headers(service),
             )
         except HttpResponseException as e:
             # The appservice doesn't support this endpoint.
@@ -498,7 +514,7 @@ class ApplicationServiceApi(SimpleHttpClient):
             response = await self.post_json_get_json(
                 uri,
                 query,
-                headers={"Authorization": [f"Bearer {service.hs_token}"]},
+                headers=self._get_headers(service),
             )
         except HttpResponseException as e:
             # The appservice doesn't support this endpoint.
diff --git a/tests/appservice/test_api.py b/tests/appservice/test_api.py
index 75fb5fae6b..366b6fd5f0 100644
--- a/tests/appservice/test_api.py
+++ b/tests/appservice/test_api.py
@@ -76,7 +76,7 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase):
             headers: Mapping[Union[str, bytes], Sequence[Union[str, bytes]]],
         ) -> List[JsonDict]:
             # Ensure the access token is passed as a header.
-            if not headers or not headers.get("Authorization"):
+            if not headers or not headers.get(b"Authorization"):
                 raise RuntimeError("Access token not provided")
             # ... and not as a query param
             if b"access_token" in args:
@@ -84,7 +84,9 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase):
                     "Access token should not be passed as a query param."
                 )
 
-            self.assertEqual(headers.get("Authorization"), [f"Bearer {TOKEN}"])
+            self.assertEqual(
+                headers.get(b"Authorization"), [f"Bearer {TOKEN}".encode()]
+            )
             self.request_url = url
             if url == URL_USER:
                 return SUCCESS_RESULT_USER
@@ -152,11 +154,13 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase):
            # Ensure the access token is passed as both a query param and in the headers.
            if not args.get(b"access_token"):
                raise RuntimeError("Access token should be provided in query params.")
-           if not headers or not headers.get("Authorization"):
+           if not headers or not headers.get(b"Authorization"):
                raise RuntimeError("Access token should be provided in auth headers.")
 
            self.assertEqual(args.get(b"access_token"), TOKEN)
-           self.assertEqual(headers.get("Authorization"), [f"Bearer {TOKEN}"])
+           self.assertEqual(
+               headers.get(b"Authorization"), [f"Bearer {TOKEN}".encode()]
+           )
            self.request_url = url
            if url == URL_USER:
                return SUCCESS_RESULT_USER
@@ -208,10 +212,12 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase):
             headers: Mapping[Union[str, bytes], Sequence[Union[str, bytes]]],
         ) -> JsonDict:
             # Ensure the access token is passed as both a header and query arg.
-            if not headers.get("Authorization"):
+            if not headers.get(b"Authorization"):
                 raise RuntimeError("Access token not provided")
 
-            self.assertEqual(headers.get("Authorization"), [f"Bearer {TOKEN}"])
+            self.assertEqual(
+                headers.get(b"Authorization"), [f"Bearer {TOKEN}".encode()]
+            )
             return RESPONSE
 
         # We assign to a method, which mypy doesn't like.
From a83f75a37dba765df78319c57c296a3a1ca27e05 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 6 Sep 2023 15:19:56 -0400
Subject: [PATCH 436/562] Bump gitpython from 3.1.32 to 3.1.34 (#16267)

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 poetry.lock | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index 872a863edc..d7bbfbd358 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -586,13 +586,13 @@ smmap = ">=3.0.1,<6"
 
 [[package]]
 name = "gitpython"
-version = "3.1.32"
+version = "3.1.34"
 description = "GitPython is a Python library used to interact with Git repositories"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "GitPython-3.1.32-py3-none-any.whl", hash = "sha256:e3d59b1c2c6ebb9dfa7a184daf3b6dd4914237e7488a1730a6d8f6f5d0b4187f"},
-    {file = "GitPython-3.1.32.tar.gz", hash = "sha256:8d9b8cb1e80b9735e8717c9362079d3ce4c6e5ddeebedd0361b228c3a67a62f6"},
+    {file = "GitPython-3.1.34-py3-none-any.whl", hash = "sha256:5d3802b98a3bae1c2b8ae0e1ff2e4aa16bcdf02c145da34d092324f599f01395"},
+    {file = "GitPython-3.1.34.tar.gz", hash = "sha256:85f7d365d1f6bf677ae51039c1ef67ca59091c7ebd5a3509aa399d4eda02d6dd"},
 ]
 
 [package.dependencies]

From 8940d1b28ecbaf9185459e2af62169ecf39a96f5 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 7 Sep 2023 10:26:07 +0100
Subject: [PATCH 437/562] Add `/notifications` endpoint to workers (#16265)

---
 changelog.d/16265.feature                     |  1 +
 docker/configure_workers_and_start.py         |  1 +
 docs/workers.md                               |  1 +
 synapse/rest/__init__.py                      |  2 +-
 synapse/rest/client/notifications.py          |  2 +
 .../databases/main/event_push_actions.py      | 72 +++++++++----------
 6 files changed, 42 insertions(+), 37 deletions(-)
 create mode 100644 changelog.d/16265.feature

diff --git a/changelog.d/16265.feature b/changelog.d/16265.feature
new file mode 100644
index 0000000000..3ffa16dbcb
--- /dev/null
+++ b/changelog.d/16265.feature
@@ -0,0 +1 @@
+Allow `/notifications` endpoint to be routed to workers.
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index 400a7515aa..62952e6b26 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -183,6 +183,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
             "^/_matrix/client/(r0|v3|unstable)/password_policy$",
             "^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$",
             "^/_matrix/client/(r0|v3|unstable)/capabilities$",
+            "^/_matrix/client/(r0|v3|unstable)/notifications$",
         ],
         "shared_extra_conf": {},
         "worker_extra_conf": "",
diff --git a/docs/workers.md b/docs/workers.md
index 24bd22724e..dc76b073de 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -246,6 +246,7 @@ information.
     ^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)
     ^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$
     ^/_matrix/client/(r0|v3|unstable)/capabilities$
+    ^/_matrix/client/(r0|v3|unstable)/notifications$
 
     # Encryption requests
     ^/_matrix/client/(r0|v3|unstable)/keys/query$
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index df0845edb2..1be9c47c61 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -123,7 +123,7 @@ class ClientRestResource(JsonResource):
         if is_main_process:
             report_event.register_servlets(hs, client_resource)
             openid.register_servlets(hs, client_resource)
-            notifications.register_servlets(hs, client_resource)
+        notifications.register_servlets(hs, client_resource)
         devices.register_servlets(hs, client_resource)
 
         if is_main_process:
             thirdparty.register_servlets(hs, client_resource)
diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py
index ea10042569..e7fe1332e7 100644
--- a/synapse/rest/client/notifications.py
+++ b/synapse/rest/client/notifications.py
@@ -36,6 +36,8 @@ logger = logging.getLogger(__name__)
 class NotificationsServlet(RestServlet):
     PATTERNS = client_patterns("/notifications$")
 
+    CATEGORY = "Client API requests"
+
     def __init__(self, hs: "HomeServer"):
         super().__init__()
         self.store = hs.get_datastores().main
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index 07bda7d6be..b958a39aeb 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -1740,42 +1740,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
             # We sleep to ensure that we don't overwhelm the DB.
             await self._clock.sleep(1.0)
 
-
-class EventPushActionsStore(EventPushActionsWorkerStore):
-    EPA_HIGHLIGHT_INDEX = "epa_highlight_index"
-
-    def __init__(
-        self,
-        database: DatabasePool,
-        db_conn: LoggingDatabaseConnection,
-        hs: "HomeServer",
-    ):
-        super().__init__(database, db_conn, hs)
-
-        self.db_pool.updates.register_background_index_update(
-            self.EPA_HIGHLIGHT_INDEX,
-            index_name="event_push_actions_u_highlight",
-            table="event_push_actions",
-            columns=["user_id", "stream_ordering"],
-        )
-
-        self.db_pool.updates.register_background_index_update(
-            "event_push_actions_highlights_index",
-            index_name="event_push_actions_highlights_index",
-            table="event_push_actions",
-            columns=["user_id", "room_id", "topological_ordering", "stream_ordering"],
-            where_clause="highlight=1",
-        )
-
-        # Add index to make deleting old push actions faster.
-        self.db_pool.updates.register_background_index_update(
-            "event_push_actions_stream_highlight_index",
-            index_name="event_push_actions_stream_highlight_index",
-            table="event_push_actions",
-            columns=["highlight", "stream_ordering"],
-            where_clause="highlight=0",
-        )
-
     async def get_push_actions_for_user(
         self,
         user_id: str,
@@ -1834,6 +1798,42 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
         ]
 
 
+class EventPushActionsStore(EventPushActionsWorkerStore):
+    EPA_HIGHLIGHT_INDEX = "epa_highlight_index"
+
+    def __init__(
+        self,
+        database: DatabasePool,
+        db_conn: LoggingDatabaseConnection,
+        hs: "HomeServer",
+    ):
+        super().__init__(database, db_conn, hs)
+
+        self.db_pool.updates.register_background_index_update(
+            self.EPA_HIGHLIGHT_INDEX,
+            index_name="event_push_actions_u_highlight",
+            table="event_push_actions",
+            columns=["user_id", "stream_ordering"],
+        )
+
+        self.db_pool.updates.register_background_index_update(
+            "event_push_actions_highlights_index",
+            index_name="event_push_actions_highlights_index",
+            table="event_push_actions",
+            columns=["user_id", "room_id", "topological_ordering", "stream_ordering"],
+            where_clause="highlight=1",
+        )
+
+        # Add index to make deleting old push actions faster.
+        self.db_pool.updates.register_background_index_update(
+            "event_push_actions_stream_highlight_index",
+            index_name="event_push_actions_stream_highlight_index",
+            table="event_push_actions",
+            columns=["highlight", "stream_ordering"],
+            where_clause="highlight=0",
+        )
+
+
 def _action_has_highlight(actions: Collection[Union[Mapping, str]]) -> bool:
     for action in actions:
         if not isinstance(action, dict):

From 7e98d382f9671d5b59599939b36c00fb8f955a87 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 7 Sep 2023 07:00:41 -0400
Subject: [PATCH 438/562] Support releasing on macOS. (#16266)

---
 changelog.d/16266.misc |  1 +
 scripts-dev/release.py | 46 +++++++++++++++++++++++++++---------------
 2 files changed, 31 insertions(+), 16 deletions(-)
 create mode 100644 changelog.d/16266.misc

diff --git a/changelog.d/16266.misc b/changelog.d/16266.misc
new file mode 100644
index 0000000000..ac594c4ac4
--- /dev/null
+++ b/changelog.d/16266.misc
@@ -0,0 +1 @@
+Update the release script to work on macOS.
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index 4ac8eaa889..74f41a40ec 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -244,11 +244,17 @@ def _prepare() -> None:
     else:
         debian_version = new_version
 
-    run_until_successful(
-        f'dch -M -v {debian_version} "New Synapse release {new_version}."',
-        shell=True,
-    )
-    run_until_successful('dch -M -r -D stable ""', shell=True)
+    if sys.platform == "darwin":
+        run_until_successful(
+            f"docker run --rm -v .:/synapse ubuntu:latest /synapse/scripts-dev/docker_update_debian_changelog.sh {new_version}",
+            shell=True,
+        )
+    else:
+        run_until_successful(
+            f'dch -M -v {debian_version} "New Synapse release {new_version}."',
+            shell=True,
+        )
+        run_until_successful('dch -M -r -D stable ""', shell=True)
 
     # Show the user the changes and ask if they want to edit the change log.
     synapse_repo.git.add("-u")
@@ -566,19 +572,27 @@ def _notify(message: str) -> None:
     # for this.
     click.echo(f"\a{message}")
 
+    app_name = "Synapse Release Script"
+
     # Try and run notify-send, but don't raise an Exception if this fails
     # (This is best-effort)
-    # TODO Support other platforms?
-    subprocess.run(
-        [
-            "notify-send",
-            "--app-name",
-            "Synapse Release Script",
-            "--expire-time",
-            "3600000",
-            message,
-        ]
-    )
+    if sys.platform == "darwin":
+        # See https://developer.apple.com/library/archive/documentation/AppleScript/Conceptual/AppleScriptLangGuide/reference/ASLR_cmds.html#//apple_ref/doc/uid/TP40000983-CH216-SW224
+        subprocess.run(
+            f"""osascript -e 'display notification "{message}" with title "{app_name}"'""",
+            shell=True,
+        )
+    else:
+        subprocess.run(
+            [
+                "notify-send",
+                "--app-name",
+                app_name,
+                "--expire-time",
+                "3600000",
+                message,
+            ]
+        )
 
 
 @cli.command()

From 1cd410a7833984ef69a7dcecf8997f4c45d609cd Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 7 Sep 2023 13:45:43 +0100
Subject: [PATCH 439/562] Recheck if remote device is cached before requesting
 it (#16252)

This fixes a bug where we could get stuck re-requesting the device
over replication again and again.
---
 changelog.d/16252.bugfix                  |  1 +
 synapse/handlers/device.py                | 21 ++++++++++++------
 synapse/replication/http/devices.py       |  4 ++--
 synapse/storage/databases/main/devices.py | 26 +++++++++++++++--------
 4 files changed, 35 insertions(+), 17 deletions(-)
 create mode 100644 changelog.d/16252.bugfix

diff --git a/changelog.d/16252.bugfix b/changelog.d/16252.bugfix
new file mode 100644
index 0000000000..881bc00e61
--- /dev/null
+++ b/changelog.d/16252.bugfix
@@ -0,0 +1 @@
+Fix bug when using workers where Synapse could end up re-requesting the same remote device repeatedly.
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 9e52af5f13..9356ae998e 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -1030,7 +1030,7 @@ class DeviceListWorkerUpdater:
 
     async def multi_user_device_resync(
         self, user_ids: List[str], mark_failed_as_stale: bool = True
-    ) -> Dict[str, Optional[JsonDict]]:
+    ) -> Dict[str, Optional[JsonMapping]]:
         """
         Like `user_device_resync` but operates on multiple users **from the same origin**
         at once.
@@ -1059,6 +1059,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
         self._notifier = hs.get_notifier()
 
         self._remote_edu_linearizer = Linearizer(name="remote_device_list")
+        self._resync_linearizer = Linearizer(name="remote_device_resync")
 
         # user_id -> list of updates waiting to be handled.
         self._pending_updates: Dict[
@@ -1301,7 +1302,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
 
     async def multi_user_device_resync(
         self, user_ids: List[str], mark_failed_as_stale: bool = True
-    ) -> Dict[str, Optional[JsonDict]]:
+    ) -> Dict[str, Optional[JsonMapping]]:
         """
         Like `user_device_resync` but operates on multiple users **from the same origin**
         at once.
@@ -1321,9 +1322,11 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
         failed = set()
         # TODO(Perf): Actually batch these up
         for user_id in user_ids:
-            user_result, user_failed = await self._user_device_resync_returning_failed(
-                user_id
-            )
+            async with self._resync_linearizer.queue(user_id):
+                (
+                    user_result,
+                    user_failed,
+                ) = await self._user_device_resync_returning_failed(user_id)
             result[user_id] = user_result
             if user_failed:
                 failed.add(user_id)
@@ -1335,7 +1338,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
 
     async def _user_device_resync_returning_failed(
         self, user_id: str
-    ) -> Tuple[Optional[JsonDict], bool]:
+    ) -> Tuple[Optional[JsonMapping], bool]:
         """Fetches all devices for a user and updates the device cache with them.
 
         Args:
@@ -1348,6 +1351,12 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
               e.g. due to a connection problem.
            - True iff the resync failed and the device list should be marked as stale.
         """
+        # Check that we haven't gone and fetched the devices since we last
+        # checked if we needed to resync these device lists.
+        if await self.store.get_users_whose_devices_are_cached([user_id]):
+            cached = await self.store.get_cached_devices_for_user(user_id)
+            return cached, False
+
         logger.debug("Attempting to resync the device list for %s", user_id)
         log_kv({"message": "Doing resync to update device list."})
         # Fetch all devices for the user.
diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py
index 209833d287..b8198e059c 100644
--- a/synapse/replication/http/devices.py
+++ b/synapse/replication/http/devices.py
@@ -20,7 +20,7 @@ from twisted.web.server import Request
 from synapse.http.server import HttpServer
 from synapse.logging.opentracing import active_span
 from synapse.replication.http._base import ReplicationEndpoint
-from synapse.types import JsonDict
+from synapse.types import JsonDict, JsonMapping
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -82,7 +82,7 @@ class ReplicationMultiUserDevicesResyncRestServlet(ReplicationEndpoint):
 
     async def _handle_request(  # type: ignore[override]
         self, request: Request, content: JsonDict
-    ) -> Tuple[int, Dict[str, Optional[JsonDict]]]:
+    ) -> Tuple[int, Dict[str, Optional[JsonMapping]]]:
         user_ids: List[str] = content["user_ids"]
 
         logger.info("Resync for %r", user_ids)
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 324fdfa892..70faf4b1ec 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -759,18 +759,10 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
             mapping of user_id -> device_id -> device_info.
         """
         unique_user_ids = user_ids | {user_id for user_id, _ in user_and_device_ids}
-        user_map = await self.get_device_list_last_stream_id_for_remotes(
-            list(unique_user_ids)
-        )
 
-        # We go and check if any of the users need to have their device lists
-        # resynced.  If they do then we remove them from the cached list.
-        users_needing_resync = await self.get_user_ids_requiring_device_list_resync(
+        user_ids_in_cache = await self.get_users_whose_devices_are_cached(
             unique_user_ids
         )
-        user_ids_in_cache = {
-            user_id for user_id, stream_id in user_map.items() if stream_id
-        } - users_needing_resync
         user_ids_not_in_cache = unique_user_ids - user_ids_in_cache
 
         # First fetch all the users which all devices are to be returned.
@@ -792,6 +784,22 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
 
         return user_ids_not_in_cache, results
 
+    async def get_users_whose_devices_are_cached(
+        self, user_ids: StrCollection
+    ) -> Set[str]:
+        """Checks which of the given users we have cached the devices for."""
+        user_map = await self.get_device_list_last_stream_id_for_remotes(user_ids)
+
+        # We go and check if any of the users need to have their device lists
+        # resynced.  If they do then we remove them from the cached list.
+        users_needing_resync = await self.get_user_ids_requiring_device_list_resync(
+            user_ids
+        )
+        user_ids_in_cache = {
+            user_id for user_id, stream_id in user_map.items() if stream_id
+        } - users_needing_resync
+        return user_ids_in_cache
+
     @cached(num_args=2, tree=True)
     async def _get_cached_user_device(self, user_id: str, device_id: str) -> JsonDict:
         content = await self.db_pool.simple_select_one_onecol(

From d23c394669660a7226c818f222a76ec0905e126e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 8 Sep 2023 13:06:00 +0100
Subject: [PATCH 440/562] Reduce CPU overhead of change password endpoint
 (#16264)

---
 changelog.d/16264.misc         |   1 +
 synapse/rest/client/account.py | 112 ++++++++++++++++-----------------
 2 files changed, 55 insertions(+), 58 deletions(-)
 create mode 100644 changelog.d/16264.misc

diff --git a/changelog.d/16264.misc b/changelog.d/16264.misc
new file mode 100644
index 0000000000..a744434bef
--- /dev/null
+++ b/changelog.d/16264.misc
@@ -0,0 +1 @@
+Reduce CPU overhead of change password endpoint.
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py
index 679ab9f266..196b292890 100644
--- a/synapse/rest/client/account.py
+++ b/synapse/rest/client/account.py
@@ -179,85 +179,81 @@ class PasswordRestServlet(RestServlet):
         #
         # In the second case, we require a password to confirm their identity.
 
-        requester = None
-        if self.auth.has_access_token(request):
-            requester = await self.auth.get_user_by_req(request)
-            try:
+        try:
+            requester = None
+            if self.auth.has_access_token(request):
+                requester = await self.auth.get_user_by_req(request)
                 params, session_id = await self.auth_handler.validate_user_via_ui_auth(
                     requester,
                     request,
                     body.dict(exclude_unset=True),
                     "modify your account password",
                 )
-            except InteractiveAuthIncompleteError as e:
-                # The user needs to provide more steps to complete auth, but
-                # they're not required to provide the password again.
-                #
-                # If a password is available now, hash the provided password and
-                # store it for later.
-                if new_password:
-                    new_password_hash = await self.auth_handler.hash(new_password)
-                    await self.auth_handler.set_session_data(
-                        e.session_id,
-                        UIAuthSessionDataConstants.PASSWORD_HASH,
-                        new_password_hash,
-                    )
-                raise
-            user_id = requester.user.to_string()
-        else:
-            try:
+                user_id = requester.user.to_string()
+            else:
                 result, params, session_id = await self.auth_handler.check_ui_auth(
                     [[LoginType.EMAIL_IDENTITY]],
                     request,
                     body.dict(exclude_unset=True),
                     "modify your account password",
                 )
-            except InteractiveAuthIncompleteError as e:
-                # The user needs to provide more steps to complete auth, but
-                # they're not required to provide the password again.
-                #
-                # If a password is available now, hash the provided password and
-                # store it for later.
-                if new_password:
-                    new_password_hash = await self.auth_handler.hash(new_password)
-                    await self.auth_handler.set_session_data(
-                        e.session_id,
-                        UIAuthSessionDataConstants.PASSWORD_HASH,
-                        new_password_hash,
+
+            if LoginType.EMAIL_IDENTITY in result:
+                threepid = result[LoginType.EMAIL_IDENTITY]
+                if "medium" not in threepid or "address" not in threepid:
+                    raise SynapseError(500, "Malformed threepid")
+                if threepid["medium"] == "email":
+                    # For emails, canonicalise the address.
+                    # We store all email addresses canonicalised in the DB.
+                    # (See add_threepid in synapse/handlers/auth.py)
+                    try:
+                        threepid["address"] = validate_email(threepid["address"])
+                    except ValueError as e:
+                        raise SynapseError(400, str(e))
+                # if using email, we must know about the email they're authing with!
+                threepid_user_id = await self.datastore.get_user_id_by_threepid(
+                    threepid["medium"], threepid["address"]
+                )
+                if not threepid_user_id:
+                    raise SynapseError(
+                        404, "Email address not found", Codes.NOT_FOUND
+                    )
+                user_id = threepid_user_id
+            else:
+                logger.error("Auth succeeded but no known type! %r", result.keys())
+                raise SynapseError(500, "", Codes.UNKNOWN)
+
+        except InteractiveAuthIncompleteError as e:
+            # The user needs to provide more steps to complete auth, but
+            # they're not required to provide the password again.
+            #
+            # If a password is available now, hash the provided password and
+            # store it for later. We only do this if we don't already have the
+            # password hash stored, to avoid repeatedly hashing the password.
+
+            if not new_password:
                 raise
 
-        if LoginType.EMAIL_IDENTITY in result:
-            threepid = result[LoginType.EMAIL_IDENTITY]
-            if "medium" not in threepid or "address" not in threepid:
-                raise SynapseError(500, "Malformed threepid")
-            if threepid["medium"] == "email":
-                # For emails, canonicalise the address.
-                # We store all email addresses canonicalised in the DB.
-                # (See add_threepid in synapse/handlers/auth.py)
-                try:
-                    threepid["address"] = validate_email(threepid["address"])
-                except ValueError as e:
-                    raise SynapseError(400, str(e))
-            # if using email, we must know about the email they're authing with!
-            threepid_user_id = await self.datastore.get_user_id_by_threepid(
-                threepid["medium"], threepid["address"]
-            )
-            if not threepid_user_id:
-                raise SynapseError(404, "Email address not found", Codes.NOT_FOUND)
-            user_id = threepid_user_id
-        else:
-            logger.error("Auth succeeded but no known type! %r", result.keys())
-            raise SynapseError(500, "", Codes.UNKNOWN)
+            existing_session_password_hash = await self.auth_handler.get_session_data(
+                e.session_id, UIAuthSessionDataConstants.PASSWORD_HASH, None
+            )
+            if existing_session_password_hash:
+                raise
+
+            new_password_hash = await self.auth_handler.hash(new_password)
+            await self.auth_handler.set_session_data(
+                e.session_id,
+                UIAuthSessionDataConstants.PASSWORD_HASH,
+                new_password_hash,
+            )
+            raise
 
         # If we have a password in this request, prefer it. Otherwise, use the
         # password hash from an earlier request.
         if new_password:
             password_hash: Optional[str] = await self.auth_handler.hash(new_password)
         elif session_id is not None:
-            password_hash = await self.auth_handler.get_session_data(
-                session_id, UIAuthSessionDataConstants.PASSWORD_HASH, None
-            )
+            password_hash = existing_session_password_hash
         else:
             # UI validation was skipped, but the request did not include a new
             # password.

From 583d5963e6179689ed7d01be5eec36733be0444f Mon Sep 17 00:00:00 2001
From: V02460
Date: Fri, 8 Sep 2023 14:10:26 +0200
Subject: [PATCH 441/562] Raise setuptools_rust version cap to 1.7.0 (#16277)

---
 changelog.d/16277.misc | 1 +
 pyproject.toml         | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/16277.misc

diff --git a/changelog.d/16277.misc b/changelog.d/16277.misc
new file mode 100644
index 0000000000..c131a46ec3
--- /dev/null
+++ b/changelog.d/16277.misc
@@ -0,0 +1 @@
+Raise setuptools_rust version cap to 1.7.0.
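
[Editor's note: the change-password refactor above hinges on one idea — bcrypt-style password hashing is deliberately expensive, so hash the new password at most once per UI-auth session and stash the hash in server-side session data, reusing it on every retry of the incomplete flow. A minimal sketch of that pattern follows, using `bcrypt` and a hypothetical in-memory session store; none of these names come from Synapse.]

```python
import bcrypt  # pip install bcrypt
from typing import Dict, Tuple

# Hypothetical server-side store: (session ID, key) -> stashed value.
_session_data: Dict[Tuple[str, str], bytes] = {}


def get_or_compute_password_hash(session_id: str, new_password: str) -> bytes:
    cached = _session_data.get((session_id, "password_hash"))
    if cached is not None:
        # A previous round of this UI-auth session already paid the hashing cost.
        return cached
    # Key-derivation is the expensive step we want to run only once.
    hashed = bcrypt.hashpw(new_password.encode("utf-8"), bcrypt.gensalt(rounds=12))
    _session_data[(session_id, "password_hash")] = hashed
    return hashed
```

[Each retry of an incomplete auth flow then reuses the stored hash instead of re-running the key-derivation function, which is where the CPU saving comes from.]
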
diff --git a/pyproject.toml b/pyproject.toml
index c17f4da72d..5b43abe907 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -370,7 +370,7 @@ furo = ">=2022.12.7,<2024.0.0"
 # system changes.
 # We are happy to raise these upper bounds upon request,
 # provided we check that it's safe to do so (i.e. that CI passes).
-requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.6.0"]
+requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.7.0"]
 build-backend = "poetry.core.masonry.api"

From 69b74d9330e42fc91a9c7423d00a06cd6d3732bf Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 8 Sep 2023 08:57:56 -0400
Subject: [PATCH 442/562] Avoid temporary storage of sensitive information.
 (#16272)

During the UI auth process, avoid storing sensitive information into the database.
---
 changelog.d/16272.bugfix          |  1 +
 synapse/rest/client/account.py    |  4 ++--
 tests/rest/client/test_account.py | 13 +++++++++++++
 3 files changed, 16 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/16272.bugfix

diff --git a/changelog.d/16272.bugfix b/changelog.d/16272.bugfix
new file mode 100644
index 0000000000..afb22a999f
--- /dev/null
+++ b/changelog.d/16272.bugfix
@@ -0,0 +1 @@
+Avoid temporary storage of sensitive information.
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py
index 196b292890..49cd0805fd 100644
--- a/synapse/rest/client/account.py
+++ b/synapse/rest/client/account.py
@@ -186,7 +186,7 @@ class PasswordRestServlet(RestServlet):
                 params, session_id = await self.auth_handler.validate_user_via_ui_auth(
                     requester,
                     request,
-                    body.dict(exclude_unset=True),
+                    body.dict(exclude_unset=True, exclude={"new_password"}),
                     "modify your account password",
                 )
                 user_id = requester.user.to_string()
@@ -194,7 +194,7 @@ class PasswordRestServlet(RestServlet):
                 result, params, session_id = await self.auth_handler.check_ui_auth(
                     [[LoginType.EMAIL_IDENTITY]],
                     request,
-                    body.dict(exclude_unset=True),
+                    body.dict(exclude_unset=True, exclude={"new_password"}),
                     "modify your account password",
                 )
 
diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py
index e9f495e206..4a0eca5b30 100644
--- a/tests/rest/client/test_account.py
+++ b/tests/rest/client/test_account.py
@@ -31,6 +31,7 @@ from synapse.rest import admin
 from synapse.rest.client import account, login, register, room
 from synapse.rest.synapse.client.password_reset import PasswordResetSubmitTokenResource
 from synapse.server import HomeServer
+from synapse.storage._base import db_to_json
 from synapse.types import JsonDict, UserID
 from synapse.util import Clock
 
@@ -134,6 +135,18 @@ class PasswordResetTestCase(unittest.HomeserverTestCase):
         # Assert we can't log in with the old password
         self.attempt_wrong_password_login("kermit", old_password)
 
+        # Check that the UI Auth information doesn't store the password in the database.
+        #
+        # Note that we don't have the UI Auth session ID, so just pull out the single
+        # row.
+        ui_auth_data = self.get_success(
+            self.store.db_pool.simple_select_one(
+                "ui_auth_sessions", keyvalues={}, retcols=("clientdict",)
+            )
+        )
+        client_dict = db_to_json(ui_auth_data["clientdict"])
+        self.assertNotIn("new_password", client_dict)
+
     @override_config({"rc_3pid_validation": {"burst_count": 3}})
     def test_ratelimit_by_email(self) -> None:
         """Test that we ratelimit /requestToken for the same email."""

From 9084429a6c6287f37eb6743a9cdc1731f4ea8ed4 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 8 Sep 2023 08:59:23 -0400
Subject: [PATCH 443/562] Bump gitpython from 3.1.34 to 3.1.35 (#16279)

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 poetry.lock | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index d7bbfbd358..b577ae4f18 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -586,13 +586,13 @@ smmap = ">=3.0.1,<6"
 
 [[package]]
 name = "gitpython"
-version = "3.1.34"
+version = "3.1.35"
 description = "GitPython is a Python library used to interact with Git repositories"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "GitPython-3.1.34-py3-none-any.whl", hash = "sha256:5d3802b98a3bae1c2b8ae0e1ff2e4aa16bcdf02c145da34d092324f599f01395"},
-    {file = "GitPython-3.1.34.tar.gz", hash = "sha256:85f7d365d1f6bf677ae51039c1ef67ca59091c7ebd5a3509aa399d4eda02d6dd"},
+    {file = "GitPython-3.1.35-py3-none-any.whl", hash = "sha256:c19b4292d7a1d3c0f653858db273ff8a6614100d1eb1528b014ec97286193c09"},
+    {file = "GitPython-3.1.35.tar.gz", hash = "sha256:9cbefbd1789a5fe9bcf621bb34d3f441f3a90c8461d377f84eda73e721d9b06b"},
 ]
 
 [package.dependencies]

From f43d99462413b0b572da2e52037db8b1135f5ea6 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 8 Sep 2023 14:43:01 +0100
Subject: [PATCH 444/562] Fix bug with new task scheduler using lots of CPU.
 (#16278)

Using the new `TaskScheduler` meant that we'd create lots of new
metrics (due to adding task ID to the desc of background process),
resulting in requests for metrics taking an increasing amount of CPU.
---
 changelog.d/16278.misc         |  1 +
 synapse/util/task_scheduler.py | 43 +++++++++++++++++-----------------
 2 files changed, 23 insertions(+), 21 deletions(-)
 create mode 100644 changelog.d/16278.misc

diff --git a/changelog.d/16278.misc b/changelog.d/16278.misc
new file mode 100644
index 0000000000..e82a470c45
--- /dev/null
+++ b/changelog.d/16278.misc
@@ -0,0 +1 @@
+Fix using the new task scheduler causing lots of CPU to be used.
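
[Editor's note: the commit message above describes a classic Prometheus pitfall — putting a unique task ID into a metric's name or labels materialises a new time series per task, so the scrape payload, and the CPU needed to render it, grows without bound. A small sketch of the difference, using `prometheus_client`; the metric and function names here are illustrative, not Synapse's.]

```python
from prometheus_client import Counter

# One counter, labelled by a "desc" dimension.
runs = Counter("background_task_runs", "Background task runs", labelnames=["desc"])


def record_run_bad(task_id: str, action: str) -> None:
    # One new series for every task ID ever seen -> unbounded cardinality.
    runs.labels(desc=f"{task_id}-{action}").inc()


def record_run_good(action: str) -> None:
    # One series per action name -> a small, fixed set of series.
    runs.labels(desc=action).inc()
```

[The fix in the diff below is exactly the second form: the background process is registered under `task.action` alone rather than `f"{task.id}-{task.action}"`.]
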
diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py
index 9b2581e51a..b7de201bde 100644
--- a/synapse/util/task_scheduler.py
+++ b/synapse/util/task_scheduler.py
@@ -19,6 +19,7 @@ from prometheus_client import Gauge
 
 from twisted.python.failure import Failure
 
+from synapse.logging.context import nested_logging_context
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import JsonMapping, ScheduledTask, TaskStatus
 from synapse.util.stringutils import random_string
@@ -316,26 +317,27 @@ class TaskScheduler:
         function = self._actions[task.action]
 
         async def wrapper() -> None:
-            try:
-                (status, result, error) = await function(task)
-            except Exception:
-                f = Failure()
-                logger.error(
-                    f"scheduled task {task.id} failed",
-                    exc_info=(f.type, f.value, f.getTracebackObject()),
-                )
-                status = TaskStatus.FAILED
-                result = None
-                error = f.getErrorMessage()
+            with nested_logging_context(task.id):
+                try:
+                    (status, result, error) = await function(task)
+                except Exception:
+                    f = Failure()
+                    logger.error(
+                        f"scheduled task {task.id} failed",
+                        exc_info=(f.type, f.value, f.getTracebackObject()),
+                    )
+                    status = TaskStatus.FAILED
+                    result = None
+                    error = f.getErrorMessage()
 
-            await self._store.update_scheduled_task(
-                task.id,
-                self._clock.time_msec(),
-                status=status,
-                result=result,
-                error=error,
-            )
-            self._running_tasks.remove(task.id)
+                await self._store.update_scheduled_task(
+                    task.id,
+                    self._clock.time_msec(),
+                    status=status,
+                    result=result,
+                    error=error,
+                )
+                self._running_tasks.remove(task.id)
 
         if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS:
             return
@@ -353,5 +355,4 @@ class TaskScheduler:
 
         self._running_tasks.add(task.id)
         await self.update_task(task.id, status=TaskStatus.ACTIVE)
-        description = f"{task.id}-{task.action}"
-        run_as_background_process(description, wrapper)
+        run_as_background_process(task.action, wrapper)

From 5c8870cb28cc98d0b7a310b5dd2fade7ff45743d Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 8 Sep 2023 09:47:36 -0400
Subject: [PATCH 445/562] Fix-up incorrect spellings in docs. (#16282)

---
 changelog.d/16282.doc                          |   1 +
 docs/ancient_architecture_notes.md             |   2 +-
 docs/changelogs/CHANGES-2019.md                |  12 +-
 docs/changelogs/CHANGES-2020.md                |  18 +--
 docs/changelogs/CHANGES-2021.md                |   4 +-
 docs/changelogs/CHANGES-2022.md                |  18 +-
 docs/changelogs/CHANGES-pre-1.0.md             | 150 +++++++++---------
 docs/consent_tracking.md                       |   4 +-
 docs/development/contributing_guide.md         |   2 +-
 .../synapse_architecture/faster_joins.md       |   2 +-
 docs/log_contexts.md                           |   2 +-
 docs/postgres.md                               |   2 +-
 docs/setup/installation.md                     |   4 +-
 docs/tcp_replication.md                        |   4 +-
 docs/turn-howto.md                             |   2 +-
 docs/upgrade.md                                |   6 +-
 .../administration/admin_api/federation.md     |   2 +-
 .../configuration/config_documentation.md      |   4 +-
 18 files changed, 120 insertions(+), 119 deletions(-)
 create mode 100644 changelog.d/16282.doc

diff --git a/changelog.d/16282.doc b/changelog.d/16282.doc
new file mode 100644
index 0000000000..b249ea4f9f
--- /dev/null
+++ b/changelog.d/16282.doc
@@ -0,0 +1 @@
+Fix typos in the documentation.
diff --git a/docs/ancient_architecture_notes.md b/docs/ancient_architecture_notes.md
index 07bb199d7a..005b434ab8 100644
--- a/docs/ancient_architecture_notes.md
+++ b/docs/ancient_architecture_notes.md
@@ -24,7 +24,7 @@ Server with a domain specific API.
 1. **Messaging Layer**
 
    This is what the rest of the homeserver hits to send messages, join rooms,
-   etc. It also allows you to register callbacks for when it get's notified by
+   etc. It also allows you to register callbacks for when it gets notified by
    lower levels that e.g. a new message has been received. It is responsible
   for serializing requests to send to the data
diff --git a/docs/changelogs/CHANGES-2019.md b/docs/changelogs/CHANGES-2019.md
index a356cc49a3..1d01c83f07 100644
--- a/docs/changelogs/CHANGES-2019.md
+++ b/docs/changelogs/CHANGES-2019.md
@@ -164,7 +164,7 @@ Synapse 1.6.0rc2 (2019-11-25)
 Bugfixes
 --------
 
-- Fix a bug which could cause the background database update hander for event labels to get stuck in a loop raising exceptions. ([\#6407](https://github.com/matrix-org/synapse/issues/6407))
+- Fix a bug which could cause the background database update handler for event labels to get stuck in a loop raising exceptions. ([\#6407](https://github.com/matrix-org/synapse/issues/6407))
 
 
 Synapse 1.6.0rc1 (2019-11-20)
@@ -191,7 +191,7 @@ Bugfixes
 - Appservice requests will no longer contain a double slash prefix when the appservice url provided ends in a slash. ([\#6306](https://github.com/matrix-org/synapse/issues/6306))
 - Fix `/purge_room` admin API. ([\#6307](https://github.com/matrix-org/synapse/issues/6307))
 - Fix the `hidden` field in the `devices` table for SQLite versions prior to 3.23.0. ([\#6313](https://github.com/matrix-org/synapse/issues/6313))
-- Fix bug which casued rejected events to be persisted with the wrong room state. ([\#6320](https://github.com/matrix-org/synapse/issues/6320))
+- Fix bug which caused rejected events to be persisted with the wrong room state. ([\#6320](https://github.com/matrix-org/synapse/issues/6320))
 - Fix bug where `rc_login` ratelimiting would prematurely kick in. ([\#6335](https://github.com/matrix-org/synapse/issues/6335))
 - Prevent the server taking a long time to start up when guest registration is enabled. ([\#6338](https://github.com/matrix-org/synapse/issues/6338))
 - Fix bug where upgrading a guest account to a full user would fail when account validity is enabled. ([\#6359](https://github.com/matrix-org/synapse/issues/6359))
@@ -232,7 +232,7 @@ Internal Changes
 - Add some documentation about worker replication. ([\#6305](https://github.com/matrix-org/synapse/issues/6305))
 - Move admin endpoints into separate files. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#6308](https://github.com/matrix-org/synapse/issues/6308))
 - Document the use of `lint.sh` for code style enforcement & extend it to run on specified paths only. ([\#6312](https://github.com/matrix-org/synapse/issues/6312))
-- Add optional python dependencies and dependant binary libraries to snapcraft packaging. ([\#6317](https://github.com/matrix-org/synapse/issues/6317))
+- Add optional python dependencies and dependent binary libraries to snapcraft packaging. ([\#6317](https://github.com/matrix-org/synapse/issues/6317))
 - Remove the dependency on psutil and replace functionality with the stdlib `resource` module. ([\#6318](https://github.com/matrix-org/synapse/issues/6318), [\#6336](https://github.com/matrix-org/synapse/issues/6336))
 - Improve documentation for EventContext fields. ([\#6319](https://github.com/matrix-org/synapse/issues/6319))
 - Add some checks that we aren't using state from rejected events. ([\#6330](https://github.com/matrix-org/synapse/issues/6330))
@@ -653,7 +653,7 @@ Internal Changes
 - Return 502 not 500 when failing to reach any remote server. ([\#5810](https://github.com/matrix-org/synapse/issues/5810))
 - Reduce global pauses in the events stream caused by expensive state resolution during persistence. ([\#5826](https://github.com/matrix-org/synapse/issues/5826))
 - Add a lower bound to well-known lookup cache time to avoid repeated lookups. ([\#5836](https://github.com/matrix-org/synapse/issues/5836))
-- Whitelist history visbility sytests in worker mode tests. ([\#5843](https://github.com/matrix-org/synapse/issues/5843))
+- Whitelist history visibility sytests in worker mode tests. ([\#5843](https://github.com/matrix-org/synapse/issues/5843))
 
 
 Synapse 1.2.1 (2019-07-26)
==========================
@@ -817,7 +817,7 @@ See the [upgrade notes](docs/upgrade.md#upgrading-to-v110) for more details.
 Features
 --------
 
-- Added possibilty to disable local password authentication. Contributed by Daniel Hoffend. ([\#5092](https://github.com/matrix-org/synapse/issues/5092))
+- Added possibility to disable local password authentication. Contributed by Daniel Hoffend. ([\#5092](https://github.com/matrix-org/synapse/issues/5092))
 - Add monthly active users to phonehome stats. ([\#5252](https://github.com/matrix-org/synapse/issues/5252))
 - Allow expired user to trigger renewal email sending manually. ([\#5363](https://github.com/matrix-org/synapse/issues/5363))
 - Statistics on forward extremities per room are now exposed via Prometheus. ([\#5384](https://github.com/matrix-org/synapse/issues/5384), [\#5458](https://github.com/matrix-org/synapse/issues/5458), [\#5461](https://github.com/matrix-org/synapse/issues/5461))
@@ -850,7 +850,7 @@ Bugfixes
 - Fix bug where clients could tight loop calling `/sync` for a period. ([\#5507](https://github.com/matrix-org/synapse/issues/5507))
 - Fix bug with `jinja2` preventing Synapse from starting. Users who had this problem should now simply need to run `pip install matrix-synapse`. ([\#5514](https://github.com/matrix-org/synapse/issues/5514))
 - Fix a regression where homeservers on private IP addresses were incorrectly blacklisted. ([\#5523](https://github.com/matrix-org/synapse/issues/5523))
-- Fixed m.login.jwt using unregistred user_id and added pyjwt>=1.6.4 as jwt conditional dependencies. Contributed by Pau Rodriguez-Estivill. ([\#5555](https://github.com/matrix-org/synapse/issues/5555), [\#5586](https://github.com/matrix-org/synapse/issues/5586))
+- Fixed m.login.jwt using unregistered user_id and added pyjwt>=1.6.4 as jwt conditional dependencies. Contributed by Pau Rodriguez-Estivill. ([\#5555](https://github.com/matrix-org/synapse/issues/5555), [\#5586](https://github.com/matrix-org/synapse/issues/5586))
 - Fix a bug that would cause invited users to receive several emails for a single 3PID invite in case the inviter is rate limited. ([\#5576](https://github.com/matrix-org/synapse/issues/5576))
diff --git a/docs/changelogs/CHANGES-2020.md b/docs/changelogs/CHANGES-2020.md
index 6b87022251..c3739796fb 100644
--- a/docs/changelogs/CHANGES-2020.md
+++ b/docs/changelogs/CHANGES-2020.md
@@ -251,7 +251,7 @@ Internal Changes
 
 - Optimise `/createRoom` with multiple invited users. ([\#8559](https://github.com/matrix-org/synapse/issues/8559))
 - Implement and use an `@lru_cache` decorator. ([\#8595](https://github.com/matrix-org/synapse/issues/8595))
-- Don't instansiate Requester directly. ([\#8614](https://github.com/matrix-org/synapse/issues/8614))
+- Don't instantiate Requester directly. ([\#8614](https://github.com/matrix-org/synapse/issues/8614))
 - Type hints for `RegistrationStore`. ([\#8615](https://github.com/matrix-org/synapse/issues/8615))
 - Change schema to support access tokens belonging to one user but granting access to another. ([\#8616](https://github.com/matrix-org/synapse/issues/8616))
 - Remove unused OPTIONS handlers. ([\#8621](https://github.com/matrix-org/synapse/issues/8621))
@@ -518,7 +518,7 @@ Bugfixes
 - Fix a bug which cause the logging system to report errors, if `DEBUG` was enabled and no `context` filter was applied. ([\#8278](https://github.com/matrix-org/synapse/issues/8278))
 - Fix edge case where push could get delayed for a user until a later event was pushed. ([\#8287](https://github.com/matrix-org/synapse/issues/8287))
 - Fix fetching malformed events from remote servers. ([\#8324](https://github.com/matrix-org/synapse/issues/8324))
-- Fix `UnboundLocalError` from occuring when appservices send a malformed register request. ([\#8329](https://github.com/matrix-org/synapse/issues/8329))
+- Fix `UnboundLocalError` from occurring when appservices send a malformed register request. ([\#8329](https://github.com/matrix-org/synapse/issues/8329))
 - Don't send push notifications to expired user accounts. ([\#8353](https://github.com/matrix-org/synapse/issues/8353))
 - Fix a regression in v1.19.0 with reactivating users through the admin API. ([\#8362](https://github.com/matrix-org/synapse/issues/8362))
 - Fix a bug where during device registration the length of the device name wasn't limited. ([\#8364](https://github.com/matrix-org/synapse/issues/8364))
@@ -815,7 +815,7 @@ Bugfixes
 - Fix a bug introduced in Synapse v1.7.2 which caused inaccurate membership counts in the room directory. ([\#7977](https://github.com/matrix-org/synapse/issues/7977))
 - Fix a long standing bug: 'Duplicate key value violates unique constraint "event_relations_id"' when message retention is configured. ([\#7978](https://github.com/matrix-org/synapse/issues/7978))
 - Fix "no create event in auth events" when trying to reject invitation after inviter leaves. Bug introduced in Synapse v1.10.0. ([\#7980](https://github.com/matrix-org/synapse/issues/7980))
-- Fix various comments and minor discrepencies in server notices code. ([\#7996](https://github.com/matrix-org/synapse/issues/7996))
+- Fix various comments and minor discrepancies in server notices code. ([\#7996](https://github.com/matrix-org/synapse/issues/7996))
 - Fix a long standing bug where HTTP HEAD requests resulted in a 400 error. ([\#7999](https://github.com/matrix-org/synapse/issues/7999))
 - Fix a long-standing bug which caused two copies of some log lines to be written when synctl was used along with a MemoryHandler logger. ([\#8011](https://github.com/matrix-org/synapse/issues/8011), [\#8012](https://github.com/matrix-org/synapse/issues/8012))
@@ -1460,7 +1460,7 @@ Bugfixes
 - Transfer alias mappings on room upgrade. ([\#6946](https://github.com/matrix-org/synapse/issues/6946))
 - Ensure that a user interactive authentication session is tied to a single request. ([\#7068](https://github.com/matrix-org/synapse/issues/7068), [\#7455](https://github.com/matrix-org/synapse/issues/7455))
 - Fix a bug in the federation API which could cause occasional "Failed to get PDU" errors. ([\#7089](https://github.com/matrix-org/synapse/issues/7089))
-- Return the proper error (`M_BAD_ALIAS`) when a non-existant canonical alias is provided. ([\#7109](https://github.com/matrix-org/synapse/issues/7109))
+- Return the proper error (`M_BAD_ALIAS`) when a non-existent canonical alias is provided. ([\#7109](https://github.com/matrix-org/synapse/issues/7109))
 - Fix a bug which meant that groups updates were not correctly replicated between workers. ([\#7117](https://github.com/matrix-org/synapse/issues/7117))
 - Fix starting workers when federation sending not split out. ([\#7133](https://github.com/matrix-org/synapse/issues/7133))
 - Ensure `is_verified` is a boolean in responses to `GET /_matrix/client/r0/room_keys/keys`. Also warn the user if they forgot the `version` query param. ([\#7150](https://github.com/matrix-org/synapse/issues/7150))
@@ -1482,7 +1482,7 @@ Bugfixes
 - Fix bad error handling that would cause Synapse to crash if it's provided with a YAML configuration file that's either empty or doesn't parse into a key-value map. ([\#7341](https://github.com/matrix-org/synapse/issues/7341))
 - Fix incorrect metrics reporting for `renew_attestations` background task. ([\#7344](https://github.com/matrix-org/synapse/issues/7344))
 - Prevent non-federating rooms from appearing in responses to federated `POST /publicRoom` requests when a filter was included. ([\#7367](https://github.com/matrix-org/synapse/issues/7367))
-- Fix a bug which would cause the room durectory to be incorrectly populated if Synapse was upgraded directly from v1.2.1 or earlier to v1.4.0 or later. Note that this fix does not apply retrospectively; see the [upgrade notes](docs/upgrade.md#upgrading-to-v1130) for more information. ([\#7387](https://github.com/matrix-org/synapse/issues/7387))
+- Fix a bug which would cause the room directory to be incorrectly populated if Synapse was upgraded directly from v1.2.1 or earlier to v1.4.0 or later. Note that this fix does not apply retrospectively; see the [upgrade notes](docs/upgrade.md#upgrading-to-v1130) for more information. ([\#7387](https://github.com/matrix-org/synapse/issues/7387))
 - Fix bug in `EventContext.deserialize`. ([\#7393](https://github.com/matrix-org/synapse/issues/7393))
@@ -1638,7 +1638,7 @@ Security advisory
 -----------------
 
 Synapse may be vulnerable to request-smuggling attacks when it is used with a
-reverse-proxy. The vulnerabilties are fixed in Twisted 20.3.0, and are
+reverse-proxy. The vulnerabilities are fixed in Twisted 20.3.0, and are
 described in
 [CVE-2020-10108](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-10108)
 and
@@ -1748,7 +1748,7 @@ Internal Changes
 - Refactoring work in preparation for changing the event redaction algorithm. ([\#6874](https://github.com/matrix-org/synapse/issues/6874), [\#6875](https://github.com/matrix-org/synapse/issues/6875), [\#6983](https://github.com/matrix-org/synapse/issues/6983), [\#7003](https://github.com/matrix-org/synapse/issues/7003))
 - Improve performance of v2 state resolution for large rooms. ([\#6952](https://github.com/matrix-org/synapse/issues/6952), [\#7095](https://github.com/matrix-org/synapse/issues/7095))
 - Reduce time spent doing GC, by freezing objects on startup. ([\#6953](https://github.com/matrix-org/synapse/issues/6953))
-- Minor perfermance fixes to `get_auth_chain_ids`. ([\#6954](https://github.com/matrix-org/synapse/issues/6954))
+- Minor performance fixes to `get_auth_chain_ids`. ([\#6954](https://github.com/matrix-org/synapse/issues/6954))
 - Don't record remote cross-signing keys in the `devices` table. ([\#6956](https://github.com/matrix-org/synapse/issues/6956))
 - Use flake8-comprehensions to enforce good hygiene of list/set/dict comprehensions. ([\#6957](https://github.com/matrix-org/synapse/issues/6957))
 - Merge worker apps together. ([\#6964](https://github.com/matrix-org/synapse/issues/6964), [\#7002](https://github.com/matrix-org/synapse/issues/7002), [\#7055](https://github.com/matrix-org/synapse/issues/7055), [\#7104](https://github.com/matrix-org/synapse/issues/7104))
@@ -1809,7 +1809,7 @@ Bugfixes
 - Allow URL-encoded User IDs on `/_synapse/admin/v2/users/[/admin]` endpoints. Thanks to @NHAS for reporting. ([\#6825](https://github.com/matrix-org/synapse/issues/6825))
 - Fix Synapse refusing to start if `federation_certificate_verification_whitelist` option is blank. ([\#6849](https://github.com/matrix-org/synapse/issues/6849))
 - Fix errors from logging in the purge jobs related to the message retention policies support. ([\#6945](https://github.com/matrix-org/synapse/issues/6945))
-- Return a 404 instead of 200 for querying information of a non-existant user through the admin API. ([\#6901](https://github.com/matrix-org/synapse/issues/6901))
+- Return a 404 instead of 200 for querying information of a non-existent user through the admin API. ([\#6901](https://github.com/matrix-org/synapse/issues/6901))
 
 
 Updates to the Docker image
@@ -1889,7 +1889,7 @@ Bugfixes
 Synapse 1.10.0rc4 (2020-02-11)
 ==============================
 
-This release candidate was built incorrectly and is superceded by 1.10.0rc5.
+This release candidate was built incorrectly and is superseded by 1.10.0rc5.
 
 Synapse 1.10.0rc3 (2020-02-10)
 ==============================
diff --git a/docs/changelogs/CHANGES-2021.md b/docs/changelogs/CHANGES-2021.md
index 8e349504d5..2247d6333c 100644
--- a/docs/changelogs/CHANGES-2021.md
+++ b/docs/changelogs/CHANGES-2021.md
@@ -2270,7 +2270,7 @@ Features
 Bugfixes
 --------
 
-- Fix spurious errors in logs when deleting a non-existant pusher. ([\#9121](https://github.com/matrix-org/synapse/issues/9121))
+- Fix spurious errors in logs when deleting a non-existent pusher. ([\#9121](https://github.com/matrix-org/synapse/issues/9121))
 - Fix a long-standing bug where Synapse would return a 500 error when a thumbnail did not exist (and auto-generation of thumbnails was not enabled). ([\#9163](https://github.com/matrix-org/synapse/issues/9163))
 - Fix a long-standing bug where an internal server error was raised when attempting to preview an HTML document in an unknown character encoding. ([\#9164](https://github.com/matrix-org/synapse/issues/9164))
 - Fix a long-standing bug where invalid data could cause errors when calculating the presentable room name for push. ([\#9165](https://github.com/matrix-org/synapse/issues/9165))
@@ -2522,7 +2522,7 @@ Bugfixes
 - Fix a long-standing bug where a `m.image` event without a `url` would cause errors on push. ([\#8965](https://github.com/matrix-org/synapse/issues/8965))
 - Fix a small bug in v2 state resolution algorithm, which could also cause performance issues for rooms with large numbers of power levels. ([\#8971](https://github.com/matrix-org/synapse/issues/8971))
 - Add validation to the `sendToDevice` API to raise a missing parameters error instead of a 500 error. ([\#8975](https://github.com/matrix-org/synapse/issues/8975))
-- Add validation of group IDs to raise a 400 error instead of a 500 eror. ([\#8977](https://github.com/matrix-org/synapse/issues/8977))
+- Add validation of group IDs to raise a 400 error instead of a 500 error.
([\#8977](https://github.com/matrix-org/synapse/issues/8977)) Improved Documentation diff --git a/docs/changelogs/CHANGES-2022.md b/docs/changelogs/CHANGES-2022.md index 81e2849516..a9eced7c9e 100644 --- a/docs/changelogs/CHANGES-2022.md +++ b/docs/changelogs/CHANGES-2022.md @@ -208,7 +208,7 @@ Improved Documentation ---------------------- - Upload documentation PRs to Netlify. ([\#12947](https://github.com/matrix-org/synapse/issues/12947), [\#14370](https://github.com/matrix-org/synapse/issues/14370)) -- Add addtional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust general TURN server doc structure. ([\#14293](https://github.com/matrix-org/synapse/issues/14293)) +- Add additional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust general TURN server doc structure. ([\#14293](https://github.com/matrix-org/synapse/issues/14293)) - Add example on how to load balance /sync requests. Contributed by [aceArt](https://aceart.de). ([\#14297](https://github.com/matrix-org/synapse/issues/14297)) - Edit sample Nginx reverse proxy configuration to use HTTP/1.1. Contributed by Brad Jones. ([\#14414](https://github.com/matrix-org/synapse/issues/14414)) @@ -490,7 +490,7 @@ Internal Changes - When authenticating batched events, check for auth events in batch as well as DB. ([\#14214](https://github.com/matrix-org/synapse/issues/14214)) - Update CI config to avoid GitHub Actions deprecation warnings. ([\#14216](https://github.com/matrix-org/synapse/issues/14216), [\#14224](https://github.com/matrix-org/synapse/issues/14224)) - Update dependency requirements to allow building with poetry-core 1.3.2. ([\#14217](https://github.com/matrix-org/synapse/issues/14217)) -- Rename the `cache_memory` extra to `cache-memory`, for compatability with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14221](https://github.com/matrix-org/synapse/issues/14221)) +- Rename the `cache_memory` extra to `cache-memory`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14221](https://github.com/matrix-org/synapse/issues/14221)) - Specify dev-dependencies using lower bounds, to reduce the likelihood of a dependabot merge conflict. The lockfile continues to pin to specific versions. ([\#14227](https://github.com/matrix-org/synapse/issues/14227)) @@ -534,7 +534,7 @@ Bugfixes Internal Changes ---------------- -- Rename the `url_preview` extra to `url-preview`, for compatability with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14085](https://github.com/matrix-org/synapse/issues/14085)) +- Rename the `url_preview` extra to `url-preview`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14085](https://github.com/matrix-org/synapse/issues/14085)) Synapse 1.69.0rc2 (2022-10-06) @@ -719,7 +719,7 @@ Improved Documentation - Note that `libpq` is required on ARM-based Macs. ([\#13480](https://github.com/matrix-org/synapse/issues/13480)) - Fix a mistake in the config manual introduced in Synapse 1.22.0: the `event_cache_size` _is_ scaled by `caches.global_factor`. 
([\#13726](https://github.com/matrix-org/synapse/issues/13726)) - Fix a typo in the documentation for the login ratelimiting configuration. ([\#13727](https://github.com/matrix-org/synapse/issues/13727)) -- Define Synapse's compatability policy for SQLite versions. ([\#13728](https://github.com/matrix-org/synapse/issues/13728)) +- Define Synapse's compatibility policy for SQLite versions. ([\#13728](https://github.com/matrix-org/synapse/issues/13728)) - Add docs for the common fix of deleting the `matrix_synapse.egg-info/` directory for fixing Python dependency problems. ([\#13785](https://github.com/matrix-org/synapse/issues/13785)) - Update request log format documentation to mention the format used when the authenticated user is controlling another user. ([\#13794](https://github.com/matrix-org/synapse/issues/13794)) @@ -2035,7 +2035,7 @@ Internal Changes - Add opentracing spans to calls to external cache. ([\#12380](https://github.com/matrix-org/synapse/issues/12380)) - Lay groundwork for using `poetry` to manage Synapse's dependencies. ([\#12381](https://github.com/matrix-org/synapse/issues/12381), [\#12407](https://github.com/matrix-org/synapse/issues/12407), [\#12412](https://github.com/matrix-org/synapse/issues/12412), [\#12418](https://github.com/matrix-org/synapse/issues/12418)) - Make missing `importlib_metadata` dependency explicit. ([\#12384](https://github.com/matrix-org/synapse/issues/12384), [\#12400](https://github.com/matrix-org/synapse/issues/12400)) -- Update type annotations for compatiblity with prometheus_client 0.14. ([\#12389](https://github.com/matrix-org/synapse/issues/12389)) +- Update type annotations for compatibility with prometheus_client 0.14. ([\#12389](https://github.com/matrix-org/synapse/issues/12389)) - Remove support for the unstable identifiers specified in [MSC3288](https://github.com/matrix-org/matrix-doc/pull/3288). ([\#12398](https://github.com/matrix-org/synapse/issues/12398)) - Add missing type hints to configuration classes. ([\#12402](https://github.com/matrix-org/synapse/issues/12402)) - Add files used to build the Docker image used for complement testing into the Synapse repository. ([\#12404](https://github.com/matrix-org/synapse/issues/12404)) @@ -2207,7 +2207,7 @@ Deprecations and Removals - **Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))** - **`synctl` has been moved into into `synapse._scripts` and is exposed as an entry point; see [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved). ([\#12140](https://github.com/matrix-org/synapse/issues/12140)) -- Remove backwards compatibilty with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138)) +- Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138)) - The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200)) @@ -2586,10 +2586,10 @@ Bugfixes Improved Documentation ---------------------- -- Warn against using a Let's Encrypt certificate for TLS/DTLS TURN server client connections, and suggest using ZeroSSL certificate instead. 
This works around client-side connectivity errors caused by WebRTC libraries that reject Let's Encrypt certificates. Contibuted by @AndrewFerr. ([\#11686](https://github.com/matrix-org/synapse/issues/11686)) +- Warn against using a Let's Encrypt certificate for TLS/DTLS TURN server client connections, and suggest using ZeroSSL certificate instead. This works around client-side connectivity errors caused by WebRTC libraries that reject Let's Encrypt certificates. Contributed by @AndrewFerr. ([\#11686](https://github.com/matrix-org/synapse/issues/11686)) - Document the new `SYNAPSE_TEST_PERSIST_SQLITE_DB` environment variable in the contributing guide. ([\#11715](https://github.com/matrix-org/synapse/issues/11715)) - Document that the minimum supported PostgreSQL version is now 10. ([\#11725](https://github.com/matrix-org/synapse/issues/11725)) -- Fix typo in demo docs: differnt. ([\#11735](https://github.com/matrix-org/synapse/issues/11735)) +- Fix typo in demo docs: different. ([\#11735](https://github.com/matrix-org/synapse/issues/11735)) - Update room spec URL in config files. ([\#11739](https://github.com/matrix-org/synapse/issues/11739)) - Mention `python3-venv` and `libpq-dev` dependencies in the contribution guide. ([\#11740](https://github.com/matrix-org/synapse/issues/11740)) - Update documentation for configuring login with Facebook. ([\#11755](https://github.com/matrix-org/synapse/issues/11755)) @@ -2707,7 +2707,7 @@ Improved Documentation - Update Synapse install command for FreeBSD as the package is now prefixed with `py38`. Contributed by @itchychips. ([\#11267](https://github.com/matrix-org/synapse/issues/11267)) - Document the usage of refresh tokens. ([\#11427](https://github.com/matrix-org/synapse/issues/11427)) -- Add details for how to configure a TURN server when behind a NAT. Contibuted by @AndrewFerr. ([\#11553](https://github.com/matrix-org/synapse/issues/11553)) +- Add details for how to configure a TURN server when behind a NAT. Contributed by @AndrewFerr. ([\#11553](https://github.com/matrix-org/synapse/issues/11553)) - Add references for using Postgres to the Docker documentation. ([\#11640](https://github.com/matrix-org/synapse/issues/11640)) - Fix the documentation link in newly-generated configuration files. ([\#11678](https://github.com/matrix-org/synapse/issues/11678)) - Correct the documentation for `nginx` to use a case-sensitive url pattern. Fixes an error introduced in v1.21.0. ([\#11680](https://github.com/matrix-org/synapse/issues/11680)) diff --git a/docs/changelogs/CHANGES-pre-1.0.md b/docs/changelogs/CHANGES-pre-1.0.md index bcd33d2256..e414dbb3b1 100644 --- a/docs/changelogs/CHANGES-pre-1.0.md +++ b/docs/changelogs/CHANGES-pre-1.0.md @@ -823,7 +823,7 @@ Bugfixes - Fix error message for events with m.room.create missing from auth_events ([\#3960](https://github.com/matrix-org/synapse/issues/3960)) - Fix errors due to concurrent monthly_active_user upserts ([\#3961](https://github.com/matrix-org/synapse/issues/3961)) - Fix exceptions when processing incoming events over federation ([\#3968](https://github.com/matrix-org/synapse/issues/3968)) -- Replaced all occurences of e.message with str(e). Contributed by Schnuffle ([\#3970](https://github.com/matrix-org/synapse/issues/3970)) +- Replaced all occurrences of e.message with str(e). 
Contributed by Schnuffle ([\#3970](https://github.com/matrix-org/synapse/issues/3970)) - Fix lazy loaded sync in the presence of rejected state events ([\#3986](https://github.com/matrix-org/synapse/issues/3986)) - Fix error when logging incomplete HTTP requests ([\#3990](https://github.com/matrix-org/synapse/issues/3990)) @@ -841,7 +841,7 @@ Internal Changes - Fix the docker image building on python 3 ([\#3911](https://github.com/matrix-org/synapse/issues/3911)) - Add a regression test for logging failed HTTP requests on Python 3. ([\#3912](https://github.com/matrix-org/synapse/issues/3912)) - Comments and interface cleanup for on_receive_pdu ([\#3924](https://github.com/matrix-org/synapse/issues/3924)) -- Fix spurious exceptions when remote http client closes conncetion ([\#3925](https://github.com/matrix-org/synapse/issues/3925)) +- Fix spurious exceptions when remote http client closes connection ([\#3925](https://github.com/matrix-org/synapse/issues/3925)) - Log exceptions thrown by background tasks ([\#3927](https://github.com/matrix-org/synapse/issues/3927)) - Add a cache to get_destination_retry_timings ([\#3933](https://github.com/matrix-org/synapse/issues/3933), [\#3991](https://github.com/matrix-org/synapse/issues/3991)) - Automate pushes to docker hub ([\#3946](https://github.com/matrix-org/synapse/issues/3946)) @@ -1057,7 +1057,7 @@ Bugfixes - Make the tests pass on Twisted < 18.7.0 ([\#3676](https://github.com/matrix-org/synapse/issues/3676)) - Don’t ship recaptcha_ajax.js, use it directly from Google ([\#3677](https://github.com/matrix-org/synapse/issues/3677)) - Fixes test_reap_monthly_active_users so it passes under postgres ([\#3681](https://github.com/matrix-org/synapse/issues/3681)) -- Fix mau blocking calulation bug on login ([\#3689](https://github.com/matrix-org/synapse/issues/3689)) +- Fix mau blocking calculation bug on login ([\#3689](https://github.com/matrix-org/synapse/issues/3689)) - Fix missing yield in synapse.storage.monthly_active_users.initialise_reserved_users ([\#3692](https://github.com/matrix-org/synapse/issues/3692)) - Improve HTTP request logging to include all requests ([\#3700](https://github.com/matrix-org/synapse/issues/3700)) - Avoid timing out requests while we are streaming back the response ([\#3701](https://github.com/matrix-org/synapse/issues/3701)) @@ -1314,10 +1314,10 @@ Changes: - Remove users from user directory on deactivate (PR #3277) - Avoid sending consent notice to guest users (PR #3288) - disable CPUMetrics if no /proc/self/stat (PR #3299) -- Consistently use six\'s iteritems and wrap lazy keys/values in list() if they\'re not meant to be lazy (PR #3307) +- Consistently use six's iteritems and wrap lazy keys/values in list() if they're not meant to be lazy (PR #3307) - Add private IPv6 addresses to example config for url preview blacklist (PR #3317) Thanks to @thegcat! - Reduce stuck read-receipts: ignore depth when updating (PR #3318) -- Put python\'s logs into Trial when running unit tests (PR #3319) +- Put python's logs into Trial when running unit tests (PR #3319) Changes, python 3 migration: @@ -1344,13 +1344,13 @@ Changes in synapse v0.30.0 (2018-05-24) \'Server Notices\' are a new feature introduced in Synapse 0.30. They provide a channel whereby server administrators can send messages to users on the server. -They are used as part of communication of the server policies (see `docs/consent_tracking.md`), however the intention is that they may also find a use for features such as \"Message of the day\". 
+They are used as part of communication of the server policies (see `docs/consent_tracking.md`), however the intention is that they may also find a use for features such as "Message of the day".
 
 This feature is specific to Synapse, but uses standard Matrix communication mechanisms, so should work with any Matrix client. For more details see `docs/server_notices.md`
 
 Further Server Notices/Consent Tracking Support:
 
-- Allow overriding the server\_notices user\'s avatar (PR #3273)
+- Allow overriding the server\_notices user's avatar (PR #3273)
 - Use the localpart in the consent uri (PR #3272)
 - Support for putting %(consent\_uri)s in messages (PR #3271)
 - Block attempts to send server notices to remote users (PR #3270)
@@ -1380,7 +1380,7 @@ Changes:
 
 - Remove unused update\_external\_syncs (PR #3233)
 - Use stream rather depth ordering for push actions (PR #3212)
 - Make purge\_history operate on tokens (PR #3221)
-- Don\'t support limitless pagination (PR #3265)
+- Don't support limitless pagination (PR #3265)
 
 Bug Fixes:
 
@@ -1454,7 +1454,7 @@ Changes - Python 3 migration:
 
 - Make event properties raise AttributeError instead (PR #3102) Thanks to @NotAFile!
 - Use six.moves.urlparse (PR #3108) Thanks to @NotAFile!
 - Add py3 tests to tox with folders that work (PR #3145) Thanks to @NotAFile!
-- Don\'t yield in list comprehensions (PR #3150) Thanks to @NotAFile!
+- Don't yield in list comprehensions (PR #3150) Thanks to @NotAFile!
 - Move more xrange to six (PR #3151) Thanks to @NotAFile!
 - make imports local (PR #3152) Thanks to @NotAFile!
 - move httplib import to six (PR #3153) Thanks to @NotAFile!
@@ -1556,7 +1556,7 @@ v0.27.3-rc1 used a stale version of the develop branch so the changelog overstat
 
 Changes in synapse v0.27.3-rc1 (2018-04-09)
 ===========================================
 
-Notable changes include API support for joinability of groups. Also new metrics and phone home stats. Phone home stats include better visibility of system usage so we can tweak synpase to work better for all users rather than our own experience with matrix.org. Also, recording \'r30\' stat which is the measure we use to track overal growth of the Matrix ecosystem. It is defined as:-
+Notable changes include API support for joinability of groups. Also new metrics and phone home stats. Phone home stats include better visibility of system usage so we can tweak synapse to work better for all users rather than our own experience with matrix.org. Also, recording \'r30\' stat which is the measure we use to track overall growth of the Matrix ecosystem. It is defined as:-
 
 Counts the number of native 30 day retained users, defined as:-
 
 \* Users who have created their accounts more than 30 days
@@ -1654,7 +1654,7 @@ Changes:
 
 - No longer require a specific version of saml2 (PR #2695) Thanks to @okurz!
- Remove `verbosity`/`log_file` from generated config (PR #2755)
 - Add and improve metrics and logging (PR #2770, #2778, #2785, #2786, #2787, #2793, #2794, #2795, #2809, #2810, #2833, #2834, #2844, #2965, #2927, #2975, #2790, #2796, #2838)
-- When using synctl with workers, don\'t start the main synapse automatically (PR #2774)
+- When using synctl with workers, don't start the main synapse automatically (PR #2774)
 - Minor performance improvements (PR #2773, #2792)
 - Use a connection pool for non-federation outbound connections (PR #2817)
 - Make it possible to run unit tests against postgres (PR #2829)
@@ -1848,7 +1848,7 @@ Changes:
 
 Bug fixes:
 
 - Fix caching error in the push evaluator (PR #2332)
-- Fix bug where pusherpool didn\'t start and broke some rooms (PR #2342)
+- Fix bug where pusherpool didn't start and broke some rooms (PR #2342)
 - Fix port script for user directory tables (PR #2375)
 - Fix device lists notifications when user rejoins a room (PR #2443, #2449)
 - Fix sync to always send down current state events in timeline (PR #2451)
@@ -1860,7 +1860,7 @@ Changes in synapse v0.22.1 (2017-07-06)
 =======================================
 
 Bug fixes:
 
-- Fix bug where pusher pool didn\'t start and caused issues when interacting with some rooms (PR #2342)
+- Fix bug where pusher pool didn't start and caused issues when interacting with some rooms (PR #2342)
 
 Changes in synapse v0.22.0 (2017-07-06)
 =======================================
@@ -1933,7 +1933,7 @@ Changes:
 
 - Various small performance fixes (PR #2201, #2202, #2224, #2226, #2227, #2228, #2229)
 - Update username availability checker API (PR #2209, #2213)
-- When purging, don\'t de-delta state groups we\'re about to delete (PR #2214)
+- When purging, don't de-delta state groups we're about to delete (PR #2214)
 - Documentation to check synapse version (PR #2215) Thanks to @hamber-dick!
 - Add an index to event\_search to speed up purge history API (PR #2218)
 
@@ -1982,7 +1982,7 @@ Bug fixes:
 
 - Fix invite state to always include all events (PR #2163)
 - Fix bug where synapse would always fetch state for any missing event (PR #2170)
 - Fix a leak with timed out HTTP connections (PR #2180)
-- Fix bug where we didn\'t time out HTTP requests to ASes (PR #2192)
+- Fix bug where we didn't time out HTTP requests to ASes (PR #2192)
 
 Docs:
 
@@ -2016,7 +2016,7 @@ Changes:
 
 - Minor `/sync` performance improvements. (PR #2002, #2013, #2022)
 - Add some debug to help diagnose weird federation issue (PR #2035)
 - Correctly limit retries for all federation requests (PR #2050, #2061)
-- Don\'t lock table when persisting new one time keys (PR #2053)
+- Don't lock table when persisting new one time keys (PR #2053)
 - Reduce some CPU work on DB threads (PR #2054)
 - Cache hosts in room (PR #2060)
 - Batch sending of device list pokes (PR #2063)
@@ -2033,7 +2033,7 @@ Bug fixes:
 
 - Fix bug when federation received a PDU while a room join is in progress (PR #2016)
 - Fix resetting state on rejected events (PR #2025)
 - Fix installation issues in readme. Thanks @ricco386 (PR #2037)
-- Fix caching of remote servers\' signature keys (PR #2042)
+- Fix caching of remote servers' signature keys (PR #2042)
 - Fix some leaking log context (PR #2048, #2049, #2057, #2058)
 - Fix rejection of invites not reaching sync (PR #2056)
 
@@ -2060,7 +2060,7 @@ Changes:
 
 - Reduce database table sizes (PR #1873, #1916, #1923, #1963)
 - Update contrib/ to not use syutil. Thanks to andrewshadura!
(PR #1907) -- Don\'t fetch current state when sending an event in common case (PR #1955) +- Don't fetch current state when sending an event in common case (PR #1955) Bug fixes: @@ -2068,7 +2068,7 @@ Bug fixes: - Fix caching to not cache error responses (PR #1913) - Fix APIs to make kick & ban reasons work (PR #1917) - Fix bugs in the /keys/changes api (PR #1921) -- Fix bug where users couldn\'t forget rooms they were banned from (PR #1922) +- Fix bug where users couldn't forget rooms they were banned from (PR #1922) - Fix issue with long language values in pushers API (PR #1925) - Fix a race in transaction queue (PR #1930) - Fix dynamic thumbnailing to preserve aspect ratio. Thanks to jkolo! (PR #1945) @@ -2129,7 +2129,7 @@ Changes: - Measure size of some caches by sum of the size of cached values (PR #1815) - Measure metrics of string\_cache (PR #1821) - Reduce logging verbosity (PR #1822, #1823, #1824) -- Don\'t clobber a displayname or avatar\_url if provided by an m.room.member event (PR #1852) +- Don't clobber a displayname or avatar\_url if provided by an m.room.member event (PR #1852) - Better handle 401/404 response for federation /send/ (PR #1866, #1871) Fixes: @@ -2141,7 +2141,7 @@ Fixes: Performance: -- Don\'t block messages sending on bumping presence (PR #1789) +- Don't block messages sending on bumping presence (PR #1789) - Change device\_inbox stream index to include user (PR #1793) - Optimise state resolution (PR #1818) - Use DB cache of joined users for presence (PR #1862) @@ -2157,7 +2157,7 @@ Changes in synapse v0.18.7-rc2 (2017-01-07) Bug fixes: -- Fix error in rc1\'s discarding invalid inbound traffic logic that was incorrectly discarding missing events +- Fix error in rc1's discarding invalid inbound traffic logic that was incorrectly discarding missing events Changes in synapse v0.18.7-rc1 (2017-01-06) =========================================== @@ -2181,7 +2181,7 @@ Changes in synapse v0.18.6-rc3 (2017-01-05) Bug fixes: - Fix bug where we failed to send ban events to the banned server (PR #1758) -- Fix bug where we sent event that didn\'t originate on this server to other servers (PR #1764) +- Fix bug where we sent event that didn't originate on this server to other servers (PR #1764) - Fix bug where processing an event from a remote server took a long time because we were making long HTTP requests (PR #1765, PR #1744) Changes: @@ -2208,7 +2208,7 @@ Changes in synapse v0.18.5 (2016-12-16) Bug fixes: -- Fix federation /backfill returning events it shouldn\'t (PR #1700) +- Fix federation /backfill returning events it shouldn't (PR #1700) - Fix crash in url preview (PR #1701) Changes in synapse v0.18.5-rc3 (2016-12-13) @@ -2231,11 +2231,11 @@ Changes: Bug fixes: -- Fix handling of 500 and 429\'s over federation (PR #1650) +- Fix handling of 500 and 429's over federation (PR #1650) - Fix Content-Type header parsing (PR #1660) - Fix error when previewing sites that include unicode, thanks to kyrias (PR #1664) - Fix some cases where we drop read receipts (PR #1678) -- Fix bug where calls to `/sync` didn\'t correctly timeout (PR #1683) +- Fix bug where calls to `/sync` didn't correctly timeout (PR #1683) - Fix bug where E2E key query would fail if a single remote host failed (PR #1686) Changes in synapse v0.18.5-rc2 (2016-11-24) @@ -2243,7 +2243,7 @@ Changes in synapse v0.18.5-rc2 (2016-11-24) Bug fixes: -- Don\'t send old events over federation, fixes bug in -rc1. +- Don't send old events over federation, fixes bug in -rc1. 
Changes in synapse v0.18.5-rc1 (2016-11-24)
===========================================

Features:

Changes:

-- Use external ldap auth pacakge (PR #1628)
+- Use external ldap auth package (PR #1628)
 - Split out federation transaction sending to a worker (PR #1635)
 - Fail with a coherent error message if /sync?filter= is invalid (PR #1636)
 - More efficient notif count queries (PR #1644)
@@ -2289,7 +2289,7 @@ SECURITY UPDATE
 
 Explicitly require authentication when using LDAP3. This is the default on versions of `ldap3` above 1.0, but some distributions will package an older version.
 
-If you are using LDAP3 login and have a version of `ldap3` older than 1.0 it is **CRITICAL to updgrade**.
+If you are using LDAP3 login and have a version of `ldap3` older than 1.0 it is **CRITICAL to upgrade**.
 
 Changes in synapse v0.18.2 (2016-11-01)
 =======================================
@@ -2440,7 +2440,7 @@ Features:
 
 Changes:
 
 - Avoid pulling the full state of a room out so often (PR #1047, #1049, #1063, #1068)
-- Don\'t notify for online to online presence transitions. (PR #1054)
+- Don't notify for online to online presence transitions. (PR #1054)
 - Occasionally persist unpersisted presence updates (PR #1055)
 - Allow application services to have an optional \'url\' (PR #1056)
 - Clean up old sent transactions from DB (PR #1059)
@@ -2472,7 +2472,7 @@ Features:
 
 Changes:
 
-- Don\'t print stack traces when failing to get remote keys (PR #996)
+- Don't print stack traces when failing to get remote keys (PR #996)
 - Various federation /event/ perf improvements (PR #998)
 - Only process one local membership event per room at a time (PR #1005)
 - Move default display name push rule (PR #1011, #1023)
@@ -2488,7 +2488,7 @@ Bug fixes:
 
 - Fix /sync to not clobber status\_msg (PR #997)
 - Fix redacted state events to include prev\_content (PR #1003)
 - Fix some bugs in the auth/ldap handler (PR #1007)
-- Fix backfill request to limit URI length, so that remotes don\'t reject the requests due to path length limits (PR #1012)
+- Fix backfill request to limit URI length, so that remotes don't reject the requests due to path length limits (PR #1012)
 - Fix AS push code to not send duplicate events (PR #1025)
 
 Changes in synapse v0.17.0 (2016-08-08)
 =======================================
@@ -2577,8 +2577,8 @@ Changes:
 
 - Send the correct host header when fetching keys (PR #941)
 - Log the hostname the reCAPTCHA was completed on (PR #946)
 - Make the device id on e2e key upload optional (PR #956)
-- Add r0.2.0 to the \"supported versions\" list (PR #960)
-- Don\'t include name of room for invites in push (PR #961)
+- Add r0.2.0 to the "supported versions" list (PR #960)
+- Don't include name of room for invites in push (PR #961)
 
 Bug fixes:
 
@@ -2596,7 +2596,7 @@ Changes in synapse v0.16.1-r1 (2016-07-08)
 
 THIS IS A CRITICAL SECURITY UPDATE.
 
-This fixes a bug which allowed users\' accounts to be accessed by unauthorised users.
+This fixes a bug which allowed users' accounts to be accessed by unauthorised users.
 
 Changes in synapse v0.16.1 (2016-06-20)
 =======================================
@@ -2619,7 +2619,7 @@ Features: None
 
 Changes:
 
 - Log requester for `/publicRoom` endpoints when possible (PR #856)
-- 502 on `/thumbnail` when can\'t connect to remote server (PR #862)
+- 502 on `/thumbnail` when can't connect to remote server (PR #862)
 - Linearize fetching of gaps on incoming events (PR #871)
 
 Bugs fixes:
 
@@ -2640,7 +2640,7 @@ NB: As of v0.14 all AS config files must have an ID field.
Bug fixes: -- Don\'t make rooms published by default (PR #857) +- Don't make rooms published by default (PR #857) Changes in synapse v0.16.0-rc2 (2016-06-08) =========================================== @@ -2658,7 +2658,7 @@ Bug fixes: - Fix \'From\' header in email notifications (PR #843) - Fix presence where timeouts were not being fired for the first 8h after restarts (PR #842) -- Fix bug where synapse sent malformed transactions to AS\'s when retrying transactions (Commits 310197b, 8437906) +- Fix bug where synapse sent malformed transactions to AS's when retrying transactions (Commits 310197b, 8437906) Performance improvements: @@ -2685,7 +2685,7 @@ Changes: - Report per request metrics for all of the things using request\_handler (PR #756) - Correctly handle `NULL` password hashes from the database (PR #775) -- Allow receipts for events we haven\'t seen in the db (PR #784) +- Allow receipts for events we haven't seen in the db (PR #784) - Make synctl read a cache factor from config file (PR #785) - Increment badge count per missed convo, not per msg (PR #793) - Special case m.room.third\_party\_invite event auth to match invites (PR #814) @@ -2737,7 +2737,7 @@ Changes: Bug fixes: - Fix bug where disabling all notifications still resulted in push (PR #678) -- Fix bug where users couldn\'t reject remote invites if remote refused (PR #691) +- Fix bug where users couldn't reject remote invites if remote refused (PR #691) - Fix bug where synapse attempted to backfill from itself (PR #693) - Fix bug where profile information was not correctly added when joining remote rooms (PR #703) - Fix bug where register API required incorrect key name for AS registration (PR #727) @@ -2775,7 +2775,7 @@ Features: - Add event\_id to response to state event PUT (PR #581) - Allow guest users access to messages in rooms they have joined (PR #587) - Add config for what state is included in a room invite (PR #598) -- Send the inviter\'s member event in room invite state (PR #607) +- Send the inviter's member event in room invite state (PR #607) - Add error codes for malformed/bad JSON in /login (PR #608) - Add support for changing the actions for default rules (PR #609) - Add environment variable SYNAPSE\_CACHE\_FACTOR, default it to 0.1 (PR #612) @@ -2788,7 +2788,7 @@ Changes: - Make adding push rules idempotent (PR #587) - Improve presence performance (PR #582, #586) - Change presence semantics for `last_active_ago` (PR #582, #586) -- Don\'t allow `m.room.create` to be changed (PR #596) +- Don't allow `m.room.create` to be changed (PR #596) - Add 800x600 to default list of valid thumbnail sizes (PR #616) - Always include kicks and bans in full /sync (PR #625) - Send history visibility on boundary changes (PR #626) @@ -2854,7 +2854,7 @@ Features: Changes: -- Change `/sync` so that guest users only get rooms they\'ve joined (PR #469) +- Change `/sync` so that guest users only get rooms they've joined (PR #469) - Change to require unbanning before other membership changes (PR #501) - Change default push rules to notify for all messages (PR #486) - Change default push rules to not notify on membership changes (PR #514) @@ -2863,12 +2863,12 @@ Changes: - Change server manhole to use SSH rather than telnet (PR #473) - Change server to require AS users to be registered before use (PR #487) - Change server not to start when ASes are invalidly configured (PR #494) -- Change server to require ID and `as_token` to be unique for AS\'s (PR #496) +- Change server to require ID and `as_token` to be unique for AS's (PR 
#496)
 - Change maximum pagination limit to 1000 (PR #497)
 
 Bug fixes:
 
-- Fix bug where `/sync` didn\'t return when something under the leave key changed (PR #461)
+- Fix bug where `/sync` didn't return when something under the leave key changed (PR #461)
 - Fix bug where we returned smaller rather than larger than requested thumbnails when `method=crop` (PR #464)
 - Fix thumbnails API to only return cropped thumbnails when asking for a cropped thumbnail (PR #475)
 - Fix bug where we occasionally still logged access tokens (PR #477)
@@ -2888,7 +2888,7 @@ Changes in synapse v0.12.0-rc3 (2015-12-23)
 
 - Allow guest accounts access to `/sync` (PR #455)
 - Allow filters to include/exclude rooms at the room level rather than just from the components of the sync for each room. (PR #454)
 - Include urls for room avatars in the response to `/publicRooms` (PR #453)
-- Don\'t set a identicon as the avatar for a user when they register (PR #450)
+- Don't set an identicon as the avatar for a user when they register (PR #450)
 - Add a `display_name` to third-party invites (PR #449)
 - Send more information to the identity server for third-party invites so that it can send richer messages to the invitee (PR #446)
 - Cache the responses to `/initialSync` for 5 minutes. If a client retries a request to `/initialSync` before the a response was computed to the first request then the same response is used for both requests (PR #457)
@@ -2917,7 +2917,7 @@ Changes in synapse v0.12.0-rc1 (2015-12-10)
 
 - Filter JSON objects may now be passed as query parameters to `/sync` (PR #431)
 - Fix implementation of `/admin/whois` (PR #418)
 - Only include the rooms that user has left in `/sync` if the client requests them in the filter (PR #423)
-  - Don\'t push for `m.room.message` by default (PR #411)
+  - Don't push for `m.room.message` by default (PR #411)
 - Add API for setting per account user data (PR #392)
 - Allow users to forget rooms (PR #385)
 - Performance improvements and monitoring:
@@ -2932,8 +2932,8 @@ Changes in synapse v0.11.1 (2015-11-20)
 =======================================
 
 - Add extra options to search API (PR #394)
-- Fix bug where we did not correctly cap federation retry timers. This meant it could take several hours for servers to start talking to ressurected servers, even when they were receiving traffic from them (PR #393)
-- Don\'t advertise login token flow unless CAS is enabled. This caused issues where some clients would always use the fallback API if they did not recognize all login flows (PR #391)
+- Fix bug where we did not correctly cap federation retry timers. This meant it could take several hours for servers to start talking to resurrected servers, even when they were receiving traffic from them (PR #393)
+- Don't advertise login token flow unless CAS is enabled.
This caused issues where some clients would always use the fallback API if they did not recognize all login flows (PR #391)
 - Change /v2 sync API to rename `private_user_data` to `account_data` (PR #386)
 - Change /v2 sync API to remove the `event_map` and rename keys in `rooms` object (PR #389)
@@ -2973,7 +2973,7 @@ Changes in synapse v0.11.0-rc1 (2015-11-11)
 
 - Change retry schedule for application services (PR #320)
 - Change retry schedule for remote servers (PR #340)
 - Fix bug where we hosted static content in the incorrect place (PR #329)
-- Fix bug where we didn\'t increment retry interval for remote servers (PR #343)
+- Fix bug where we didn't increment retry interval for remote servers (PR #343)
 
 Changes in synapse v0.10.1-rc1 (2015-10-15)
 ===========================================
@@ -3058,9 +3058,9 @@ General:
 
 - Error if a user tries to register with an email already in use. (PR #211)
 - Add extra and improve existing caches (PR #212, #219, #226, #228)
 - Batch various storage request (PR #226, #228)
-- Fix bug where we didn\'t correctly log the entity that triggered the request if the request came in via an application service (PR #230)
+- Fix bug where we didn't correctly log the entity that triggered the request if the request came in via an application service (PR #230)
 - Fix bug where we needlessly regenerated the full list of rooms an AS is interested in. (PR #232)
-- Add support for AS\'s to use v2\_alpha registration API (PR #210)
+- Add support for AS's to use v2\_alpha registration API (PR #210)
 
 Configuration:
 
@@ -3148,7 +3148,7 @@ Configuration:
 
 Federation:
 
 - Improve resilience of backfill by ensuring we fetch any missing auth events.
-- Improve performance of backfill and joining remote rooms by removing unnecessary computations. This included handling events we\'d previously handled as well as attempting to compute the current state for outliers.
+- Improve performance of backfill and joining remote rooms by removing unnecessary computations. This included handling events we'd previously handled as well as attempting to compute the current state for outliers.
 
 Changes in synapse v0.9.1 (2015-05-26)
 ======================================
 
 General:
 
 - Add support for backfilling when a client paginates. This allows servers to request history for a room from remote servers when a client tries to paginate history the server does not have - SYN-36
-- Fix bug where you couldn\'t disable non-default pushrules - SYN-378
+- Fix bug where you couldn't disable non-default pushrules - SYN-378
 - Fix `register_new_user` script - SYN-359
 - Improve performance of fetching events from the database, this improves both initialSync and sending of events.
 - Improve performance of event streams, allowing synapse to handle more simultaneous connected clients.
@@ -3225,7 +3225,7 @@ General:
 
 - Added new default push rules and made them configurable by clients:
   - Suppress all notice messages.
   - Notify when invited to a new room.
-  - Notify for messages that don\'t match any rule.
+  - Notify for messages that don't match any rule.
   - Notify on incoming call.
 
 Federation:
 
@@ -3268,7 +3268,7 @@ Changes in synapse v0.7.0 (2015-02-12)
 
 > - Computing the state of a room at a point in time, used for authorization on federation requests.
 > - Fetching events from the database.
-> - User\'s room membership, used for authorizing presence updates.
+> - User's room membership, used for authorizing presence updates.
- Upgraded JSON library to improve parsing and serialisation speeds. @@ -3298,7 +3298,7 @@ Changes in synapse 0.6.0 (2014-12-16) Changes in synapse 0.5.4a (2014-12-13) ====================================== -- Fix bug while generating the error message when a file path specified in the config doesn\'t exist. +- Fix bug while generating the error message when a file path specified in the config doesn't exist. Changes in synapse 0.5.4 (2014-12-03) ===================================== @@ -3329,7 +3329,7 @@ Changes in synapse 0.5.1 (2014-11-26) See UPGRADES.rst for specific instructions on how to upgrade. - Fix bug where we served up an Event that did not match its signatures. -- Fix regression where we no longer correctly handled the case where a homeserver receives an event for a room it doesn\'t recognise (but is in.) +- Fix regression where we no longer correctly handled the case where a homeserver receives an event for a room it doesn't recognise (but is in.) Changes in synapse 0.5.0 (2014-11-19) ===================================== @@ -3342,7 +3342,7 @@ Homeserver: - Add authentication and authorization to the federation protocol. Events are now signed by their originating homeservers. - Implement the new authorization model for rooms. -- Split out web client into a seperate repository: matrix-angular-sdk. +- Split out web client into a separate repository: matrix-angular-sdk. - Change the structure of PDUs. - Fix bug where user could not join rooms via an alias containing 4-byte UTF-8 characters. - Merge concept of PDUs and Events internally. @@ -3352,7 +3352,7 @@ Homeserver: Webclient: -- The webclient has been moved to a seperate repository. +- The webclient has been moved to a separate repository. Changes in synapse 0.4.2 (2014-10-31) ===================================== @@ -3410,10 +3410,10 @@ Webclient: - Add button to send messages to users from the home page. - Add support for using TURN for VoIP calls. - Show display name change messages. -- Fix bug where the client didn\'t get the state of a newly joined room until after it has been refreshed. +- Fix bug where the client didn't get the state of a newly joined room until after it has been refreshed. - Fix bugs with tab complete. - Fix bug where holding down the down arrow caused chrome to chew 100% CPU. -- Fix bug where desktop notifications occasionally used \"Undefined\" as the display name. +- Fix bug where desktop notifications occasionally used "Undefined" as the display name. - Fix more places where we sometimes saw room IDs incorrectly. - Fix bug which caused lag when entering text in the text box. @@ -3427,21 +3427,21 @@ Homeserver: Webclient: - Add support for video calls with basic UI. -- Fix bug where one to one chats were named after your display name rather than the other person\'s. +- Fix bug where one to one chats were named after your display name rather than the other person's. - Fix bug which caused lag when typing in the textarea. -- Refuse to run on browsers we know won\'t work. +- Refuse to run on browsers we know won't work. - Trigger pagination when joining new rooms. -- Fix bug where we sometimes didn\'t display invitations in recents. +- Fix bug where we sometimes didn't display invitations in recents. - Automatically join room when accepting a VoIP call. -- Disable outgoing and reject incoming calls on browsers we don\'t support VoIP in. -- Don\'t display desktop notifications for messages in the room you are non-idle and speaking in. 
+- Disable outgoing and reject incoming calls on browsers we don't support VoIP in.
+- Don't display desktop notifications for messages in the room you are non-idle and speaking in.
 
 Changes in synapse 0.3.2 (2014-09-18)
 =====================================
 
 Webclient:
 
-- Fix bug where an empty \"bing words\" list in old accounts didn\'t send notifications when it should have done.
+- Fix bug where an empty "bing words" list in old accounts didn't send notifications when it should have done.
 
 Changes in synapse 0.3.1 (2014-09-18)
 =====================================
@@ -3451,7 +3451,7 @@ This is a release to hotfix v0.3.0 to fix two regressions.
 
 Webclient:
 
 - Fix a regression where we sometimes displayed duplicate events.
-- Fix a regression where we didn\'t immediately remove rooms you were banned in from the recents list.
+- Fix a regression where we didn't immediately remove rooms you were banned in from the recents list.
 
 Changes in synapse 0.3.0 (2014-09-18)
 =====================================
@@ -3462,8 +3462,8 @@ Homeserver:
 
 - When a user changes their displayname or avatar the server will now update all their join states to reflect this.
 - The server now adds \"age\" key to events to indicate how old they are. This is clock independent, so at no point does any server or webclient have to assume their clock is in sync with everyone else.
-- Fix bug where we didn\'t correctly pull in missing PDUs.
-- Fix bug where prev\_content key wasn\'t always returned.
+- Fix bug where we didn't correctly pull in missing PDUs.
+- Fix bug where prev\_content key wasn't always returned.
 - Add support for password resets.
 
 Webclient:
 
@@ -3473,7 +3473,7 @@ Webclient:
 
 - Always show room aliases in the UI if one is present.
 - No longer show user-count in the recents side panel.
 - Add up & down arrow support to the text box for message sending to step through your sent history.
-- Don\'t display notifications for our own messages.
+- Don't display notifications for our own messages.
 - Emotes are now formatted correctly in desktop notifications.
 - The recents list now differentiates between public & private rooms.
 - Fix bug where when switching between rooms the pagination flickered before the view jumped to the bottom of the screen.
@@ -3503,7 +3503,7 @@ Webclient:
 
 - VoIP UI and reliability improvements.
 - Add glare support for VoIP.
 - Improvements to initial startup speed.
-- Don\'t display duplicate join events.
+- Don't display duplicate join events.
 - Local echo of messages.
 - Differentiate sending and sent of local echo.
 - Various minor bug fixes.
@@ -3587,7 +3587,7 @@ Homeserver:
 
 Changes in synapse 0.1.0 (2014-08-29)
 =====================================
 
-Presence has been reenabled in this release.
+Presence has been re-enabled in this release.
 
 Homeserver:
 
@@ -3629,7 +3629,7 @@ Webclient:
 
 - Add profile pages.
 - Improve CSS layout of room.
 - Disambiguate identical display names.
-- Don\'t get remote users display names and avatars individually.
+- Don't get remote users display names and avatars individually.
 - Use the new initial sync API to reduce number of round trips to the homeserver.
 - Change url scheme to use room aliases instead of room ids where known.
 - Increase longpoll timeout.
diff --git a/docs/consent_tracking.md b/docs/consent_tracking.md
index fb1fec80fe..26620a0752 100644
--- a/docs/consent_tracking.md
+++ b/docs/consent_tracking.md
@@ -8,9 +8,9 @@ to the server until they have.
There are several parts to this functionality; each requires some specific configuration in `homeserver.yaml` to be enabled. -Note that various parts of the configuation and this document refer to the +Note that various parts of the configuration and this document refer to the "privacy policy": agreement with a privacy policy is one particular use of this -feature, but of course adminstrators can specify other terms and conditions +feature, but of course administrators can specify other terms and conditions unrelated to "privacy" per se. Collecting policy agreement from a user diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 698687b91f..4ae2fcfee3 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -266,7 +266,7 @@ The easiest way to do so is to run Postgres via a docker container. In one terminal: ```shell -docker run --rm -e POSTGRES_PASSWORD=mysecretpassword -e POSTGRES_USER=postgres -e POSTGRES_DB=postgress -p 5432:5432 postgres:14 +docker run --rm -e POSTGRES_PASSWORD=mysecretpassword -e POSTGRES_USER=postgres -e POSTGRES_DB=postgres -p 5432:5432 postgres:14 ``` If you see an error like diff --git a/docs/development/synapse_architecture/faster_joins.md b/docs/development/synapse_architecture/faster_joins.md index 1e6d585b00..2256c30239 100644 --- a/docs/development/synapse_architecture/faster_joins.md +++ b/docs/development/synapse_architecture/faster_joins.md @@ -264,7 +264,7 @@ But don't want to send out sensitive data in other HS's events in this way. Suppose we discover after resync that we shouldn't have sent out one our events (not a prev_event) to a target HS. Not much we can do. What about if we didn't send them an event but shouldn't've? E.g. what if someone joined from a new HS shortly after you did? We wouldn't talk to them. -Could imagine sending out the "Missed" events after the resync but... painful to work out what they shuld have seen if they joined/left. +Could imagine sending out the "Missed" events after the resync but... painful to work out what they should have seen if they joined/left. Instead, just send them the latest event (if they're still in the room after resync) and let them backfill.(?) - Don't do this currently. - If anyone who has received our messages sends a message to a HS we missed, they can backfill our messages diff --git a/docs/log_contexts.md b/docs/log_contexts.md index cb15dbe158..9d087d11ef 100644 --- a/docs/log_contexts.md +++ b/docs/log_contexts.md @@ -86,7 +86,7 @@ So we have stopped processing the request (and will probably go on to start processing the next), without clearing the logcontext. To circumvent this problem, synapse code assumes that, wherever you have -an awaitable, you will want to `await` it. To that end, whereever +an awaitable, you will want to `await` it. To that end, wherever functions return awaitables, we adopt the following conventions: **Rules for functions returning awaitables:** diff --git a/docs/postgres.md b/docs/postgres.md index fba4430f33..02d4b9b162 100644 --- a/docs/postgres.md +++ b/docs/postgres.md @@ -249,7 +249,7 @@ of `COLLATE` and `CTYPE` unless the config flag `allow_unsafe_locale`, found in underneath the database, or if a different version of the locale is used on any replicas. 
-If you have a databse with an unsafe locale, the safest way to fix the issue is to dump the database and recreate it with +If you have a database with an unsafe locale, the safest way to fix the issue is to dump the database and recreate it with the correct locale parameter (as shown above). It is also possible to change the parameters on a live database and run a `REINDEX` on the entire database, however extreme care must be taken to avoid database corruption. diff --git a/docs/setup/installation.md b/docs/setup/installation.md index 479f7ea543..0357d2a0fb 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -37,7 +37,7 @@ Dockerfile to automate a synapse server in a single Docker image, at Slavi Pantaleev has created an Ansible playbook, -which installs the offical Docker image of Matrix Synapse +which installs the official Docker image of Matrix Synapse along with many other Matrix-related services (Postgres database, Element, coturn, ma1sd, SSL support, etc.). For more details, see @@ -93,7 +93,7 @@ For `bookworm` and `sid`, it can be installed simply with: sudo apt install matrix-synapse ``` -Synapse is also avaliable in `bullseye-backports`. Please +Synapse is also available in `bullseye-backports`. Please see the [Debian documentation](https://backports.debian.org/Instructions/) for information on how to use backports. diff --git a/docs/tcp_replication.md b/docs/tcp_replication.md index 083cda8413..c3b8c76609 100644 --- a/docs/tcp_replication.md +++ b/docs/tcp_replication.md @@ -38,7 +38,7 @@ noted when manually using the protocol: been disabled on the main process. - The server will only time connections out that have sent a `PING` command. If a ping is sent then the connection will be closed if no - further commands are receieved within 15s. Both the client and + further commands are received within 15s. Both the client and server protocol implementations will send an initial PING on connection and ensure at least one command every 5s is sent (not necessarily `PING`). @@ -128,7 +128,7 @@ batching. See `RdataCommand` for more details. ### Example -An example iteraction is shown below. Each line is prefixed with '>' +An example interaction is shown below. Each line is prefixed with '>' or '<' to indicate which side is sending, these are *not* included on the wire: diff --git a/docs/turn-howto.md b/docs/turn-howto.md index 4e9e4117cd..9c1c6f4777 100644 --- a/docs/turn-howto.md +++ b/docs/turn-howto.md @@ -18,7 +18,7 @@ This documentation provides two TURN server configuration examples: For TURN relaying to work, the TURN service must be hosted on a server/endpoint with a public IP. -Hosting TURN behind NAT requires port forwaring and for the NAT gateway to have a public IP. +Hosting TURN behind NAT requires port forwarding and for the NAT gateway to have a public IP. However, even with appropriate configuration, NAT is known to cause issues and to often not work. Afterwards, the homeserver needs some further configuration. diff --git a/docs/upgrade.md b/docs/upgrade.md index 2f888b6f12..ba2f7703bc 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -1352,7 +1352,7 @@ In line with our [deprecation policy](deprecation_policy.md), we've dropped support for Python 3.5 and PostgreSQL 9.5, as they are no longer supported upstream. -This release of Synapse requires Python 3.6+ and PostgresSQL 9.6+ or +This release of Synapse requires Python 3.6+ and PostgreSQL 9.6+ or SQLite 3.22+. ## Removal of old List Accounts Admin API @@ -2312,7 +2312,7 @@ for details. 
# Upgrading to v0.11.0 This release includes the option to send anonymous usage stats to -matrix.org, and requires that administrators explictly opt in or out by +matrix.org, and requires that administrators explicitly opt in or out by setting the `report_stats` option to either `true` or `false`. We would really appreciate it if you could help our project out by @@ -2416,7 +2416,7 @@ latest module, please run: # Upgrading to v0.5.0 -The webclient has been split out into a seperate repository/pacakage in +The webclient has been split out into a separate repository/package in this release. Before you restart your homeserver you will need to pull in the webclient package by running: diff --git a/docs/usage/administration/admin_api/federation.md b/docs/usage/administration/admin_api/federation.md index 51f3b52da8..ce735793c0 100644 --- a/docs/usage/administration/admin_api/federation.md +++ b/docs/usage/administration/admin_api/federation.md @@ -77,7 +77,7 @@ The following fields are returned in the JSON response body: remote server, in ms. This is `0` if the last attempt to communicate with the remote server was successful. - `retry_interval` - integer - How long since the last time Synapse tried to reach - the remote server before trying again, in ms. This is `0` if no further retrying occuring. + the remote server before trying again, in ms. This is `0` if no further retrying occurring. - `failure_ts` - nullable integer - The first time Synapse tried and failed to reach the remote server, in ms. This is `null` if communication with the remote server has never failed. - `last_successful_stream_ordering` - nullable integer - The stream ordering of the most diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 42df53d52b..a06b3d8a06 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -521,7 +521,7 @@ listeners: Example configuration #2: ```yaml listeners: - # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy + # Insecure HTTP listener: for when matrix traffic passes through a reverse proxy # that unwraps TLS. # # If you plan to use a reverse proxy, please see @@ -2945,7 +2945,7 @@ Normally, the connection to the key server is validated via TLS certificates. Additional security can be provided by configuring a `verify key`, which will make synapse check that the response is signed by that key. -This setting supercedes an older setting named `perspectives`. The old format +This setting supersedes an older setting named `perspectives`. The old format is still supported for backwards-compatibility, but it is deprecated. `trusted_key_servers` defaults to matrix.org, but using it will generate a From c1c6c95d72b5c9fc6c0e527eeb6b9d3a59889b16 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 8 Sep 2023 14:50:13 +0100 Subject: [PATCH 446/562] Log values at DEBUG level with execute_values (#16281) --- changelog.d/16281.misc | 1 + synapse/storage/database.py | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16281.misc diff --git a/changelog.d/16281.misc b/changelog.d/16281.misc new file mode 100644 index 0000000000..de48396aff --- /dev/null +++ b/changelog.d/16281.misc @@ -0,0 +1 @@ +Include values in SQL debug when using `execute_values` with Postgres. 
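Note: the values become visible at DEBUG level because `_do_execute` only
logs the positional arguments it is handed; anything captured by the
lambda's closure never reaches the logger. A minimal self-contained sketch
of the pattern (the names below are stand-ins; the real `_do_execute` and
`execute_values` do more):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("sql")

    def do_execute(func, sql, *args):
        # Stand-in for LoggingTransaction._do_execute: a wrapper can only
        # log what it receives as arguments, not what a closure captures.
        logger.debug("[SQL] %s", sql)
        if args:
            logger.debug("[SQL values] %r", args[0])
        return func(sql, *args)

    def execute_values(sql, values):
        # Stand-in for psycopg2.extras.execute_values.
        pass

    values = [(1, "a"), (2, "b")]

    # Before this change: `values` lives only in the closure, so the DEBUG
    # log shows the SQL but not the values.
    do_execute(lambda the_sql: execute_values(the_sql, values), "INSERT ...")

    # After this change: `values` is threaded through the wrapper as an
    # explicit argument and therefore gets logged alongside the SQL.
    do_execute(
        lambda the_sql, the_values: execute_values(the_sql, the_values),
        "INSERT ...",
        values,
    )
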
diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 55ac313f33..6c5fcdcec3 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -422,10 +422,11 @@ class LoggingTransaction: return self._do_execute( # TODO: is it safe for values to be Iterable[Iterable[Any]] here? # https://www.psycopg.org/docs/extras.html?highlight=execute_batch#psycopg2.extras.execute_values says values should be Sequence[Sequence] - lambda the_sql: execute_values( - self.txn, the_sql, values, template=template, fetch=fetch + lambda the_sql, the_values: execute_values( + self.txn, the_sql, the_values, template=template, fetch=fetch ), sql, + values, ) def execute(self, sql: str, parameters: SQLQueryParameters = ()) -> None: From aa483cb4c905bbe483ffe8e8a8f439655a57481b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 8 Sep 2023 11:24:36 -0400 Subject: [PATCH 447/562] Update ruff config (#16283) Enable additional checks & clean-up unneeded configuration. --- changelog.d/16283.misc | 1 + contrib/cmdclient/http.py | 2 -- docker/start.py | 2 +- pyproject.toml | 28 +++++++++++-------- scripts-dev/mypy_synapse_plugin.py | 7 +++-- synapse/_scripts/update_synapse_database.py | 1 - synapse/events/snapshot.py | 2 -- synapse/media/url_previewer.py | 4 +-- synapse/storage/background_updates.py | 2 -- synmark/suites/logging.py | 2 +- tests/handlers/test_device.py | 2 +- tests/handlers/test_federation.py | 2 +- tests/logging/test_remote_handler.py | 12 ++++---- .../replication/tcp/streams/test_to_device.py | 2 +- tests/rest/admin/test_federation.py | 6 ++-- tests/rest/client/test_account.py | 2 +- tests/rest/client/test_login.py | 8 +++--- tests/rest/client/test_register.py | 6 ++-- tests/storage/databases/main/test_lock.py | 2 +- tests/storage/test_event_chain.py | 6 ++-- tests/storage/test_event_federation.py | 6 ++-- tests/storage/test_profile.py | 4 +-- tests/storage/test_txn_limit.py | 2 +- tests/storage/test_user_filters.py | 4 +-- tests/test_visibility.py | 8 +++--- tests/util/caches/test_descriptors.py | 4 +-- 26 files changed, 63 insertions(+), 64 deletions(-) create mode 100644 changelog.d/16283.misc diff --git a/changelog.d/16283.misc b/changelog.d/16283.misc new file mode 100644 index 0000000000..4b9d6f76ae --- /dev/null +++ b/changelog.d/16283.misc @@ -0,0 +1 @@ +Enable additional linting checks. diff --git a/contrib/cmdclient/http.py b/contrib/cmdclient/http.py index 1310f078e3..508de5dcbd 100644 --- a/contrib/cmdclient/http.py +++ b/contrib/cmdclient/http.py @@ -37,7 +37,6 @@ class HttpClient: Deferred: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. """ - pass def get_json(self, url, args=None): """Gets some json from the given host homeserver and path @@ -53,7 +52,6 @@ class HttpClient: Deferred: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. 
""" - pass class TwistedHttpClient(HttpClient): diff --git a/docker/start.py b/docker/start.py index aebc7e4aaa..12c444da9a 100755 --- a/docker/start.py +++ b/docker/start.py @@ -239,7 +239,7 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None: log("Could not find %s, will not use" % (jemallocpath,)) # if there are no config files passed to synapse, try adding the default file - if not any(p.startswith("--config-path") or p.startswith("-c") for p in args): + if not any(p.startswith(("--config-path", "-c")) for p in args): config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data") config_path = environ.get( "SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml" diff --git a/pyproject.toml b/pyproject.toml index 5b43abe907..8747782b29 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,33 +43,39 @@ target-version = ['py38', 'py39', 'py310', 'py311'] [tool.ruff] line-length = 88 -# See https://github.com/charliermarsh/ruff/#pycodestyle +# See https://beta.ruff.rs/docs/rules/#error-e # for error codes. The ones we ignore are: -# E731: do not assign a lambda expression, use a def # E501: Line too long (black enforces this for us) +# E731: do not assign a lambda expression, use a def # # flake8-bugbear compatible checks. Its error codes are described at -# https://github.com/charliermarsh/ruff/#flake8-bugbear -# B019: Use of functools.lru_cache or functools.cache on methods can lead to memory leaks +# https://beta.ruff.rs/docs/rules/#flake8-bugbear-b # B023: Functions defined inside a loop must not use variables redefined in the loop -# B024: Abstract base class with no abstract method. ignore = [ - "B019", "B023", - "B024", "E501", "E731", ] select = [ - # pycodestyle checks. + # pycodestyle "E", "W", - # pyflakes checks. + # pyflakes "F", - # flake8-bugbear checks. + # flake8-bugbear "B0", - # flake8-comprehensions checks. + # flake8-comprehensions "C4", + # flake8-2020 + "YTT", + # flake8-slots + "SLOT", + # flake8-debugger + "T10", + # flake8-pie + "PIE", + # flake8-executable + "EXE", ] [tool.isort] diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py index 8058e9c993..a0b3854f1b 100644 --- a/scripts-dev/mypy_synapse_plugin.py +++ b/scripts-dev/mypy_synapse_plugin.py @@ -30,9 +30,10 @@ class SynapsePlugin(Plugin): self, fullname: str ) -> Optional[Callable[[MethodSigContext], CallableType]]: if fullname.startswith( - "synapse.util.caches.descriptors.CachedFunction.__call__" - ) or fullname.startswith( - "synapse.util.caches.descriptors._LruCachedFunction.__call__" + ( + "synapse.util.caches.descriptors.CachedFunction.__call__", + "synapse.util.caches.descriptors._LruCachedFunction.__call__", + ) ): return cached_function_method_signature return None diff --git a/synapse/_scripts/update_synapse_database.py b/synapse/_scripts/update_synapse_database.py index f97aecf8d5..992ae43881 100644 --- a/synapse/_scripts/update_synapse_database.py +++ b/synapse/_scripts/update_synapse_database.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index a9e3d4e556..5bdfa3a8ac 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -55,7 +55,6 @@ class UnpersistedEventContextBase(ABC): A method to convert an UnpersistedEventContext to an EventContext, suitable for sending to the database with the associated event. 
""" - pass @abstractmethod async def get_prev_state_ids( @@ -69,7 +68,6 @@ class UnpersistedEventContextBase(ABC): state_filter: specifies the type of state event to fetch from DB, example: EventTypes.JoinRules """ - pass @attr.s(slots=True, auto_attribs=True) diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py index 70b32cee17..9b5a3dd5f4 100644 --- a/synapse/media/url_previewer.py +++ b/synapse/media/url_previewer.py @@ -846,9 +846,7 @@ def _is_media(content_type: str) -> bool: def _is_html(content_type: str) -> bool: content_type = content_type.lower() - return content_type.startswith("text/html") or content_type.startswith( - "application/xhtml" - ) + return content_type.startswith(("text/html", "application/xhtml")) def _is_json(content_type: str) -> bool: diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 7619f405fa..99ebd96f84 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -62,7 +62,6 @@ class Constraint(metaclass=abc.ABCMeta): @abc.abstractmethod def make_check_clause(self, table: str) -> str: """Returns an SQL expression that checks the row passes the constraint.""" - pass @abc.abstractmethod def make_constraint_clause_postgres(self) -> str: @@ -70,7 +69,6 @@ class Constraint(metaclass=abc.ABCMeta): Only used on Postgres DBs """ - pass @attr.s(auto_attribs=True) diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py index 8beb077e0a..04e5b29dc9 100644 --- a/synmark/suites/logging.py +++ b/synmark/suites/logging.py @@ -112,7 +112,7 @@ async def main(reactor, loops): start = perf_counter() # Send a bunch of useful messages - for i in range(0, loops): + for i in range(loops): logger.info("test message %s", i) if len(handler._buffer) == handler.maximum_buffer: diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 9659a4a355..79d327499b 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -223,7 +223,7 @@ class DeviceTestCase(unittest.HomeserverTestCase): # queue a bunch of messages in the inbox requester = create_requester(sender, device_id=DEVICE_ID) - for i in range(0, DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT + 10): + for i in range(DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT + 10): self.get_success( self.device_message_handler.send_device_message( requester, "message_type", {receiver: {"*": {"val": i}}} diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 21d63ab1f2..4fc0742413 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -262,7 +262,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): if (ev.type, ev.state_key) in {("m.room.create", ""), ("m.room.member", remote_server_user_id)} ] - for _ in range(0, 8): + for _ in range(8): event = make_event_from_dict( self.add_hashes_and_signatures_from_other_server( { diff --git a/tests/logging/test_remote_handler.py b/tests/logging/test_remote_handler.py index 5191e31a8a..45eac100bf 100644 --- a/tests/logging/test_remote_handler.py +++ b/tests/logging/test_remote_handler.py @@ -78,11 +78,11 @@ class RemoteHandlerTestCase(LoggerCleanupMixin, TestCase): logger = self.get_logger(handler) # Send some debug messages - for i in range(0, 3): + for i in range(3): logger.debug("debug %s" % (i,)) # Send a bunch of useful messages - for i in range(0, 7): + for i in range(7): logger.info("info %s" % (i,)) # The last debug message pushes it past the maximum buffer @@ 
-108,15 +108,15 @@ class RemoteHandlerTestCase(LoggerCleanupMixin, TestCase): logger = self.get_logger(handler) # Send some debug messages - for i in range(0, 3): + for i in range(3): logger.debug("debug %s" % (i,)) # Send a bunch of useful messages - for i in range(0, 10): + for i in range(10): logger.warning("warn %s" % (i,)) # Send a bunch of info messages - for i in range(0, 3): + for i in range(3): logger.info("info %s" % (i,)) # The last debug message pushes it past the maximum buffer @@ -144,7 +144,7 @@ class RemoteHandlerTestCase(LoggerCleanupMixin, TestCase): logger = self.get_logger(handler) # Send a bunch of useful messages - for i in range(0, 20): + for i in range(20): logger.warning("warn %s" % (i,)) # Allow the reconnection diff --git a/tests/replication/tcp/streams/test_to_device.py b/tests/replication/tcp/streams/test_to_device.py index fb9eac668f..ab379e8cf1 100644 --- a/tests/replication/tcp/streams/test_to_device.py +++ b/tests/replication/tcp/streams/test_to_device.py @@ -49,7 +49,7 @@ class ToDeviceStreamTestCase(BaseStreamTestCase): # add messages to the device inbox for user1 up until the # limit defined for a stream update batch - for i in range(0, _STREAM_UPDATE_TARGET_ROW_COUNT): + for i in range(_STREAM_UPDATE_TARGET_ROW_COUNT): msg["content"] = {"device": {}} messages = {user1: {"device": msg}} diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py index 4c7864c629..0e2824d1b5 100644 --- a/tests/rest/admin/test_federation.py +++ b/tests/rest/admin/test_federation.py @@ -510,7 +510,7 @@ class FederationTestCase(unittest.HomeserverTestCase): Args: number_destinations: Number of destinations to be created """ - for i in range(0, number_destinations): + for i in range(number_destinations): dest = f"sub{i}.example.com" self._create_destination(dest, 50, 50, 50, 100) @@ -690,7 +690,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): self._check_fields(channel_desc.json_body["rooms"]) # test that both lists have different directions - for i in range(0, number_rooms): + for i in range(number_rooms): self.assertEqual( channel_asc.json_body["rooms"][i]["room_id"], channel_desc.json_body["rooms"][number_rooms - 1 - i]["room_id"], @@ -777,7 +777,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase): Args: number_rooms: Number of rooms to be created """ - for _ in range(0, number_rooms): + for _ in range(number_rooms): room_id = self.helper.create_room_as( self.admin_user, tok=self.admin_user_tok ) diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 4a0eca5b30..cffbda9a7d 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -575,7 +575,7 @@ class DeactivateTestCase(unittest.HomeserverTestCase): # create a bunch of users and add keys for them users = [] - for i in range(0, 20): + for i in range(20): user_id = self.register_user("missPiggy" + str(i), "test") users.append((user_id,)) diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index a2a6589564..768d7ad4c2 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -176,10 +176,10 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): def test_POST_ratelimiting_per_address(self) -> None: # Create different users so we're sure not to be bothered by the per-user # ratelimiter. 
- for i in range(0, 6): + for i in range(6): self.register_user("kermit" + str(i), "monkey") - for i in range(0, 6): + for i in range(6): params = { "type": "m.login.password", "identifier": {"type": "m.id.user", "user": "kermit" + str(i)}, @@ -228,7 +228,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): def test_POST_ratelimiting_per_account(self) -> None: self.register_user("kermit", "monkey") - for i in range(0, 6): + for i in range(6): params = { "type": "m.login.password", "identifier": {"type": "m.id.user", "user": "kermit"}, @@ -277,7 +277,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): def test_POST_ratelimiting_per_account_failed_attempts(self) -> None: self.register_user("kermit", "monkey") - for i in range(0, 6): + for i in range(6): params = { "type": "m.login.password", "identifier": {"type": "m.id.user", "user": "kermit"}, diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index c33393dc28..ba4e017a0e 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -169,7 +169,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): @override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}}) def test_POST_ratelimiting_guest(self) -> None: - for i in range(0, 6): + for i in range(6): url = self.url + b"?kind=guest" channel = self.make_request(b"POST", url, b"{}") @@ -187,7 +187,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): @override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}}) def test_POST_ratelimiting(self) -> None: - for i in range(0, 6): + for i in range(6): request_data = { "username": "kermit" + str(i), "password": "monkey", @@ -1223,7 +1223,7 @@ class RegistrationTokenValidityRestServletTestCase(unittest.HomeserverTestCase): def test_GET_ratelimiting(self) -> None: token = "1234" - for i in range(0, 6): + for i in range(6): channel = self.make_request( b"GET", f"{self.url}?token={token}", diff --git a/tests/storage/databases/main/test_lock.py b/tests/storage/databases/main/test_lock.py index 650b4941ba..35f77052a7 100644 --- a/tests/storage/databases/main/test_lock.py +++ b/tests/storage/databases/main/test_lock.py @@ -382,7 +382,7 @@ class ReadWriteLockTestCase(unittest.HomeserverTestCase): self.get_success(lock.__aenter__()) # Wait for ages with the lock, we should not be able to get the lock. - for _ in range(0, 10): + for _ in range(10): self.reactor.advance((_RENEWAL_INTERVAL_MS / 1000)) lock2 = self.get_success( diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py index 48ebfadaab..b55dd07f14 100644 --- a/tests/storage/test_event_chain.py +++ b/tests/storage/test_event_chain.py @@ -664,7 +664,7 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase): # Add a bunch of state so that it takes multiple iterations of the # background update to process the room. - for i in range(0, 150): + for i in range(150): self.helper.send_state( room_id, event_type="m.test", body={"index": i}, tok=self.token ) @@ -718,12 +718,12 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase): # Add a bunch of state so that it takes multiple iterations of the # background update to process the room. 
- for i in range(0, 150): + for i in range(150): self.helper.send_state( room_id1, event_type="m.test", body={"index": i}, tok=self.token ) - for i in range(0, 150): + for i in range(150): self.helper.send_state( room_id2, event_type="m.test", body={"index": i}, tok=self.token ) diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 7a4ecab2d5..d3e20f44b2 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -227,7 +227,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): (room_id, event_id), ) - for i in range(0, 20): + for i in range(20): self.get_success( self.store.db_pool.runInteraction("insert", insert_event, i) ) @@ -235,7 +235,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): # this should get the last ten r = self.get_success(self.store.get_prev_events_for_room(room_id)) self.assertEqual(10, len(r)) - for i in range(0, 10): + for i in range(10): self.assertEqual("$event_%i:local" % (19 - i), r[i]) def test_get_rooms_with_many_extremities(self) -> None: @@ -277,7 +277,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): (room_id, event_id), ) - for i in range(0, 20): + for i in range(20): self.get_success( self.store.db_pool.runInteraction("insert", insert_event, i, room1) ) diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index fe5bb77913..95f99f4130 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -82,7 +82,7 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase): self.get_success(self.store.db_pool.runInteraction("", f)) - for i in range(0, 70): + for i in range(70): self.get_success( self.store.db_pool.simple_insert( "profiles", @@ -115,7 +115,7 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase): ) expected_values = [] - for i in range(0, 70): + for i in range(70): expected_values.append((f"@hello{i:02}:{self.hs.hostname}",)) res = self.get_success( diff --git a/tests/storage/test_txn_limit.py b/tests/storage/test_txn_limit.py index 15ea4770bd..22f074982f 100644 --- a/tests/storage/test_txn_limit.py +++ b/tests/storage/test_txn_limit.py @@ -38,5 +38,5 @@ class SQLTransactionLimitTestCase(unittest.HomeserverTestCase): db_pool = self.hs.get_datastores().databases[0] # force txn limit to roll over at least once - for _ in range(0, 1001): + for _ in range(1001): self.get_success_or_raise(db_pool.runInteraction("test_select", do_select)) diff --git a/tests/storage/test_user_filters.py b/tests/storage/test_user_filters.py index bab802f56e..d4637d9d1e 100644 --- a/tests/storage/test_user_filters.py +++ b/tests/storage/test_user_filters.py @@ -45,7 +45,7 @@ class UserFiltersStoreTestCase(unittest.HomeserverTestCase): self.get_success(self.store.db_pool.runInteraction("", f)) - for i in range(0, 70): + for i in range(70): self.get_success( self.store.db_pool.simple_insert( "user_filters", @@ -82,7 +82,7 @@ class UserFiltersStoreTestCase(unittest.HomeserverTestCase): ) expected_values = [] - for i in range(0, 70): + for i in range(70): expected_values.append((f"@hello{i:02}:{self.hs.hostname}",)) res = self.get_success( diff --git a/tests/test_visibility.py b/tests/test_visibility.py index a46c29ddf4..434902c3f0 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -51,12 +51,12 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase): # before we do that, we persist some other events to act as state. 
self._inject_visibility("@admin:hs", "joined") - for i in range(0, 10): + for i in range(10): self._inject_room_member("@resident%i:hs" % i) events_to_filter = [] - for i in range(0, 10): + for i in range(10): user = "@user%i:%s" % (i, "test_server" if i == 5 else "other_server") evt = self._inject_room_member(user, extra_content={"a": "b"}) events_to_filter.append(evt) @@ -74,7 +74,7 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase): ) # the result should be 5 redacted events, and 5 unredacted events. - for i in range(0, 5): + for i in range(5): self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id) self.assertNotIn("a", filtered[i].content) @@ -177,7 +177,7 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase): ) ) - for i in range(0, len(events_to_filter)): + for i in range(len(events_to_filter)): self.assertEqual( events_to_filter[i].event_id, filtered[i].event_id, diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 064f4987df..168419f440 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -623,14 +623,14 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase): a = A() - for k in range(0, 12): + for k in range(12): yield a.func(k) self.assertEqual(callcount[0], 12) # There must have been at least 2 evictions, meaning if we calculate # all 12 values again, we must get called at least 2 more times - for k in range(0, 12): + for k in range(12): yield a.func(k) self.assertTrue( From a0ed55ef129285e87d9947ae578ff275958169f7 Mon Sep 17 00:00:00 2001 From: V02460 Date: Fri, 8 Sep 2023 18:55:43 +0200 Subject: [PATCH 448/562] Upgrade CI run of Python 3.12 from rc1 to rc2 (#16280) --- .ci/scripts/calculate_jobs.py | 2 +- changelog.d/16280.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16280.misc diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index 661887e209..08af332b6d 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -47,7 +47,7 @@ if not IS_PR: "database": "sqlite", "extras": "all", } - for version in ("3.9", "3.10", "3.11", "3.12.0-rc.1") + for version in ("3.9", "3.10", "3.11", "3.12.0-rc.2") ) trial_postgres_tests = [ diff --git a/changelog.d/16280.misc b/changelog.d/16280.misc new file mode 100644 index 0000000000..2d8b414a3b --- /dev/null +++ b/changelog.d/16280.misc @@ -0,0 +1 @@ +Upgrade CI run of Python 3.12 from rc1 to rc2. From edd83f23b710f0caae05d5766b474de3b6f24e9e Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 8 Sep 2023 19:29:38 +0100 Subject: [PATCH 449/562] Improve type hints for attrs classes (#16276) --- changelog.d/16276.misc | 1 + synapse/config/oembed.py | 2 +- synapse/storage/controllers/persist_events.py | 8 +++--- synapse/util/async_helpers.py | 25 ++++++++----------- synapse/util/caches/dictionary_cache.py | 10 +++----- synapse/util/caches/expiringcache.py | 20 +++++++++------ synapse/util/caches/ttlcache.py | 10 ++++---- 7 files changed, 37 insertions(+), 39 deletions(-) create mode 100644 changelog.d/16276.misc diff --git a/changelog.d/16276.misc b/changelog.d/16276.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16276.misc @@ -0,0 +1 @@ +Improve type hints. 
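Note on the recurring pattern in this diff: attrs-generated slotted classes
can now subclass Generic directly (the limitation previously tracked in
python-attrs/attrs#313 and cited by the removed comments), so the
`# should be: Generic[...]` workarounds and their `Any`-typed fields become
real type parameters. A minimal sketch, assuming a recent attrs (the class
name here is illustrative, not from the patch):

    from typing import Generic, TypeVar

    import attr

    T = TypeVar("T")

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class Entry(Generic[T]):
        # Combining slots=True with Generic used to break; current attrs
        # rebuilds the class correctly, so `value` can be typed as T
        # instead of Any.
        value: T

    int_entry: Entry[int] = Entry(3)
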
diff --git a/synapse/config/oembed.py b/synapse/config/oembed.py index d7959639ee..59bc0b55f4 100644 --- a/synapse/config/oembed.py +++ b/synapse/config/oembed.py @@ -30,7 +30,7 @@ class OEmbedEndpointConfig: # The API endpoint to fetch. api_endpoint: str # The patterns to match. - url_patterns: List[Pattern] + url_patterns: List[Pattern[str]] # The supported formats. formats: Optional[List[str]] diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index abd1d149db..6864f93090 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -154,12 +154,13 @@ class _UpdateCurrentStateTask: _EventPersistQueueTask = Union[_PersistEventsTask, _UpdateCurrentStateTask] +_PersistResult = TypeVar("_PersistResult") @attr.s(auto_attribs=True, slots=True) -class _EventPersistQueueItem: +class _EventPersistQueueItem(Generic[_PersistResult]): task: _EventPersistQueueTask - deferred: ObservableDeferred + deferred: ObservableDeferred[_PersistResult] parent_opentracing_span_contexts: List = attr.ib(factory=list) """A list of opentracing spans waiting for this batch""" @@ -168,9 +169,6 @@ class _EventPersistQueueItem: """The opentracing span under which the persistence actually happened""" -_PersistResult = TypeVar("_PersistResult") - - class _EventPeristenceQueue(Generic[_PersistResult]): """Queues up tasks so that they can be processed with only one concurrent transaction per room. diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 943ad54456..0cbeb0c365 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -19,6 +19,7 @@ import collections import inspect import itertools import logging +import typing from contextlib import asynccontextmanager from typing import ( Any, @@ -29,6 +30,7 @@ from typing import ( Collection, Coroutine, Dict, + Generator, Generic, Hashable, Iterable, @@ -398,7 +400,7 @@ class _LinearizerEntry: # The number of things executing. count: int # Deferreds for the things blocked from executing. - deferreds: collections.OrderedDict + deferreds: typing.OrderedDict["defer.Deferred[None]", Literal[1]] class Linearizer: @@ -717,30 +719,25 @@ def timeout_deferred( return new_d -# This class can't be generic because it uses slots with attrs. -# See: https://github.com/python-attrs/attrs/issues/313 @attr.s(slots=True, frozen=True, auto_attribs=True) -class DoneAwaitable: # should be: Generic[R] +class DoneAwaitable(Awaitable[R]): """Simple awaitable that returns the provided value.""" - value: Any # should be: R + value: R - def __await__(self) -> Any: - return self - - def __iter__(self) -> "DoneAwaitable": - return self - - def __next__(self) -> None: - raise StopIteration(self.value) + def __await__(self) -> Generator[Any, None, R]: + yield None + return self.value def maybe_awaitable(value: Union[Awaitable[R], R]) -> Awaitable[R]: """Convert a value to an awaitable if not already an awaitable.""" if inspect.isawaitable(value): - assert isinstance(value, Awaitable) return value + # For some reason mypy doesn't deduce that value is not Awaitable here, even though + # inspect.isawaitable returns a TypeGuard. 
+ assert not isinstance(value, Awaitable) return DoneAwaitable(value) diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index 5eaf70c7ab..2fbc7b1e6c 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -14,7 +14,7 @@ import enum import logging import threading -from typing import Any, Dict, Generic, Iterable, Optional, Set, Tuple, TypeVar, Union +from typing import Dict, Generic, Iterable, Optional, Set, Tuple, TypeVar, Union import attr from typing_extensions import Literal @@ -33,10 +33,8 @@ DKT = TypeVar("DKT") DV = TypeVar("DV") -# This class can't be generic because it uses slots with attrs. -# See: https://github.com/python-attrs/attrs/issues/313 @attr.s(slots=True, frozen=True, auto_attribs=True) -class DictionaryEntry: # should be: Generic[DKT, DV]. +class DictionaryEntry(Generic[DKT, DV]): """Returned when getting an entry from the cache If `full` is true then `known_absent` will be the empty set. @@ -50,8 +48,8 @@ class DictionaryEntry: # should be: Generic[DKT, DV]. """ full: bool - known_absent: Set[Any] # should be: Set[DKT] - value: Dict[Any, Any] # should be: Dict[DKT, DV] + known_absent: Set[DKT] + value: Dict[DKT, DV] def __len__(self) -> int: return len(self.value) diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 01ad02af67..8e4c34039d 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -14,7 +14,7 @@ import logging from collections import OrderedDict -from typing import Any, Generic, Optional, TypeVar, Union, overload +from typing import Any, Generic, Iterable, Optional, TypeVar, Union, overload import attr from typing_extensions import Literal @@ -73,7 +73,7 @@ class ExpiringCache(Generic[KT, VT]): self._expiry_ms = expiry_ms self._reset_expiry_on_get = reset_expiry_on_get - self._cache: OrderedDict[KT, _CacheEntry] = OrderedDict() + self._cache: OrderedDict[KT, _CacheEntry[VT]] = OrderedDict() self.iterable = iterable @@ -100,7 +100,10 @@ class ExpiringCache(Generic[KT, VT]): while self._max_size and len(self) > self._max_size: _key, value = self._cache.popitem(last=False) if self.iterable: - self.metrics.inc_evictions(EvictionReason.size, len(value.value)) + # type-ignore, here and below: if self.iterable is true, then the value + # type VT should be Sized (i.e. have a __len__ method). We don't enforce + # this via the type system at present. 
+ self.metrics.inc_evictions(EvictionReason.size, len(value.value)) # type: ignore[arg-type] else: self.metrics.inc_evictions(EvictionReason.size) @@ -134,7 +137,7 @@ class ExpiringCache(Generic[KT, VT]): return default if self.iterable: - self.metrics.inc_evictions(EvictionReason.invalidation, len(value.value)) + self.metrics.inc_evictions(EvictionReason.invalidation, len(value.value)) # type: ignore[arg-type] else: self.metrics.inc_evictions(EvictionReason.invalidation) @@ -182,7 +185,7 @@ class ExpiringCache(Generic[KT, VT]): for k in keys_to_delete: value = self._cache.pop(k) if self.iterable: - self.metrics.inc_evictions(EvictionReason.time, len(value.value)) + self.metrics.inc_evictions(EvictionReason.time, len(value.value)) # type: ignore[arg-type] else: self.metrics.inc_evictions(EvictionReason.time) @@ -195,7 +198,8 @@ class ExpiringCache(Generic[KT, VT]): def __len__(self) -> int: if self.iterable: - return sum(len(entry.value) for entry in self._cache.values()) + g: Iterable[int] = (len(entry.value) for entry in self._cache.values()) # type: ignore[arg-type] + return sum(g) else: return len(self._cache) @@ -218,6 +222,6 @@ class ExpiringCache(Generic[KT, VT]): @attr.s(slots=True, auto_attribs=True) -class _CacheEntry: +class _CacheEntry(Generic[VT]): time: int - value: Any + value: VT diff --git a/synapse/util/caches/ttlcache.py b/synapse/util/caches/ttlcache.py index f6b3ee31e4..48a6e4a906 100644 --- a/synapse/util/caches/ttlcache.py +++ b/synapse/util/caches/ttlcache.py @@ -35,10 +35,10 @@ class TTLCache(Generic[KT, VT]): def __init__(self, cache_name: str, timer: Callable[[], float] = time.time): # map from key to _CacheEntry - self._data: Dict[KT, _CacheEntry] = {} + self._data: Dict[KT, _CacheEntry[KT, VT]] = {} # the _CacheEntries, sorted by expiry time - self._expiry_list: SortedList[_CacheEntry] = SortedList() + self._expiry_list: SortedList[_CacheEntry[KT, VT]] = SortedList() self._timer = timer @@ -160,11 +160,11 @@ class TTLCache(Generic[KT, VT]): @attr.s(frozen=True, slots=True, auto_attribs=True) -class _CacheEntry: # Should be Generic[KT, VT]. See python-attrs/attrs#313 +class _CacheEntry(Generic[KT, VT]): """TTLCache entry""" # expiry_time is the first attribute, so that entries are sorted by expiry. 
expiry_time: float ttl: float - key: Any # should be KT - value: Any # should be VT + key: KT + value: VT From e8ebc730ca76cb37017acc7d6b7ff28230ec3a97 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Sep 2023 09:53:24 +0100 Subject: [PATCH 450/562] Bump serde_json from 1.0.105 to 1.0.106 (#16296) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 95a713e437..4e233b1683 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -352,9 +352,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.105" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" +checksum = "2cc66a619ed80bf7a0f6b17dd063a84b88f6dea1813737cf469aef1d081142c2" dependencies = [ "itoa", "ryu", From aafcaf277e9dc4116bafac056d0cf740460ee10f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Sep 2023 09:53:46 +0100 Subject: [PATCH 451/562] Bump black from 23.7.0 to 23.9.1 (#16295) --- poetry.lock | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/poetry.lock b/poetry.lock index b577ae4f18..3e9482bdbc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -148,33 +148,33 @@ lxml = ["lxml"] [[package]] name = "black" -version = "23.7.0" +version = "23.9.1" description = "The uncompromising code formatter." optional = false python-versions = ">=3.8" files = [ - {file = "black-23.7.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:5c4bc552ab52f6c1c506ccae05681fab58c3f72d59ae6e6639e8885e94fe2587"}, - {file = "black-23.7.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:552513d5cd5694590d7ef6f46e1767a4df9af168d449ff767b13b084c020e63f"}, - {file = "black-23.7.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:86cee259349b4448adb4ef9b204bb4467aae74a386bce85d56ba4f5dc0da27be"}, - {file = "black-23.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:501387a9edcb75d7ae8a4412bb8749900386eaef258f1aefab18adddea1936bc"}, - {file = "black-23.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:fb074d8b213749fa1d077d630db0d5f8cc3b2ae63587ad4116e8a436e9bbe995"}, - {file = "black-23.7.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:b5b0ee6d96b345a8b420100b7d71ebfdd19fab5e8301aff48ec270042cd40ac2"}, - {file = "black-23.7.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:893695a76b140881531062d48476ebe4a48f5d1e9388177e175d76234ca247cd"}, - {file = "black-23.7.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:c333286dc3ddca6fdff74670b911cccedacb4ef0a60b34e491b8a67c833b343a"}, - {file = "black-23.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:831d8f54c3a8c8cf55f64d0422ee875eecac26f5f649fb6c1df65316b67c8926"}, - {file = "black-23.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:7f3bf2dec7d541b4619b8ce526bda74a6b0bffc480a163fed32eb8b3c9aed8ad"}, - {file = "black-23.7.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:f9062af71c59c004cd519e2fb8f5d25d39e46d3af011b41ab43b9c74e27e236f"}, - {file = "black-23.7.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:01ede61aac8c154b55f35301fac3e730baf0c9cf8120f65a9cd61a81cfb4a0c3"}, - {file = "black-23.7.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:327a8c2550ddc573b51e2c352adb88143464bb9d92c10416feb86b0f5aee5ff6"}, - {file = 
"black-23.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d1c6022b86f83b632d06f2b02774134def5d4d4f1dac8bef16d90cda18ba28a"}, - {file = "black-23.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:27eb7a0c71604d5de083757fbdb245b1a4fae60e9596514c6ec497eb63f95320"}, - {file = "black-23.7.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:8417dbd2f57b5701492cd46edcecc4f9208dc75529bcf76c514864e48da867d9"}, - {file = "black-23.7.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:47e56d83aad53ca140da0af87678fb38e44fd6bc0af71eebab2d1f59b1acf1d3"}, - {file = "black-23.7.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:25cc308838fe71f7065df53aedd20327969d05671bac95b38fdf37ebe70ac087"}, - {file = "black-23.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:642496b675095d423f9b8448243336f8ec71c9d4d57ec17bf795b67f08132a91"}, - {file = "black-23.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:ad0014efc7acf0bd745792bd0d8857413652979200ab924fbf239062adc12491"}, - {file = "black-23.7.0-py3-none-any.whl", hash = "sha256:9fd59d418c60c0348505f2ddf9609c1e1de8e7493eab96198fc89d9f865e7a96"}, - {file = "black-23.7.0.tar.gz", hash = "sha256:022a582720b0d9480ed82576c920a8c1dde97cc38ff11d8d8859b3bd6ca9eedb"}, + {file = "black-23.9.1-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:d6bc09188020c9ac2555a498949401ab35bb6bf76d4e0f8ee251694664df6301"}, + {file = "black-23.9.1-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:13ef033794029b85dfea8032c9d3b92b42b526f1ff4bf13b2182ce4e917f5100"}, + {file = "black-23.9.1-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:75a2dc41b183d4872d3a500d2b9c9016e67ed95738a3624f4751a0cb4818fe71"}, + {file = "black-23.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13a2e4a93bb8ca74a749b6974925c27219bb3df4d42fc45e948a5d9feb5122b7"}, + {file = "black-23.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:adc3e4442eef57f99b5590b245a328aad19c99552e0bdc7f0b04db6656debd80"}, + {file = "black-23.9.1-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:8431445bf62d2a914b541da7ab3e2b4f3bc052d2ccbf157ebad18ea126efb91f"}, + {file = "black-23.9.1-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:8fc1ddcf83f996247505db6b715294eba56ea9372e107fd54963c7553f2b6dfe"}, + {file = "black-23.9.1-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:7d30ec46de88091e4316b17ae58bbbfc12b2de05e069030f6b747dfc649ad186"}, + {file = "black-23.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:031e8c69f3d3b09e1aa471a926a1eeb0b9071f80b17689a655f7885ac9325a6f"}, + {file = "black-23.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:538efb451cd50f43aba394e9ec7ad55a37598faae3348d723b59ea8e91616300"}, + {file = "black-23.9.1-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:638619a559280de0c2aa4d76f504891c9860bb8fa214267358f0a20f27c12948"}, + {file = "black-23.9.1-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:a732b82747235e0542c03bf352c126052c0fbc458d8a239a94701175b17d4855"}, + {file = "black-23.9.1-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:cf3a4d00e4cdb6734b64bf23cd4341421e8953615cba6b3670453737a72ec204"}, + {file = "black-23.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf99f3de8b3273a8317681d8194ea222f10e0133a24a7548c73ce44ea1679377"}, + {file = "black-23.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:14f04c990259576acd093871e7e9b14918eb28f1866f91968ff5524293f9c573"}, + {file = "black-23.9.1-cp39-cp39-macosx_10_16_arm64.whl", hash = 
"sha256:c619f063c2d68f19b2d7270f4cf3192cb81c9ec5bc5ba02df91471d0b88c4c5c"}, + {file = "black-23.9.1-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:6a3b50e4b93f43b34a9d3ef00d9b6728b4a722c997c99ab09102fd5efdb88325"}, + {file = "black-23.9.1-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:c46767e8df1b7beefb0899c4a95fb43058fa8500b6db144f4ff3ca38eb2f6393"}, + {file = "black-23.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50254ebfa56aa46a9fdd5d651f9637485068a1adf42270148cd101cdf56e0ad9"}, + {file = "black-23.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:403397c033adbc45c2bd41747da1f7fc7eaa44efbee256b53842470d4ac5a70f"}, + {file = "black-23.9.1-py3-none-any.whl", hash = "sha256:6ccd59584cc834b6d127628713e4b6b968e5f79572da66284532525a042549f9"}, + {file = "black-23.9.1.tar.gz", hash = "sha256:24b6b3ff5c6d9ea08a8888f6977eae858e1f340d7260cf56d70a49823236b62d"}, ] [package.dependencies] @@ -184,7 +184,7 @@ packaging = ">=22.0" pathspec = ">=0.9.0" platformdirs = ">=2" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} +typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} [package.extras] colorama = ["colorama (>=0.4.3)"] From ee65d8f750cf3e894939a4f5ca9e65f90caf994e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Sep 2023 09:53:56 +0100 Subject: [PATCH 452/562] Bump mypy-zope from 1.0.0 to 1.0.1 (#16291) --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3e9482bdbc..381a66af4d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1502,17 +1502,17 @@ files = [ [[package]] name = "mypy-zope" -version = "1.0.0" +version = "1.0.1" description = "Plugin for mypy to support zope interfaces" optional = false python-versions = "*" files = [ - {file = "mypy-zope-1.0.0.tar.gz", hash = "sha256:be815c2fcb5333aa87e8ec682029ad3214142fe2a05ea383f9ff2d77c98008b7"}, - {file = "mypy_zope-1.0.0-py3-none-any.whl", hash = "sha256:9732e9b2198f2aec3343b38a51905ff49d44dc9e39e8e8bc6fc490b232388209"}, + {file = "mypy-zope-1.0.1.tar.gz", hash = "sha256:003953896629d762d7f497135171ad549df42a8ac63c1521a230832dd6f7fc25"}, + {file = "mypy_zope-1.0.1-py3-none-any.whl", hash = "sha256:ffa291a7af9f5904ce9f0e56de44323a4476e28aaf0d68361b62b1b0e997d0b8"}, ] [package.dependencies] -mypy = ">=1.0.0,<1.5.0" +mypy = ">=1.0.0,<1.6.0" "zope.interface" = "*" "zope.schema" = "*" From f93cd6abbb569dc057ded9d77b5e27971469102c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Sep 2023 09:54:28 +0100 Subject: [PATCH 453/562] Bump types-setuptools from 68.0.0.3 to 68.2.0.0 (#16292) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 381a66af4d..3705b100af 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3058,13 +3058,13 @@ types-urllib3 = "*" [[package]] name = "types-setuptools" -version = "68.0.0.3" +version = "68.2.0.0" description = "Typing stubs for setuptools" optional = false python-versions = "*" files = [ - {file = "types-setuptools-68.0.0.3.tar.gz", hash = "sha256:d57ae6076100b5704b3cc869fdefc671e1baf4c2cd6643f84265dfc0b955bf05"}, - {file = "types_setuptools-68.0.0.3-py3-none-any.whl", hash = "sha256:fec09e5c18264c5c09351c00be01a34456fb7a88e457abe97401325f84ad9d36"}, + {file = "types-setuptools-68.2.0.0.tar.gz", 
hash = "sha256:a4216f1e2ef29d089877b3af3ab2acf489eb869ccaf905125c69d2dc3932fd85"}, + {file = "types_setuptools-68.2.0.0-py3-none-any.whl", hash = "sha256:77edcc843e53f8fc83bb1a840684841f3dc804ec94562623bfa2ea70d5a2ba1b"}, ] [[package]] From b0e93b63d43f81c05de6c44f387cba6bed26cc70 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Sep 2023 09:54:37 +0100 Subject: [PATCH 454/562] Bump types-pillow from 10.0.0.2 to 10.0.0.3 (#16293) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3705b100af..e4cea28282 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2997,13 +2997,13 @@ files = [ [[package]] name = "types-pillow" -version = "10.0.0.2" +version = "10.0.0.3" description = "Typing stubs for Pillow" optional = false python-versions = "*" files = [ - {file = "types-Pillow-10.0.0.2.tar.gz", hash = "sha256:fe09380ab22d412ced989a067e9ee4af719fa3a47ba1b53b232b46514a871042"}, - {file = "types_Pillow-10.0.0.2-py3-none-any.whl", hash = "sha256:29d51a3ce6ef51fabf728a504d33b4836187ff14256b2e86996d55c91ab214b1"}, + {file = "types-Pillow-10.0.0.3.tar.gz", hash = "sha256:ae0c877d363da349bbb82c5463c9e78037290cc07d3714cb0ceaf5d2f7f5c825"}, + {file = "types_Pillow-10.0.0.3-py3-none-any.whl", hash = "sha256:54a49f3c6a3f5e95ebeee396d7773dde22ce2515d594f9c0596c0a983558f0d4"}, ] [[package]] From 151e4bbc45dbf7b767b1a6a74ffb4cd7889ccf78 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 11 Sep 2023 13:11:02 +0100 Subject: [PATCH 455/562] Filter out down hosts when retrying fetching device lists (#16298) --- changelog.d/16298.misc | 1 + synapse/handlers/device.py | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16298.misc diff --git a/changelog.d/16298.misc b/changelog.d/16298.misc new file mode 100644 index 0000000000..75b546d424 --- /dev/null +++ b/changelog.d/16298.misc @@ -0,0 +1 @@ +Don't try refetching device lists for users on remote hosts that are marked as "down". diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 9356ae998e..9d240ad4ee 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -58,7 +58,10 @@ from synapse.util.async_helpers import Linearizer from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.cancellation import cancellable from synapse.util.metrics import measure_func -from synapse.util.retryutils import NotRetryingDestination +from synapse.util.retryutils import ( + NotRetryingDestination, + filter_destinations_by_retry_limiter, +) if TYPE_CHECKING: from synapse.server import HomeServer @@ -1269,8 +1272,18 @@ class DeviceListUpdater(DeviceListWorkerUpdater): self._resync_retry_in_progress = True # Get all of the users that need resyncing. need_resync = await self.store.get_user_ids_requiring_device_list_resync() + + # Filter out users whose host is marked as "down" up front. + hosts = await filter_destinations_by_retry_limiter( + {get_domain_from_id(u) for u in need_resync}, self.clock, self.store + ) + hosts = set(hosts) + # Iterate over the set of user IDs. for user_id in need_resync: + if get_domain_from_id(user_id) not in hosts: + continue + try: # Try to resync the current user's devices list. 
result = (await self.multi_user_device_resync([user_id], False))[ From 9400dc05357b4272425c7be47ceeced26fa3f28c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 11 Sep 2023 09:49:48 -0400 Subject: [PATCH 456/562] Add the List-Unsubscribe header for notification emails. (#16274) Adds both the List-Unsubscribe (RFC2369) and List-Unsubscribe-Post (RFC8058) headers to push notification emails, which together should: * Show an "Unsubscribe" link in the MUA UI when viewing Synapse notification emails. * Enable "one-click" unsubscribe (the user never leaves their MUA, which automatically makes a POST request to the specified endpoint). --- changelog.d/16274.feature | 1 + synapse/handlers/send_email.py | 10 +++- synapse/push/mailer.py | 33 +++++++++++-- synapse/rest/synapse/client/unsubscribe.py | 17 +++++++ tests/push/test_email.py | 55 ++++++++++++++++++++++ 5 files changed, 110 insertions(+), 6 deletions(-) create mode 100644 changelog.d/16274.feature diff --git a/changelog.d/16274.feature b/changelog.d/16274.feature new file mode 100644 index 0000000000..0d9da2bbef --- /dev/null +++ b/changelog.d/16274.feature @@ -0,0 +1 @@ +Enable users to easily unsubscribe to notifications emails via the `List-Unsubscribe` header. diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py index 05e21509de..4f5fe62fe8 100644 --- a/synapse/handlers/send_email.py +++ b/synapse/handlers/send_email.py @@ -17,7 +17,7 @@ import logging from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from io import BytesIO -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING, Any, Dict, Optional from pkg_resources import parse_version @@ -151,6 +151,7 @@ class SendEmailHandler: app_name: str, html: str, text: str, + additional_headers: Optional[Dict[str, str]] = None, ) -> None: """Send a multipart email with the given information. @@ -160,6 +161,7 @@ class SendEmailHandler: app_name: The app name to include in the From header. html: The HTML content to include in the email. text: The plain text content to include in the email. + additional_headers: A map of additional headers to include. """ try: from_string = self._from % {"app": app_name} @@ -181,6 +183,7 @@ class SendEmailHandler: multipart_msg["To"] = email_address multipart_msg["Date"] = email.utils.formatdate() multipart_msg["Message-ID"] = email.utils.make_msgid() + # Discourage automatic responses to Synapse's emails. # Per RFC 3834, automatic responses should not be sent if the "Auto-Submitted" # header is present with any value other than "no". 
See @@ -194,6 +197,11 @@ class SendEmailHandler: # https://stackoverflow.com/a/25324691/5252017 # https://stackoverflow.com/a/61646381/5252017 multipart_msg["X-Auto-Response-Suppress"] = "All" + + if additional_headers: + for header, value in additional_headers.items(): + multipart_msg[header] = value + multipart_msg.attach(text_part) multipart_msg.attach(html_part) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 79e0627b6a..b6cad18c2d 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -298,20 +298,26 @@ class Mailer: notifs_by_room, state_by_room, notif_events, reason ) + unsubscribe_link = self._make_unsubscribe_link(user_id, app_id, email_address) + template_vars: TemplateVars = { "user_display_name": user_display_name, - "unsubscribe_link": self._make_unsubscribe_link( - user_id, app_id, email_address - ), + "unsubscribe_link": unsubscribe_link, "summary_text": summary_text, "rooms": rooms, "reason": reason, } - await self.send_email(email_address, summary_text, template_vars) + await self.send_email( + email_address, summary_text, template_vars, unsubscribe_link + ) async def send_email( - self, email_address: str, subject: str, extra_template_vars: TemplateVars + self, + email_address: str, + subject: str, + extra_template_vars: TemplateVars, + unsubscribe_link: Optional[str] = None, ) -> None: """Send an email with the given information and template text""" template_vars: TemplateVars = { @@ -330,6 +336,23 @@ class Mailer: app_name=self.app_name, html=html_text, text=plain_text, + # Include the List-Unsubscribe header which some clients render in the UI. + # Per RFC 2369, this can be a URL or mailto URL. See + # https://www.rfc-editor.org/rfc/rfc2369.html#section-3.2 + # + # It is preferred to use email, but Synapse doesn't support incoming email. + # + # Also include the List-Unsubscribe-Post header from RFC 8058. See + # https://www.rfc-editor.org/rfc/rfc8058.html#section-3.1 + # + # Note that many email clients will not render the unsubscribe link + # unless DKIM, etc. is properly setup. + additional_headers={ + "List-Unsubscribe-Post": "List-Unsubscribe=One-Click", + "List-Unsubscribe": f"<{unsubscribe_link}>", + } + if unsubscribe_link + else None, ) async def _get_room_vars( diff --git a/synapse/rest/synapse/client/unsubscribe.py b/synapse/rest/synapse/client/unsubscribe.py index 60321018f9..050fd7bba1 100644 --- a/synapse/rest/synapse/client/unsubscribe.py +++ b/synapse/rest/synapse/client/unsubscribe.py @@ -38,6 +38,10 @@ class UnsubscribeResource(DirectServeHtmlResource): self.macaroon_generator = hs.get_macaroon_generator() async def _async_render_GET(self, request: SynapseRequest) -> None: + """ + Handle a user opening an unsubscribe link in the browser, either via an + HTML/Text email or via the List-Unsubscribe header. + """ token = parse_string(request, "access_token", required=True) app_id = parse_string(request, "app_id", required=True) pushkey = parse_string(request, "pushkey", required=True) @@ -62,3 +66,16 @@ class UnsubscribeResource(DirectServeHtmlResource): 200, UnsubscribeResource.SUCCESS_HTML, ) + + async def _async_render_POST(self, request: SynapseRequest) -> None: + """ + Handle a mail user agent POSTing to the unsubscribe URL via the + List-Unsubscribe & List-Unsubscribe-Post headers. + """ + + # TODO Assert that the body has a single field + + # Assert the body has form encoded key/value pair of + # List-Unsubscribe=One-Click. 
+ + await self._async_render_GET(request) diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 4b5c96aeae..73a430ddc6 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -13,10 +13,12 @@ # limitations under the License. import email.message import os +from http import HTTPStatus from typing import Any, Dict, List, Sequence, Tuple import attr import pkg_resources +from parameterized import parameterized from twisted.internet.defer import Deferred from twisted.test.proto_helpers import MemoryReactor @@ -25,9 +27,11 @@ import synapse.rest.admin from synapse.api.errors import Codes, SynapseError from synapse.push.emailpusher import EmailPusher from synapse.rest.client import login, room +from synapse.rest.synapse.client.unsubscribe import UnsubscribeResource from synapse.server import HomeServer from synapse.util import Clock +from tests.server import FakeSite, make_request from tests.unittest import HomeserverTestCase @@ -175,6 +179,57 @@ class EmailPusherTests(HomeserverTestCase): self._check_for_mail() + @parameterized.expand([(False,), (True,)]) + def test_unsubscribe(self, use_post: bool) -> None: + # Create a simple room with two users + room = self.helper.create_room_as(self.user_id, tok=self.access_token) + self.helper.invite( + room=room, src=self.user_id, tok=self.access_token, targ=self.others[0].id + ) + self.helper.join(room=room, user=self.others[0].id, tok=self.others[0].token) + + # The other user sends a single message. + self.helper.send(room, body="Hi!", tok=self.others[0].token) + + # We should get emailed about that message + args, kwargs = self._check_for_mail() + + # That email should contain an unsubscribe link in the body and header. + msg: bytes = args[5] + + # Multipart: plain text, base 64 encoded; html, base 64 encoded + multipart_msg = email.message_from_bytes(msg) + txt = multipart_msg.get_payload()[0].get_payload(decode=True).decode() + html = multipart_msg.get_payload()[1].get_payload(decode=True).decode() + self.assertIn("/_synapse/client/unsubscribe", txt) + self.assertIn("/_synapse/client/unsubscribe", html) + + # The unsubscribe headers should exist. + assert multipart_msg.get("List-Unsubscribe") is not None + self.assertIsNotNone(multipart_msg.get("List-Unsubscribe-Post")) + + # Open the unsubscribe link. + unsubscribe_link = multipart_msg["List-Unsubscribe"].strip("<>") + unsubscribe_resource = UnsubscribeResource(self.hs) + channel = make_request( + self.reactor, + FakeSite(unsubscribe_resource, self.reactor), + "POST" if use_post else "GET", + unsubscribe_link, + shorthand=False, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + # Ensure the pusher was removed. 
+ pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by( + {"user_name": self.user_id} + ) + ) + ) + self.assertEqual(pushers, []) + def test_invite_sends_email(self) -> None: # Create a room and invite the user to it room = self.helper.create_room_as(self.others[0].id, tok=self.others[0].token) From efe778a0b8f4880b643a6e5d555715ee4a242608 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Tue, 12 Sep 2023 11:59:35 +0200 Subject: [PATCH 457/562] 1.92.0 --- CHANGES.md | 12 ++++++++++++ changelog.d/16255.misc | 1 - changelog.d/16258.bugfix | 1 - changelog.d/16266.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 6 files changed, 19 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/16255.misc delete mode 100644 changelog.d/16258.bugfix delete mode 100644 changelog.d/16266.misc diff --git a/CHANGES.md b/CHANGES.md index 47fc31a5cb..9c9949a202 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,15 @@ +# Synapse 1.92.0 (2023-09-12) + +### Bugfixes + +- Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258)) + +### Internal Changes + +- Fix incorrect docstring for `Ratelimiter`. ([\#16255](https://github.com/matrix-org/synapse/issues/16255)) +- Update the release script to work on macOS. ([\#16266](https://github.com/matrix-org/synapse/issues/16266)) + + # Synapse 1.91.2 (2023-09-06) ### Bugfixes diff --git a/changelog.d/16255.misc b/changelog.d/16255.misc deleted file mode 100644 index 94d6aff1d6..0000000000 --- a/changelog.d/16255.misc +++ /dev/null @@ -1 +0,0 @@ -Fix incorrect docstring for `Ratelimiter`. diff --git a/changelog.d/16258.bugfix b/changelog.d/16258.bugfix deleted file mode 100644 index d5ae2399e6..0000000000 --- a/changelog.d/16258.bugfix +++ /dev/null @@ -1 +0,0 @@ -Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. diff --git a/changelog.d/16266.misc b/changelog.d/16266.misc deleted file mode 100644 index ac594c4ac4..0000000000 --- a/changelog.d/16266.misc +++ /dev/null @@ -1 +0,0 @@ -Update the release script to work on macOS. diff --git a/debian/changelog b/debian/changelog index 81baa6e405..dfe88c12a4 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.92.0) stable; urgency=medium + + * New Synapse release 1.92.0. + + -- Synapse Packaging team Tue, 12 Sep 2023 11:59:23 +0200 + matrix-synapse-py3 (1.91.2) stable; urgency=medium * New synapse release 1.91.2. diff --git a/pyproject.toml b/pyproject.toml index c17f4da72d..f46303ae1b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.92.0rc1" +version = "1.92.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 4a5bf74372d602fb2a310b89324ac7f467020748 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Tue, 12 Sep 2023 12:06:47 +0200 Subject: [PATCH 458/562] Inverse changelog order --- CHANGES.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 9c9949a202..3e2b0a36f3 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -10,12 +10,6 @@ - Update the release script to work on macOS. 
([\#16266](https://github.com/matrix-org/synapse/issues/16266)) -# Synapse 1.91.2 (2023-09-06) - -### Bugfixes - -- Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258)) - # Synapse 1.92.0rc1 (2023-09-05) ### Features @@ -71,6 +65,13 @@ * Bump types-psycopg2 from 2.9.21.10 to 2.9.21.11. ([\#16200](https://github.com/matrix-org/synapse/issues/16200)) * Bump types-pyyaml from 6.0.12.10 to 6.0.12.11. ([\#16199](https://github.com/matrix-org/synapse/issues/16199)) +# Synapse 1.91.2 (2023-09-06) + +### Bugfixes + +- Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258)) + + # Synapse 1.91.1 (2023-09-04) ### Bugfixes From 2b35626b6b7aed52a626734a5a85fe77c847251d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 12 Sep 2023 11:08:04 +0100 Subject: [PATCH 459/562] Refactor storing of server keys (#16261) --- changelog.d/16261.misc | 1 + synapse/crypto/keyring.py | 35 +--- synapse/storage/databases/main/keys.py | 229 +++++++++---------------- tests/crypto/test_keyring.py | 53 ++---- tests/storage/test_keys.py | 137 --------------- tests/unittest.py | 26 +-- 6 files changed, 111 insertions(+), 370 deletions(-) create mode 100644 changelog.d/16261.misc delete mode 100644 tests/storage/test_keys.py diff --git a/changelog.d/16261.misc b/changelog.d/16261.misc new file mode 100644 index 0000000000..d3ad59ca4a --- /dev/null +++ b/changelog.d/16261.misc @@ -0,0 +1 @@ +Simplify server key storage. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 260aab3241..fe86f54d80 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -23,12 +23,7 @@ from signedjson.key import ( get_verify_key, is_signing_algorithm_supported, ) -from signedjson.sign import ( - SignatureVerifyException, - encode_canonical_json, - signature_ids, - verify_signed_json, -) +from signedjson.sign import SignatureVerifyException, signature_ids, verify_signed_json from signedjson.types import VerifyKey from unpaddedbase64 import decode_base64 @@ -596,24 +591,12 @@ class BaseV2KeyFetcher(KeyFetcher): verify_key=verify_key, valid_until_ts=key_data["expired_ts"] ) - key_json_bytes = encode_canonical_json(response_json) - - await make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self.store.store_server_keys_json, - server_name=server_name, - key_id=key_id, - from_server=from_server, - ts_now_ms=time_added_ms, - ts_expires_ms=ts_valid_until_ms, - key_json_bytes=key_json_bytes, - ) - for key_id in verify_keys - ], - consumeErrors=True, - ).addErrback(unwrapFirstError) + await self.store.store_server_keys_response( + server_name=server_name, + from_server=from_server, + ts_added_ms=time_added_ms, + verify_keys=verify_keys, + response_json=response_json, ) return verify_keys @@ -775,10 +758,6 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): keys.setdefault(server_name, {}).update(processed_response) - await self.store.store_server_signature_keys( - perspective_name, time_now_ms, added_keys - ) - return keys def _validate_perspectives_response( diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index 57aa4921e1..41563371dc 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -16,14 +16,17 @@ import 
itertools import json import logging -from typing import Dict, Iterable, Mapping, Optional, Tuple +from typing import Dict, Iterable, Optional, Tuple +from canonicaljson import encode_canonical_json from signedjson.key import decode_verify_key_bytes from unpaddedbase64 import decode_base64 +from synapse.storage.database import LoggingTransaction from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore from synapse.storage.keys import FetchKeyResult, FetchKeyResultForRemote from synapse.storage.types import Cursor +from synapse.types import JsonDict from synapse.util.caches.descriptors import cached, cachedList from synapse.util.iterutils import batch_iter @@ -36,162 +39,84 @@ db_binary_type = memoryview class KeyStore(CacheInvalidationWorkerStore): """Persistence for signature verification keys""" - @cached() - def _get_server_signature_key( - self, server_name_and_key_id: Tuple[str, str] - ) -> FetchKeyResult: - raise NotImplementedError() - - @cachedList( - cached_method_name="_get_server_signature_key", - list_name="server_name_and_key_ids", - ) - async def get_server_signature_keys( - self, server_name_and_key_ids: Iterable[Tuple[str, str]] - ) -> Dict[Tuple[str, str], FetchKeyResult]: - """ - Args: - server_name_and_key_ids: - iterable of (server_name, key-id) tuples to fetch keys for - - Returns: - A map from (server_name, key_id) -> FetchKeyResult, or None if the - key is unknown - """ - keys = {} - - def _get_keys(txn: Cursor, batch: Tuple[Tuple[str, str], ...]) -> None: - """Processes a batch of keys to fetch, and adds the result to `keys`.""" - - # batch_iter always returns tuples so it's safe to do len(batch) - sql = """ - SELECT server_name, key_id, verify_key, ts_valid_until_ms - FROM server_signature_keys WHERE 1=0 - """ + " OR (server_name=? AND key_id=?)" * len( - batch - ) - - txn.execute(sql, tuple(itertools.chain.from_iterable(batch))) - - for row in txn: - server_name, key_id, key_bytes, ts_valid_until_ms = row - - if ts_valid_until_ms is None: - # Old keys may be stored with a ts_valid_until_ms of null, - # in which case we treat this as if it was set to `0`, i.e. - # it won't match key requests that define a minimum - # `ts_valid_until_ms`. - ts_valid_until_ms = 0 - - keys[(server_name, key_id)] = FetchKeyResult( - verify_key=decode_verify_key_bytes(key_id, bytes(key_bytes)), - valid_until_ts=ts_valid_until_ms, - ) - - def _txn(txn: Cursor) -> Dict[Tuple[str, str], FetchKeyResult]: - for batch in batch_iter(server_name_and_key_ids, 50): - _get_keys(txn, batch) - return keys - - return await self.db_pool.runInteraction("get_server_signature_keys", _txn) - - async def store_server_signature_keys( - self, - from_server: str, - ts_added_ms: int, - verify_keys: Mapping[Tuple[str, str], FetchKeyResult], - ) -> None: - """Stores NACL verification keys for remote servers. - Args: - from_server: Where the verification keys were looked up - ts_added_ms: The time to record that the key was added - verify_keys: - keys to be stored. Each entry is a triplet of - (server_name, key_id, key). - """ - key_values = [] - value_values = [] - invalidations = [] - for (server_name, key_id), fetch_result in verify_keys.items(): - key_values.append((server_name, key_id)) - value_values.append( - ( - from_server, - ts_added_ms, - fetch_result.valid_until_ts, - db_binary_type(fetch_result.verify_key.encode()), - ) - ) - # invalidate takes a tuple corresponding to the params of - # _get_server_signature_key. 
_get_server_signature_key only takes one - # param, which is itself the 2-tuple (server_name, key_id). - invalidations.append((server_name, key_id)) - - await self.db_pool.simple_upsert_many( - table="server_signature_keys", - key_names=("server_name", "key_id"), - key_values=key_values, - value_names=( - "from_server", - "ts_added_ms", - "ts_valid_until_ms", - "verify_key", - ), - value_values=value_values, - desc="store_server_signature_keys", - ) - - invalidate = self._get_server_signature_key.invalidate - for i in invalidations: - invalidate((i,)) - - async def store_server_keys_json( + async def store_server_keys_response( self, server_name: str, - key_id: str, from_server: str, - ts_now_ms: int, - ts_expires_ms: int, - key_json_bytes: bytes, + ts_added_ms: int, + verify_keys: Dict[str, FetchKeyResult], + response_json: JsonDict, ) -> None: - """Stores the JSON bytes for a set of keys from a server - The JSON should be signed by the originating server, the intermediate - server, and by this server. Updates the value for the - (server_name, key_id, from_server) triplet if one already existed. - Args: - server_name: The name of the server. - key_id: The identifier of the key this JSON is for. - from_server: The server this JSON was fetched from. - ts_now_ms: The time now in milliseconds. - ts_valid_until_ms: The time when this json stops being valid. - key_json_bytes: The encoded JSON. - """ - await self.db_pool.simple_upsert( - table="server_keys_json", - keyvalues={ - "server_name": server_name, - "key_id": key_id, - "from_server": from_server, - }, - values={ - "server_name": server_name, - "key_id": key_id, - "from_server": from_server, - "ts_added_ms": ts_now_ms, - "ts_valid_until_ms": ts_expires_ms, - "key_json": db_binary_type(key_json_bytes), - }, - desc="store_server_keys_json", - ) + """Stores the keys for the given server that we got from `from_server`. - # invalidate takes a tuple corresponding to the params of - # _get_server_keys_json. _get_server_keys_json only takes one - # param, which is itself the 2-tuple (server_name, key_id). - await self.invalidate_cache_and_stream( - "_get_server_keys_json", ((server_name, key_id),) - ) - await self.invalidate_cache_and_stream( - "get_server_key_json_for_remote", (server_name, key_id) + Args: + server_name: The owner of the keys + from_server: Which server we got the keys from + ts_added_ms: When we're adding the keys + verify_keys: The decoded keys + response_json: The full *signed* response JSON that contains the keys. 
+ """ + + key_json_bytes = encode_canonical_json(response_json) + + def store_server_keys_response_txn(txn: LoggingTransaction) -> None: + self.db_pool.simple_upsert_many_txn( + txn, + table="server_signature_keys", + key_names=("server_name", "key_id"), + key_values=[(server_name, key_id) for key_id in verify_keys], + value_names=( + "from_server", + "ts_added_ms", + "ts_valid_until_ms", + "verify_key", + ), + value_values=[ + ( + from_server, + ts_added_ms, + fetch_result.valid_until_ts, + db_binary_type(fetch_result.verify_key.encode()), + ) + for fetch_result in verify_keys.values() + ], + ) + + self.db_pool.simple_upsert_many_txn( + txn, + table="server_keys_json", + key_names=("server_name", "key_id", "from_server"), + key_values=[ + (server_name, key_id, from_server) for key_id in verify_keys + ], + value_names=( + "ts_added_ms", + "ts_valid_until_ms", + "key_json", + ), + value_values=[ + ( + ts_added_ms, + fetch_result.valid_until_ts, + db_binary_type(key_json_bytes), + ) + for fetch_result in verify_keys.values() + ], + ) + + # invalidate takes a tuple corresponding to the params of + # _get_server_keys_json. _get_server_keys_json only takes one + # param, which is itself the 2-tuple (server_name, key_id). + for key_id in verify_keys: + self._invalidate_cache_and_stream( + txn, self._get_server_keys_json, ((server_name, key_id),) + ) + self._invalidate_cache_and_stream( + txn, self.get_server_key_json_for_remote, (server_name, key_id) + ) + + await self.db_pool.runInteraction( + "store_server_keys_response", store_server_keys_response_txn ) @cached() diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index f93ba5d4cf..c5700771b0 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -13,7 +13,7 @@ # limitations under the License. import time from typing import Any, Dict, List, Optional, cast -from unittest.mock import AsyncMock, Mock +from unittest.mock import Mock import attr import canonicaljson @@ -189,23 +189,24 @@ class KeyringTestCase(unittest.HomeserverTestCase): kr = keyring.Keyring(self.hs) key1 = signedjson.key.generate_signing_key("1") - r = self.hs.get_datastores().main.store_server_keys_json( + r = self.hs.get_datastores().main.store_server_keys_response( "server9", - get_key_id(key1), from_server="test", - ts_now_ms=int(time.time() * 1000), - ts_expires_ms=1000, + ts_added_ms=int(time.time() * 1000), + verify_keys={ + get_key_id(key1): FetchKeyResult( + verify_key=get_verify_key(key1), valid_until_ts=1000 + ) + }, # The entire response gets signed & stored, just include the bits we # care about. 
- key_json_bytes=canonicaljson.encode_canonical_json( - { - "verify_keys": { - get_key_id(key1): { - "key": encode_verify_key_base64(get_verify_key(key1)) - } + response_json={ + "verify_keys": { + get_key_id(key1): { + "key": encode_verify_key_base64(get_verify_key(key1)) } } - ), + }, ) self.get_success(r) @@ -285,34 +286,6 @@ class KeyringTestCase(unittest.HomeserverTestCase): d = kr.verify_json_for_server(self.hs.hostname, json1, 0) self.get_success(d) - def test_verify_json_for_server_with_null_valid_until_ms(self) -> None: - """Tests that we correctly handle key requests for keys we've stored - with a null `ts_valid_until_ms` - """ - mock_fetcher = Mock() - mock_fetcher.get_keys = AsyncMock(return_value={}) - - key1 = signedjson.key.generate_signing_key("1") - r = self.hs.get_datastores().main.store_server_signature_keys( - "server9", - int(time.time() * 1000), - # None is not a valid value in FetchKeyResult, but we're abusing this - # API to insert null values into the database. The nulls get converted - # to 0 when fetched in KeyStore.get_server_signature_keys. - {("server9", get_key_id(key1)): FetchKeyResult(get_verify_key(key1), None)}, # type: ignore[arg-type] - ) - self.get_success(r) - - json1: JsonDict = {} - signedjson.sign.sign_json(json1, "server9", key1) - - # should succeed on a signed object with a 0 minimum_valid_until_ms - d = self.hs.get_datastores().main.get_server_signature_keys( - [("server9", get_key_id(key1))] - ) - result = self.get_success(d) - self.assertEqual(result[("server9", get_key_id(key1))].valid_until_ts, 0) - def test_verify_json_dedupes_key_requests(self) -> None: """Two requests for the same key should be deduped.""" key1 = signedjson.key.generate_signing_key("1") diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py deleted file mode 100644 index 5d7c13e6d0..0000000000 --- a/tests/storage/test_keys.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright 2017 Vector Creations Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import signedjson.key -import signedjson.types -import unpaddedbase64 - -from synapse.storage.keys import FetchKeyResult - -import tests.unittest - - -def decode_verify_key_base64( - key_id: str, key_base64: str -) -> signedjson.types.VerifyKey: - key_bytes = unpaddedbase64.decode_base64(key_base64) - return signedjson.key.decode_verify_key_bytes(key_id, key_bytes) - - -KEY_1 = decode_verify_key_base64( - "ed25519:key1", "fP5l4JzpZPq/zdbBg5xx6lQGAAOM9/3w94cqiJ5jPrw" -) -KEY_2 = decode_verify_key_base64( - "ed25519:key2", "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw" -) - - -class KeyStoreTestCase(tests.unittest.HomeserverTestCase): - def test_get_server_signature_keys(self) -> None: - store = self.hs.get_datastores().main - - key_id_1 = "ed25519:key1" - key_id_2 = "ed25519:KEY_ID_2" - self.get_success( - store.store_server_signature_keys( - "from_server", - 10, - { - ("server1", key_id_1): FetchKeyResult(KEY_1, 100), - ("server1", key_id_2): FetchKeyResult(KEY_2, 200), - }, - ) - ) - - res = self.get_success( - store.get_server_signature_keys( - [ - ("server1", key_id_1), - ("server1", key_id_2), - ("server1", "ed25519:key3"), - ] - ) - ) - - self.assertEqual(len(res.keys()), 3) - res1 = res[("server1", key_id_1)] - self.assertEqual(res1.verify_key, KEY_1) - self.assertEqual(res1.verify_key.version, "key1") - self.assertEqual(res1.valid_until_ts, 100) - - res2 = res[("server1", key_id_2)] - self.assertEqual(res2.verify_key, KEY_2) - # version comes from the ID it was stored with - self.assertEqual(res2.verify_key.version, "KEY_ID_2") - self.assertEqual(res2.valid_until_ts, 200) - - # non-existent result gives None - self.assertIsNone(res[("server1", "ed25519:key3")]) - - def test_cache(self) -> None: - """Check that updates correctly invalidate the cache.""" - - store = self.hs.get_datastores().main - - key_id_1 = "ed25519:key1" - key_id_2 = "ed25519:key2" - - self.get_success( - store.store_server_signature_keys( - "from_server", - 0, - { - ("srv1", key_id_1): FetchKeyResult(KEY_1, 100), - ("srv1", key_id_2): FetchKeyResult(KEY_2, 200), - }, - ) - ) - - res = self.get_success( - store.get_server_signature_keys([("srv1", key_id_1), ("srv1", key_id_2)]) - ) - self.assertEqual(len(res.keys()), 2) - - res1 = res[("srv1", key_id_1)] - self.assertEqual(res1.verify_key, KEY_1) - self.assertEqual(res1.valid_until_ts, 100) - - res2 = res[("srv1", key_id_2)] - self.assertEqual(res2.verify_key, KEY_2) - self.assertEqual(res2.valid_until_ts, 200) - - # we should be able to look up the same thing again without a db hit - res = self.get_success(store.get_server_signature_keys([("srv1", key_id_1)])) - self.assertEqual(len(res.keys()), 1) - self.assertEqual(res[("srv1", key_id_1)].verify_key, KEY_1) - - new_key_2 = signedjson.key.get_verify_key( - signedjson.key.generate_signing_key("key2") - ) - d = store.store_server_signature_keys( - "from_server", 10, {("srv1", key_id_2): FetchKeyResult(new_key_2, 300)} - ) - self.get_success(d) - - res = self.get_success( - store.get_server_signature_keys([("srv1", key_id_1), ("srv1", key_id_2)]) - ) - self.assertEqual(len(res.keys()), 2) - - res1 = res[("srv1", key_id_1)] - self.assertEqual(res1.verify_key, KEY_1) - self.assertEqual(res1.valid_until_ts, 100) - - res2 = res[("srv1", key_id_2)] - self.assertEqual(res2.verify_key, new_key_2) - self.assertEqual(res2.valid_until_ts, 300) diff --git a/tests/unittest.py b/tests/unittest.py index 5d3640d8ac..dbaff361b4 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -70,6 +70,7 @@ from synapse.logging.context 
import ( ) from synapse.rest import RegisterServletsFunc from synapse.server import HomeServer +from synapse.storage.keys import FetchKeyResult from synapse.types import JsonDict, Requester, UserID, create_requester from synapse.util import Clock from synapse.util.httpresourcetree import create_resource_tree @@ -858,23 +859,22 @@ class FederatingHomeserverTestCase(HomeserverTestCase): verify_key_id = "%s:%s" % (verify_key.alg, verify_key.version) self.get_success( - hs.get_datastores().main.store_server_keys_json( + hs.get_datastores().main.store_server_keys_response( self.OTHER_SERVER_NAME, - verify_key_id, from_server=self.OTHER_SERVER_NAME, - ts_now_ms=clock.time_msec(), - ts_expires_ms=clock.time_msec() + 10000, - key_json_bytes=canonicaljson.encode_canonical_json( - { - "verify_keys": { - verify_key_id: { - "key": signedjson.key.encode_verify_key_base64( - verify_key - ) - } + ts_added_ms=clock.time_msec(), + verify_keys={ + verify_key_id: FetchKeyResult( + verify_key=verify_key, valid_until_ts=clock.time_msec() + 10000 + ), + }, + response_json={ + "verify_keys": { + verify_key_id: { + "key": signedjson.key.encode_verify_key_base64(verify_key) } } - ), + }, ) ) From 622463636cd8d14c980515e322f93f3e9af90af4 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Tue, 12 Sep 2023 12:16:44 +0200 Subject: [PATCH 460/562] Remove kinetic deb build, it's EOL --- scripts-dev/build_debian_packages.py | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py index c03e3418c0..b192faba14 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py @@ -32,7 +32,6 @@ DISTS = ( "debian:sid", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24) "ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14) "ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04) - "ubuntu:kinetic", # 22.10 (EOL 2023-07-20) (our EOL forced by Python 3.10 is 2026-10-04) "ubuntu:lunar", # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24) "debian:trixie", # (EOL not specified yet) ) From 16ef6f1e3c8d0cfe959e4209fd04528658383ab4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 12 Sep 2023 07:12:31 -0400 Subject: [PATCH 461/562] Stop purging tables which are slated for removal. (#16273) --- changelog.d/16273.misc | 1 + synapse/storage/databases/main/purge_events.py | 4 ---- synapse/storage/schema/__init__.py | 6 +++++- 3 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/16273.misc diff --git a/changelog.d/16273.misc b/changelog.d/16273.misc new file mode 100644 index 0000000000..19882f6754 --- /dev/null +++ b/changelog.d/16273.misc @@ -0,0 +1 @@ +Stop purging from tables slated for removal. 
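Before the purge changes below, a minimal sketch of how a caller drives the consolidated `store_server_keys_response` API from the key-storage refactor above. This is an illustration, not part of any patch: `store` stands in for the main datastore (`hs.get_datastores().main`), the server name and timestamps are made up, and only calls already visible in the diffs above are used.

    import time

    import signedjson.key

    from synapse.storage.keys import FetchKeyResult


    async def cache_remote_server_key(store) -> None:
        # `store` is assumed to be the main datastore. Generate a signing key
        # the way the tests above do; real callers get these from a
        # /_matrix/key/v2 response instead.
        signing_key = signedjson.key.generate_signing_key("key1")
        verify_key = signedjson.key.get_verify_key(signing_key)
        key_id = f"{verify_key.alg}:{verify_key.version}"

        now_ms = int(time.time() * 1000)
        await store.store_server_keys_response(
            "other.example.com",
            from_server="other.example.com",
            ts_added_ms=now_ms,
            # One FetchKeyResult per key ID; all keys share one signed response.
            verify_keys={
                key_id: FetchKeyResult(
                    verify_key=verify_key, valid_until_ts=now_ms + 10000
                )
            },
            # The full signed response is stored verbatim, so it can later be
            # re-served (e.g. via `get_server_key_json_for_remote`, whose cache
            # the new transaction invalidates).
            response_json={
                "verify_keys": {
                    key_id: {
                        "key": signedjson.key.encode_verify_key_base64(verify_key)
                    }
                }
            },
        )

Storing the whole signed response, rather than just the verify keys, preserves the origin server's signatures; that is why the refactor writes both `server_signature_keys` and `server_keys_json` inside a single transaction.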
diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index b52f48cf04..dea0e0458c 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -450,10 +450,6 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): "e2e_room_keys", "event_push_summary", "pusher_throttle", - "insertion_events", - "insertion_event_extremities", - "insertion_event_edges", - "batch_events", "room_account_data", "room_tags", # "rooms" happens last, to keep the foreign keys in the other tables diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 422f11f59e..5b50bd66bc 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -SCHEMA_VERSION = 81 # remember to update the list below when updating +SCHEMA_VERSION = 82 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -117,6 +117,10 @@ Changes in SCHEMA_VERSION = 80 Changes in SCHEMA_VERSION = 81 - The event_txn_id is no longer written to for new events. + +Changes in SCHEMA_VERSION = 82 + - The insertion_events, insertion_event_extremities, insertion_event_edges, and + batch_events tables are no longer purged in preparation for their removal. """ From ba48c563c98966400488c8972d2e9964f9510399 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 12 Sep 2023 07:16:09 -0400 Subject: [PATCH 462/562] Bump mypy from 1.4.1 to 1.5.1. (#16300) --- changelog.d/16300.misc | 1 + mypy.ini | 1 - poetry.lock | 68 +++++++++++++++------------------- synapse/logging/opentracing.py | 10 +---- 4 files changed, 32 insertions(+), 48 deletions(-) create mode 100644 changelog.d/16300.misc diff --git a/changelog.d/16300.misc b/changelog.d/16300.misc new file mode 100644 index 0000000000..8cc2e52369 --- /dev/null +++ b/changelog.d/16300.misc @@ -0,0 +1 @@ +Bump mypy from 1.4.1 to 1.5.1. diff --git a/mypy.ini b/mypy.ini index fb5f44c939..88aea301b9 100644 --- a/mypy.ini +++ b/mypy.ini @@ -23,7 +23,6 @@ warn_unused_ignores = True # warn_return_any = True # no_implicit_reexport = True strict_equality = True -strict_concatenate = True # Run mypy type checking with the minimum supported Python version to catch new usage # that isn't backwards-compatible (types, overloads, etc). 
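The mypy upgrade enables two cleanups in this patch: the `strict_concatenate` flag dropped from mypy.ini above (its checks concern arguments prepended via `Concatenate` with `ParamSpec`), and the `type: ignore[arg-type]` comments removed from `synapse/logging/opentracing.py` further down, which worked around https://github.com/python/mypy/issues/12909. A minimal sketch of the `ParamSpec` decorator pattern involved; this shows only the basic shape, which type-checks cleanly on its own. The failure mode in Synapse involved passing such a function through the `_custom_sync_async_decorator` helper together with its wrapping logic, which is where mypy 1.4 needed the ignores.

    from typing import Callable, TypeVar

    from typing_extensions import ParamSpec

    P = ParamSpec("P")
    R = TypeVar("R")


    def log_calls(func: Callable[P, R]) -> Callable[P, R]:
        """Wrap `func` while preserving its exact signature via ParamSpec."""

        def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
            print("calling wrapped function")
            return func(*args, **kwargs)

        return wrapper


    @log_calls
    def add(x: int, y: int) -> int:
        return x + y


    assert add(2, 3) == 5  # mypy still sees add as (x: int, y: int) -> int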
diff --git a/poetry.lock b/poetry.lock index e4cea28282..c01312579e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1445,37 +1445,38 @@ files = [ [[package]] name = "mypy" -version = "1.4.1" +version = "1.5.1" description = "Optional static typing for Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "mypy-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:566e72b0cd6598503e48ea610e0052d1b8168e60a46e0bfd34b3acf2d57f96a8"}, - {file = "mypy-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ca637024ca67ab24a7fd6f65d280572c3794665eaf5edcc7e90a866544076878"}, - {file = "mypy-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dde1d180cd84f0624c5dcaaa89c89775550a675aff96b5848de78fb11adabcd"}, - {file = "mypy-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8c4d8e89aa7de683e2056a581ce63c46a0c41e31bd2b6d34144e2c80f5ea53dc"}, - {file = "mypy-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:bfdca17c36ae01a21274a3c387a63aa1aafe72bff976522886869ef131b937f1"}, - {file = "mypy-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7549fbf655e5825d787bbc9ecf6028731973f78088fbca3a1f4145c39ef09462"}, - {file = "mypy-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:98324ec3ecf12296e6422939e54763faedbfcc502ea4a4c38502082711867258"}, - {file = "mypy-1.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:141dedfdbfe8a04142881ff30ce6e6653c9685b354876b12e4fe6c78598b45e2"}, - {file = "mypy-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8207b7105829eca6f3d774f64a904190bb2231de91b8b186d21ffd98005f14a7"}, - {file = "mypy-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:16f0db5b641ba159eff72cff08edc3875f2b62b2fa2bc24f68c1e7a4e8232d01"}, - {file = "mypy-1.4.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:470c969bb3f9a9efcedbadcd19a74ffb34a25f8e6b0e02dae7c0e71f8372f97b"}, - {file = "mypy-1.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5952d2d18b79f7dc25e62e014fe5a23eb1a3d2bc66318df8988a01b1a037c5b"}, - {file = "mypy-1.4.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:190b6bab0302cec4e9e6767d3eb66085aef2a1cc98fe04936d8a42ed2ba77bb7"}, - {file = "mypy-1.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9d40652cc4fe33871ad3338581dca3297ff5f2213d0df345bcfbde5162abf0c9"}, - {file = "mypy-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:01fd2e9f85622d981fd9063bfaef1aed6e336eaacca00892cd2d82801ab7c042"}, - {file = "mypy-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2460a58faeea905aeb1b9b36f5065f2dc9a9c6e4c992a6499a2360c6c74ceca3"}, - {file = "mypy-1.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2746d69a8196698146a3dbe29104f9eb6a2a4d8a27878d92169a6c0b74435b6"}, - {file = "mypy-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ae704dcfaa180ff7c4cfbad23e74321a2b774f92ca77fd94ce1049175a21c97f"}, - {file = "mypy-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:43d24f6437925ce50139a310a64b2ab048cb2d3694c84c71c3f2a1626d8101dc"}, - {file = "mypy-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c482e1246726616088532b5e964e39765b6d1520791348e6c9dc3af25b233828"}, - {file = "mypy-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:43b592511672017f5b1a483527fd2684347fdffc041c9ef53428c8dc530f79a3"}, - {file = "mypy-1.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:34a9239d5b3502c17f07fd7c0b2ae6b7dd7d7f6af35fbb5072c6208e76295816"}, - {file = 
"mypy-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5703097c4936bbb9e9bce41478c8d08edd2865e177dc4c52be759f81ee4dd26c"}, - {file = "mypy-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e02d700ec8d9b1859790c0475df4e4092c7bf3272a4fd2c9f33d87fac4427b8f"}, - {file = "mypy-1.4.1-py3-none-any.whl", hash = "sha256:45d32cec14e7b97af848bddd97d85ea4f0db4d5a149ed9676caa4eb2f7402bb4"}, - {file = "mypy-1.4.1.tar.gz", hash = "sha256:9bbcd9ab8ea1f2e1c8031c21445b511442cc45c89951e49bbf852cbb70755b1b"}, + {file = "mypy-1.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f33592ddf9655a4894aef22d134de7393e95fcbdc2d15c1ab65828eee5c66c70"}, + {file = "mypy-1.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:258b22210a4a258ccd077426c7a181d789d1121aca6db73a83f79372f5569ae0"}, + {file = "mypy-1.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9ec1f695f0c25986e6f7f8778e5ce61659063268836a38c951200c57479cc12"}, + {file = "mypy-1.5.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:abed92d9c8f08643c7d831300b739562b0a6c9fcb028d211134fc9ab20ccad5d"}, + {file = "mypy-1.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:a156e6390944c265eb56afa67c74c0636f10283429171018446b732f1a05af25"}, + {file = "mypy-1.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6ac9c21bfe7bc9f7f1b6fae441746e6a106e48fc9de530dea29e8cd37a2c0cc4"}, + {file = "mypy-1.5.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:51cb1323064b1099e177098cb939eab2da42fea5d818d40113957ec954fc85f4"}, + {file = "mypy-1.5.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:596fae69f2bfcb7305808c75c00f81fe2829b6236eadda536f00610ac5ec2243"}, + {file = "mypy-1.5.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:32cb59609b0534f0bd67faebb6e022fe534bdb0e2ecab4290d683d248be1b275"}, + {file = "mypy-1.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:159aa9acb16086b79bbb0016145034a1a05360626046a929f84579ce1666b315"}, + {file = "mypy-1.5.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f6b0e77db9ff4fda74de7df13f30016a0a663928d669c9f2c057048ba44f09bb"}, + {file = "mypy-1.5.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:26f71b535dfc158a71264e6dc805a9f8d2e60b67215ca0bfa26e2e1aa4d4d373"}, + {file = "mypy-1.5.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fc3a600f749b1008cc75e02b6fb3d4db8dbcca2d733030fe7a3b3502902f161"}, + {file = "mypy-1.5.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:26fb32e4d4afa205b24bf645eddfbb36a1e17e995c5c99d6d00edb24b693406a"}, + {file = "mypy-1.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:82cb6193de9bbb3844bab4c7cf80e6227d5225cc7625b068a06d005d861ad5f1"}, + {file = "mypy-1.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4a465ea2ca12804d5b34bb056be3a29dc47aea5973b892d0417c6a10a40b2d65"}, + {file = "mypy-1.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9fece120dbb041771a63eb95e4896791386fe287fefb2837258925b8326d6160"}, + {file = "mypy-1.5.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d28ddc3e3dfeab553e743e532fb95b4e6afad51d4706dd22f28e1e5e664828d2"}, + {file = "mypy-1.5.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:57b10c56016adce71fba6bc6e9fd45d8083f74361f629390c556738565af8eeb"}, + {file = "mypy-1.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:ff0cedc84184115202475bbb46dd99f8dcb87fe24d5d0ddfc0fe6b8575c88d2f"}, + {file = "mypy-1.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8f772942d372c8cbac575be99f9cc9d9fb3bd95c8bc2de6c01411e2c84ebca8a"}, + {file = 
"mypy-1.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5d627124700b92b6bbaa99f27cbe615c8ea7b3402960f6372ea7d65faf376c14"}, + {file = "mypy-1.5.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:361da43c4f5a96173220eb53340ace68cda81845cd88218f8862dfb0adc8cddb"}, + {file = "mypy-1.5.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:330857f9507c24de5c5724235e66858f8364a0693894342485e543f5b07c8693"}, + {file = "mypy-1.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:c543214ffdd422623e9fedd0869166c2f16affe4ba37463975043ef7d2ea8770"}, + {file = "mypy-1.5.1-py3-none-any.whl", hash = "sha256:f757063a83970d67c444f6e01d9550a7402322af3557ce7630d3c957386fa8f5"}, + {file = "mypy-1.5.1.tar.gz", hash = "sha256:b031b9601f1060bf1281feab89697324726ba0c0bae9d7cd7ab4b690940f0b92"}, ] [package.dependencies] @@ -1486,7 +1487,6 @@ typing-extensions = ">=4.1.0" [package.extras] dmypy = ["psutil (>=4.0)"] install-types = ["pip"] -python2 = ["typed-ast (>=1.4.0,<2)"] reports = ["lxml"] [[package]] @@ -2077,7 +2077,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -2085,15 +2084,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", 
hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -2110,7 +2102,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -2118,7 +2109,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 5c3045e197..4454fe29a5 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -991,11 +991,7 @@ def trace_with_opname( if not opentracing: return func - # type-ignore: mypy seems to be confused by the ParamSpecs here. 
- # I think the problem is https://github.com/python/mypy/issues/12909 - return _custom_sync_async_decorator( - func, _wrapping_logic # type: ignore[arg-type] - ) + return _custom_sync_async_decorator(func, _wrapping_logic) return _decorator @@ -1040,9 +1036,7 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]: set_tag(SynapseTags.FUNC_KWARGS, str(kwargs)) yield - # type-ignore: mypy seems to be confused by the ParamSpecs here. - # I think the problem is https://github.com/python/mypy/issues/12909 - return _custom_sync_async_decorator(func, _wrapping_logic) # type: ignore[arg-type] + return _custom_sync_async_decorator(func, _wrapping_logic) @contextlib.contextmanager From 1296e471c343e001cd6bec4f5395811b3e755116 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Tue, 12 Sep 2023 13:20:48 +0200 Subject: [PATCH 463/562] 1.92.1 --- CHANGES.md | 5 +++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 3e2b0a36f3..13c53d2606 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,8 @@ +# Synapse 1.92.1 (2023-09-12) + +Stop building Ubuntu Kinetic since it is EOL and repos seem to be dead. + + # Synapse 1.92.0 (2023-09-12) ### Bugfixes diff --git a/debian/changelog b/debian/changelog index dfe88c12a4..9553967098 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.92.1) stable; urgency=medium + + * New Synapse release 1.92.1. + + -- Synapse Packaging team Tue, 12 Sep 2023 13:19:42 +0200 + matrix-synapse-py3 (1.92.0) stable; urgency=medium * New Synapse release 1.92.0. diff --git a/pyproject.toml b/pyproject.toml index f46303ae1b..821b13f5c4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.92.0" +version = "1.92.1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 48387c56f11f2f4173291feaf36375ae68bb5507 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Tue, 12 Sep 2023 15:34:10 +0200 Subject: [PATCH 464/562] Update changelog --- CHANGES.md | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 13c53d2606..8513ca47f0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,10 +1,18 @@ # Synapse 1.92.1 (2023-09-12) -Stop building Ubuntu Kinetic since it is EOL and repos seem to be dead. +This minor release was needed only because of CI-related trouble on [v1.92.0](https://github.com/matrix-org/synapse/releases/tag/v1.92.0), which was never released. + +### Internal Changes + +- Stop building Ubuntu Kinetic since it is EOL and repos seem to be dead. # Synapse 1.92.0 (2023-09-12) +This release includes the same [bugfix](https://github.com/matrix-org/synapse/issues/16258) as Synapse 1.91.2. + +This version was never released following a CI build failure, cf [v1.92.1 changelog](https://github.com/matrix-org/synapse/releases/tag/v1.92.1). + ### Bugfixes - Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258)) @@ -15,6 +23,13 @@ Stop building Ubuntu Kinetic since it is EOL and repos seem to be dead. - Update the release script to work on macOS. 
([\#16266](https://github.com/matrix-org/synapse/issues/16266)) +# Synapse 1.91.2 (2023-09-06) + +### Bugfixes + +- Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258)) + + # Synapse 1.92.0rc1 (2023-09-05) ### Features @@ -70,12 +85,6 @@ Stop building Ubuntu Kinetic since it is EOL and repos seem to be dead. * Bump types-psycopg2 from 2.9.21.10 to 2.9.21.11. ([\#16200](https://github.com/matrix-org/synapse/issues/16200)) * Bump types-pyyaml from 6.0.12.10 to 6.0.12.11. ([\#16199](https://github.com/matrix-org/synapse/issues/16199)) -# Synapse 1.91.2 (2023-09-06) - -### Bugfixes - -- Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258)) - # Synapse 1.91.1 (2023-09-04) From ab13fb08bf7c20a992ec2796c72d0fbb2a06545c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 13 Sep 2023 10:51:50 +0100 Subject: [PATCH 465/562] Improve logging of replication (#16309) --- changelog.d/16309.misc | 1 + synapse/replication/tcp/handler.py | 2 +- synapse/replication/tcp/resource.py | 7 ++++++- 3 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16309.misc diff --git a/changelog.d/16309.misc b/changelog.d/16309.misc new file mode 100644 index 0000000000..bef5563ee9 --- /dev/null +++ b/changelog.d/16309.misc @@ -0,0 +1 @@ +Small improvements to logging in replication code. diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index d9045d7b73..5642666411 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -644,7 +644,7 @@ class ReplicationCommandHandler: [stream.parse_row(row) for row in rows], ) - logger.info("Caught up with stream '%s' to %i", stream_name, cmd.new_token) + logger.info("Caught up with stream '%s' to %i", stream_name, cmd.new_token) # We've now caught up to position sent to us, notify handler. await self._replication_data_handler.on_position( diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 347467d863..1d9a29d22e 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -191,7 +191,12 @@ class ReplicationStreamer: if updates: logger.info( - "Streaming: %s -> %s", stream.NAME, updates[-1][0] + "Streaming: %s -> %s (limited: %s, updates: %s, max token: %s)", + stream.NAME, + updates[-1][0], + limited, + len(updates), + current_token, ) stream_updates_counter.labels(stream.NAME).inc(len(updates)) From be3c7b08a3e6888e60497a80ebd143bd4df9a719 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 13 Sep 2023 11:54:16 +0100 Subject: [PATCH 466/562] Fix deleting device inbox when using background worker (#16311) Introduced in #16240 The action for the task was only defined on the "master" handler, rather than the base worker one. --- changelog.d/16311.misc | 1 + synapse/handlers/device.py | 62 +++++++++++++++++++------------------- 2 files changed, 32 insertions(+), 31 deletions(-) create mode 100644 changelog.d/16311.misc diff --git a/changelog.d/16311.misc b/changelog.d/16311.misc new file mode 100644 index 0000000000..4f266c1fb0 --- /dev/null +++ b/changelog.d/16311.misc @@ -0,0 +1 @@ +Delete device messages asynchronously and in staged batches using the task scheduler. 
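The device.py diff below moves both the batch-size constant and the task action into `DeviceWorkerHandler`, so that `register_action` runs on every process rather than only on the master. A schematic sketch of the pattern with hypothetical names (`MyWorkerHandler`, `my_cleanup_task`); the import paths follow the task-scheduler usage in the surrounding patches but should be treated as assumptions.

    from typing import Optional, Tuple

    from synapse.types import JsonMapping, ScheduledTask, TaskStatus


    class MyWorkerHandler:
        """Base class shared by the master and worker processes."""

        def __init__(self, hs) -> None:  # hs: the Synapse HomeServer
            self._task_scheduler = hs.get_task_scheduler()
            # Registering here means any process running the scheduler loop can
            # resolve the action name back to a callable. Registering only in a
            # master-only subclass is exactly the bug this patch fixes.
            self._task_scheduler.register_action(
                self._do_cleanup, "my_cleanup_task"
            )

        async def _do_cleanup(
            self, task: ScheduledTask
        ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
            assert task.params is not None
            # ... delete one bounded batch of rows for task.params["user_id"] ...
            finished = True  # in practice: whether the batch came up short
            if finished:
                return TaskStatus.COMPLETE, None, None
            # Stay active; the scheduler loop will invoke the action again later.
            return TaskStatus.ACTIVE, None, None

Scheduling then happens from anywhere via `await self._task_scheduler.schedule_task("my_cleanup_task", resource_id=..., params={...})`, as the sync-handler change in the next patch shows.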
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 9d240ad4ee..e2ae3da67e 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -91,9 +91,14 @@ class DeviceWorkerHandler: self._query_appservices_for_keys = ( hs.config.experimental.msc3984_appservice_key_query ) + self._task_scheduler = hs.get_task_scheduler() self.device_list_updater = DeviceListWorkerUpdater(hs) + self._task_scheduler.register_action( + self._delete_device_messages, DELETE_DEVICE_MSGS_TASK_NAME + ) + @trace async def get_devices_by_user(self, user_id: str) -> List[JsonDict]: """ @@ -383,6 +388,32 @@ class DeviceWorkerHandler: "Trying handling device list state for partial join: not supported on workers." ) + DEVICE_MSGS_DELETE_BATCH_LIMIT = 100 + + async def _delete_device_messages( + self, + task: ScheduledTask, + ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + """Scheduler task to delete device messages in batch of `DEVICE_MSGS_DELETE_BATCH_LIMIT`.""" + assert task.params is not None + user_id = task.params["user_id"] + device_id = task.params["device_id"] + up_to_stream_id = task.params["up_to_stream_id"] + + res = await self.store.delete_messages_for_device( + user_id=user_id, + device_id=device_id, + up_to_stream_id=up_to_stream_id, + limit=DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT, + ) + + if res < DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT: + return TaskStatus.COMPLETE, None, None + else: + # There is probably still device messages to be deleted, let's keep the task active and it will be run + # again in a subsequent scheduler loop run (probably the next one, if not too many tasks are running). + return TaskStatus.ACTIVE, None, None + class DeviceHandler(DeviceWorkerHandler): device_list_updater: "DeviceListUpdater" @@ -394,7 +425,6 @@ class DeviceHandler(DeviceWorkerHandler): self._account_data_handler = hs.get_account_data_handler() self._storage_controllers = hs.get_storage_controllers() self.db_pool = hs.get_datastores().main.db_pool - self._task_scheduler = hs.get_task_scheduler() self.device_list_updater = DeviceListUpdater(hs, self) @@ -428,10 +458,6 @@ class DeviceHandler(DeviceWorkerHandler): self._delete_stale_devices, ) - self._task_scheduler.register_action( - self._delete_device_messages, DELETE_DEVICE_MSGS_TASK_NAME - ) - def _check_device_name_length(self, name: Optional[str]) -> None: """ Checks whether a device name is longer than the maximum allowed length. @@ -590,32 +616,6 @@ class DeviceHandler(DeviceWorkerHandler): await self.notify_device_update(user_id, device_ids) - DEVICE_MSGS_DELETE_BATCH_LIMIT = 100 - - async def _delete_device_messages( - self, - task: ScheduledTask, - ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: - """Scheduler task to delete device messages in batch of `DEVICE_MSGS_DELETE_BATCH_LIMIT`.""" - assert task.params is not None - user_id = task.params["user_id"] - device_id = task.params["device_id"] - up_to_stream_id = task.params["up_to_stream_id"] - - res = await self.store.delete_messages_for_device( - user_id=user_id, - device_id=device_id, - up_to_stream_id=up_to_stream_id, - limit=DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT, - ) - - if res < DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT: - return TaskStatus.COMPLETE, None, None - else: - # There is probably still device messages to be deleted, let's keep the task active and it will be run - # again in a subsequent scheduler loop run (probably the next one, if not too many tasks are running). 
- return TaskStatus.ACTIVE, None, None - async def update_device(self, user_id: str, device_id: str, content: dict) -> None: """Update the given device From e9addf6a01ab173bcf0aeeae35d7052a5bde9454 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 13 Sep 2023 11:59:44 +0100 Subject: [PATCH 467/562] Don't schedule an async task on every sync (#16312) --- changelog.d/16312.misc | 1 + synapse/handlers/sync.py | 37 ++++++++++++++++++++++++++----------- 2 files changed, 27 insertions(+), 11 deletions(-) create mode 100644 changelog.d/16312.misc diff --git a/changelog.d/16312.misc b/changelog.d/16312.misc new file mode 100644 index 0000000000..4f266c1fb0 --- /dev/null +++ b/changelog.d/16312.misc @@ -0,0 +1 @@ +Delete device messages asynchronously and in staged batches using the task scheduler. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 0ccd7d250c..f1f19666d7 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -362,21 +362,36 @@ class SyncHandler: # (since we now know that the device has received them) if since_token is not None: since_stream_id = since_token.to_device_key - # Delete device messages asynchronously and in batches using the task scheduler - await self._task_scheduler.schedule_task( - DELETE_DEVICE_MSGS_TASK_NAME, - resource_id=sync_config.device_id, - params={ - "user_id": sync_config.user.to_string(), - "device_id": sync_config.device_id, - "up_to_stream_id": since_stream_id, - }, + # Fast path: delete a limited number of to-device messages up front. + # We do this to avoid the overhead of scheduling a task for every + # sync. + device_deletion_limit = 100 + deleted = await self.store.delete_messages_for_device( + sync_config.user.to_string(), + sync_config.device_id, + since_stream_id, + limit=device_deletion_limit, ) logger.debug( - "Deletion of to-device messages up to %d scheduled", - since_stream_id, + "Deleted %d to-device messages up to %d", deleted, since_stream_id ) + # If we hit the limit, schedule a background task to delete the rest. + if deleted >= device_deletion_limit: + await self._task_scheduler.schedule_task( + DELETE_DEVICE_MSGS_TASK_NAME, + resource_id=sync_config.device_id, + params={ + "user_id": sync_config.user.to_string(), + "device_id": sync_config.device_id, + "up_to_stream_id": since_stream_id, + }, + ) + logger.debug( + "Deletion of to-device messages up to %d scheduled", + since_stream_id, + ) + if timeout == 0 or since_token is None or full_state: # we are going to return immediately, so don't bother calling # notifier.wait_for_events. From d38d0dffc94b6269ed7ff5163d60958be3e6c304 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 13 Sep 2023 07:57:19 -0400 Subject: [PATCH 468/562] Use StrCollection in additional places. 
(#16301) --- changelog.d/16301.misc | 1 + synapse/app/_base.py | 12 +++---- synapse/config/_base.py | 3 +- synapse/events/__init__.py | 5 ++- synapse/events/builder.py | 8 ++--- synapse/events/validator.py | 6 ++-- synapse/http/client.py | 5 ++- synapse/http/servlet.py | 33 +++++++++---------- synapse/metrics/__init__.py | 8 ++--- synapse/notifier.py | 6 ++-- synapse/rest/client/_base.py | 4 +-- synapse/state/__init__.py | 13 ++++---- synapse/state/v1.py | 5 ++- synapse/state/v2.py | 7 ++-- .../databases/main/event_federation.py | 4 +-- synapse/visibility.py | 6 ++-- 16 files changed, 59 insertions(+), 67 deletions(-) create mode 100644 changelog.d/16301.misc diff --git a/changelog.d/16301.misc b/changelog.d/16301.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16301.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/synapse/app/_base.py b/synapse/app/_base.py index a94b57a671..9ac7e4313e 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -27,9 +27,7 @@ from typing import ( Any, Awaitable, Callable, - Collection, Dict, - Iterable, List, NoReturn, Optional, @@ -76,7 +74,7 @@ from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_ from synapse.module_api.callbacks.third_party_event_rules_callbacks import ( load_legacy_third_party_event_rules, ) -from synapse.types import ISynapseReactor +from synapse.types import ISynapseReactor, StrCollection from synapse.util import SYNAPSE_VERSION from synapse.util.caches.lrucache import setup_expire_lru_cache_entries from synapse.util.daemonize import daemonize_process @@ -278,7 +276,7 @@ def register_start( reactor.callWhenRunning(lambda: defer.ensureDeferred(wrapper())) -def listen_metrics(bind_addresses: Iterable[str], port: int) -> None: +def listen_metrics(bind_addresses: StrCollection, port: int) -> None: """ Start Prometheus metrics server. """ @@ -315,7 +313,7 @@ def _set_prometheus_client_use_created_metrics(new_value: bool) -> None: def listen_manhole( - bind_addresses: Collection[str], + bind_addresses: StrCollection, port: int, manhole_settings: ManholeConfig, manhole_globals: dict, @@ -339,7 +337,7 @@ def listen_manhole( def listen_tcp( - bind_addresses: Collection[str], + bind_addresses: StrCollection, port: int, factory: ServerFactory, reactor: IReactorTCP = reactor, @@ -448,7 +446,7 @@ def listen_http( def listen_ssl( - bind_addresses: Collection[str], + bind_addresses: StrCollection, port: int, factory: ServerFactory, context_factory: IOpenSSLContextFactory, diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 58856839e1..c5816105f4 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -26,7 +26,6 @@ from textwrap import dedent from typing import ( Any, ClassVar, - Collection, Dict, Iterable, Iterator, @@ -384,7 +383,7 @@ class RootConfig: config_classes: List[Type[Config]] = [] - def __init__(self, config_files: Collection[str] = ()): + def __init__(self, config_files: StrSequence = ()): # Capture absolute paths here, so we can reload config after we daemonize. 
self.config_files = [os.path.abspath(path) for path in config_files] diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 35257a3b1b..3c1777b7ec 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -25,7 +25,6 @@ from typing import ( Iterable, List, Optional, - Sequence, Tuple, Type, TypeVar, @@ -408,7 +407,7 @@ class EventBase(metaclass=abc.ABCMeta): def keys(self) -> Iterable[str]: return self._dict.keys() - def prev_event_ids(self) -> Sequence[str]: + def prev_event_ids(self) -> List[str]: """Returns the list of prev event IDs. The order matches the order specified in the event, though there is no meaning to it. @@ -553,7 +552,7 @@ class FrozenEventV2(EventBase): self._event_id = "$" + encode_base64(compute_event_reference_hash(self)[1]) return self._event_id - def prev_event_ids(self) -> Sequence[str]: + def prev_event_ids(self) -> List[str]: """Returns the list of prev event IDs. The order matches the order specified in the event, though there is no meaning to it. diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 14ea0e6640..1165c017ba 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Any, Collection, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import attr from signedjson.types import SigningKey @@ -28,7 +28,7 @@ from synapse.event_auth import auth_types_for_event from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict from synapse.state import StateHandler from synapse.storage.databases.main import DataStore -from synapse.types import EventID, JsonDict +from synapse.types import EventID, JsonDict, StrCollection from synapse.types.state import StateFilter from synapse.util import Clock from synapse.util.stringutils import random_string @@ -103,7 +103,7 @@ class EventBuilder: async def build( self, - prev_event_ids: Collection[str], + prev_event_ids: StrCollection, auth_event_ids: Optional[List[str]], depth: Optional[int] = None, ) -> EventBase: @@ -136,7 +136,7 @@ class EventBuilder: format_version = self.room_version.event_format # The types of auth/prev events changes between event versions. - prev_events: Union[Collection[str], List[Tuple[str, Dict[str, str]]]] + prev_events: Union[StrCollection, List[Tuple[str, Dict[str, str]]]] auth_events: Union[List[str], List[Tuple[str, Dict[str, str]]]] if format_version == EventFormatVersions.ROOM_V1_V2: auth_events = await self._store.add_event_hashes(auth_event_ids) diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 34625dd7a1..5da50cb0d2 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import collections.abc -from typing import Iterable, List, Type, Union, cast +from typing import List, Type, Union, cast import jsonschema from pydantic import Field, StrictBool, StrictStr @@ -36,7 +36,7 @@ from synapse.events.utils import ( from synapse.federation.federation_server import server_matches_acl_event from synapse.http.servlet import validate_json_object from synapse.rest.models import RequestBodyModel -from synapse.types import EventID, JsonDict, RoomID, UserID +from synapse.types import EventID, JsonDict, RoomID, StrCollection, UserID class EventValidator: @@ -225,7 +225,7 @@ class EventValidator: self._ensure_state_event(event) - def _ensure_strings(self, d: JsonDict, keys: Iterable[str]) -> None: + def _ensure_strings(self, d: JsonDict, keys: StrCollection) -> None: for s in keys: if s not in d: raise SynapseError(400, "'%s' not in content" % (s,)) diff --git a/synapse/http/client.py b/synapse/http/client.py index ca2cdbc6e2..c750e03b36 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -78,7 +78,7 @@ from synapse.http.replicationagent import ReplicationAgent from synapse.http.types import QueryParams from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.opentracing import set_tag, start_active_span, tags -from synapse.types import ISynapseReactor +from synapse.types import ISynapseReactor, StrSequence from synapse.util import json_decoder from synapse.util.async_helpers import timeout_deferred @@ -108,10 +108,9 @@ RawHeaders = Union[Mapping[str, "RawHeaderValue"], Mapping[bytes, "RawHeaderValu # the value actually has to be a List, but List is invariant so we can't specify that # the entries can either be Lists or bytes. RawHeaderValue = Union[ - List[str], + StrSequence, List[bytes], List[Union[str, bytes]], - Tuple[str, ...], Tuple[bytes, ...], Tuple[Union[str, bytes], ...], ] diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index fc62793628..5d79d31579 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -18,7 +18,6 @@ import logging from http import HTTPStatus from typing import ( TYPE_CHECKING, - Iterable, List, Mapping, Optional, @@ -38,7 +37,7 @@ from twisted.web.server import Request from synapse.api.errors import Codes, SynapseError from synapse.http import redact_uri from synapse.http.server import HttpServer -from synapse.types import JsonDict, RoomAlias, RoomID +from synapse.types import JsonDict, RoomAlias, RoomID, StrCollection from synapse.util import json_decoder if TYPE_CHECKING: @@ -340,7 +339,7 @@ def parse_string( name: str, default: str, *, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> str: ... @@ -352,7 +351,7 @@ def parse_string( name: str, *, required: Literal[True], - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> str: ... @@ -365,7 +364,7 @@ def parse_string( *, default: Optional[str] = None, required: bool = False, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> Optional[str]: ... 
@@ -376,7 +375,7 @@ def parse_string( name: str, default: Optional[str] = None, required: bool = False, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> Optional[str]: """ @@ -485,7 +484,7 @@ def parse_enum( def _parse_string_value( value: bytes, - allowed_values: Optional[Iterable[str]], + allowed_values: Optional[StrCollection], name: str, encoding: str, ) -> str: @@ -511,7 +510,7 @@ def parse_strings_from_args( args: Mapping[bytes, Sequence[bytes]], name: str, *, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> Optional[List[str]]: ... @@ -523,7 +522,7 @@ def parse_strings_from_args( name: str, default: List[str], *, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> List[str]: ... @@ -535,7 +534,7 @@ def parse_strings_from_args( name: str, *, required: Literal[True], - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> List[str]: ... @@ -548,7 +547,7 @@ def parse_strings_from_args( default: Optional[List[str]] = None, *, required: bool = False, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> Optional[List[str]]: ... @@ -559,7 +558,7 @@ def parse_strings_from_args( name: str, default: Optional[List[str]] = None, required: bool = False, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> Optional[List[str]]: """ @@ -610,7 +609,7 @@ def parse_string_from_args( name: str, default: Optional[str] = None, *, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> Optional[str]: ... @@ -623,7 +622,7 @@ def parse_string_from_args( default: Optional[str] = None, *, required: Literal[True], - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> str: ... @@ -635,7 +634,7 @@ def parse_string_from_args( name: str, default: Optional[str] = None, required: bool = False, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> Optional[str]: ... 
@@ -646,7 +645,7 @@ def parse_string_from_args( name: str, default: Optional[str] = None, required: bool = False, - allowed_values: Optional[Iterable[str]] = None, + allowed_values: Optional[StrCollection] = None, encoding: str = "ascii", ) -> Optional[str]: """ @@ -821,7 +820,7 @@ def parse_and_validate_json_object_from_request( return validate_json_object(content, model_type) -def assert_params_in_dict(body: JsonDict, required: Iterable[str]) -> None: +def assert_params_in_dict(body: JsonDict, required: StrCollection) -> None: absent = [] for k in required: if k not in body: diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 39fc629937..3cf2fbc3e2 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -25,7 +25,6 @@ from typing import ( Iterable, Mapping, Optional, - Sequence, Set, Tuple, Type, @@ -49,6 +48,7 @@ import synapse.metrics._reactor_metrics # noqa: F401 from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager from synapse.metrics._twisted_exposition import MetricsResource, generate_latest from synapse.metrics._types import Collector +from synapse.types import StrSequence from synapse.util import SYNAPSE_VERSION logger = logging.getLogger(__name__) @@ -81,7 +81,7 @@ class LaterGauge(Collector): name: str desc: str - labels: Optional[Sequence[str]] = attr.ib(hash=False) + labels: Optional[StrSequence] = attr.ib(hash=False) # callback: should either return a value (if there are no labels for this metric), # or dict mapping from a label tuple to a value caller: Callable[ @@ -143,8 +143,8 @@ class InFlightGauge(Generic[MetricsEntry], Collector): self, name: str, desc: str, - labels: Sequence[str], - sub_metrics: Sequence[str], + labels: StrSequence, + sub_metrics: StrSequence, ): self.name = name self.desc = desc diff --git a/synapse/notifier.py b/synapse/notifier.py index 68115bca70..fc39e5c963 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -104,7 +104,7 @@ class _NotifierUserStream: def __init__( self, user_id: str, - rooms: Collection[str], + rooms: StrCollection, current_token: StreamToken, time_now_ms: int, ): @@ -457,7 +457,7 @@ class Notifier: stream_key: str, new_token: Union[int, RoomStreamToken], users: Optional[Collection[Union[str, UserID]]] = None, - rooms: Optional[Collection[str]] = None, + rooms: Optional[StrCollection] = None, ) -> None: """Used to inform listeners that something has happened event wise. 
@@ -529,7 +529,7 @@ class Notifier: user_id: str, timeout: int, callback: Callable[[StreamToken, StreamToken], Awaitable[T]], - room_ids: Optional[Collection[str]] = None, + room_ids: Optional[StrCollection] = None, from_token: StreamToken = StreamToken.START, ) -> T: """Wait until the callback returns a non empty response or the diff --git a/synapse/rest/client/_base.py b/synapse/rest/client/_base.py index 5c1c19e1f3..73c568ef75 100644 --- a/synapse/rest/client/_base.py +++ b/synapse/rest/client/_base.py @@ -20,14 +20,14 @@ from typing import Any, Awaitable, Callable, Iterable, Pattern, Tuple, TypeVar, from synapse.api.errors import InteractiveAuthIncompleteError from synapse.api.urls import CLIENT_API_PREFIX -from synapse.types import JsonDict +from synapse.types import JsonDict, StrCollection logger = logging.getLogger(__name__) def client_patterns( path_regex: str, - releases: Iterable[str] = ("r0", "v3"), + releases: StrCollection = ("r0", "v3"), unstable: bool = True, v1: bool = False, ) -> Iterable[Pattern]: diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 1b91cf5eaa..e977ed1044 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -20,7 +20,6 @@ from typing import ( Any, Awaitable, Callable, - Collection, DefaultDict, Dict, FrozenSet, @@ -49,7 +48,7 @@ from synapse.logging.opentracing import tag_args, trace from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet from synapse.state import v1, v2 from synapse.storage.databases.main.events_worker import EventRedactBehaviour -from synapse.types import StateMap +from synapse.types import StateMap, StrCollection from synapse.types.state import StateFilter from synapse.util.async_helpers import Linearizer from synapse.util.caches.expiringcache import ExpiringCache @@ -197,7 +196,7 @@ class StateHandler: async def compute_state_after_events( self, room_id: str, - event_ids: Collection[str], + event_ids: StrCollection, state_filter: Optional[StateFilter] = None, await_full_state: bool = True, ) -> StateMap[str]: @@ -231,7 +230,7 @@ class StateHandler: return await ret.get_state(self._state_storage_controller, state_filter) async def get_current_user_ids_in_room( - self, room_id: str, latest_event_ids: Collection[str] + self, room_id: str, latest_event_ids: StrCollection ) -> Set[str]: """ Get the users IDs who are currently in a room. @@ -256,7 +255,7 @@ class StateHandler: return await self.store.get_joined_user_ids_from_state(room_id, state) async def get_hosts_in_room_at_events( - self, room_id: str, event_ids: Collection[str] + self, room_id: str, event_ids: StrCollection ) -> FrozenSet[str]: """Get the hosts that were in a room at the given event ids @@ -470,7 +469,7 @@ class StateHandler: @trace @measure_func() async def resolve_state_groups_for_events( - self, room_id: str, event_ids: Collection[str], await_full_state: bool = True + self, room_id: str, event_ids: StrCollection, await_full_state: bool = True ) -> _StateCacheEntry: """Given a list of event_ids this method fetches the state at each event, resolves conflicts between them and returns them. 
@@ -882,7 +881,7 @@ class StateResolutionStore: store: "DataStore" def get_events( - self, event_ids: Collection[str], allow_rejected: bool = False + self, event_ids: StrCollection, allow_rejected: bool = False ) -> Awaitable[Dict[str, EventBase]]: """Get events from the database diff --git a/synapse/state/v1.py b/synapse/state/v1.py index 500e384695..c76a2f082e 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -17,7 +17,6 @@ import logging from typing import ( Awaitable, Callable, - Collection, Dict, Iterable, List, @@ -32,7 +31,7 @@ from synapse.api.constants import EventTypes from synapse.api.errors import AuthError from synapse.api.room_versions import RoomVersion from synapse.events import EventBase -from synapse.types import MutableStateMap, StateMap +from synapse.types import MutableStateMap, StateMap, StrCollection logger = logging.getLogger(__name__) @@ -45,7 +44,7 @@ async def resolve_events_with_store( room_version: RoomVersion, state_sets: Sequence[StateMap[str]], event_map: Optional[Dict[str, EventBase]], - state_map_factory: Callable[[Collection[str]], Awaitable[Dict[str, EventBase]]], + state_map_factory: Callable[[StrCollection], Awaitable[Dict[str, EventBase]]], ) -> StateMap[str]: """ Args: diff --git a/synapse/state/v2.py b/synapse/state/v2.py index 44c49274a9..1752f95db8 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -19,7 +19,6 @@ from typing import ( Any, Awaitable, Callable, - Collection, Dict, Generator, Iterable, @@ -39,7 +38,7 @@ from synapse.api.constants import EventTypes from synapse.api.errors import AuthError from synapse.api.room_versions import RoomVersion from synapse.events import EventBase -from synapse.types import MutableStateMap, StateMap +from synapse.types import MutableStateMap, StateMap, StrCollection logger = logging.getLogger(__name__) @@ -56,7 +55,7 @@ class StateResolutionStore(Protocol): # This is usually synapse.state.StateResolutionStore, but it's replaced with a # TestStateResolutionStore in tests. def get_events( - self, event_ids: Collection[str], allow_rejected: bool = False + self, event_ids: StrCollection, allow_rejected: bool = False ) -> Awaitable[Dict[str, EventBase]]: ... 
@@ -366,7 +365,7 @@ async def _get_auth_chain_difference( union = unpersisted_set_ids[0].union(*unpersisted_set_ids[1:]) intersection = unpersisted_set_ids[0].intersection(*unpersisted_set_ids[1:]) - auth_difference_unpersisted_part: Collection[str] = union - intersection + auth_difference_unpersisted_part: StrCollection = union - intersection else: auth_difference_unpersisted_part = () state_sets_ids = [set(state_set.values()) for state_set in state_sets] diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index fab7008a8f..09de8f55e2 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -47,7 +47,7 @@ from synapse.storage.database import ( from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.databases.main.signatures import SignatureWorkerStore from synapse.storage.engines import PostgresEngine, Sqlite3Engine -from synapse.types import JsonDict, StrCollection +from synapse.types import JsonDict, StrCollection, StrSequence from synapse.util import json_encoder from synapse.util.caches.descriptors import cached from synapse.util.caches.lrucache import LruCache @@ -1179,7 +1179,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas ) @cached(max_entries=5000, iterable=True) - async def get_latest_event_ids_in_room(self, room_id: str) -> Sequence[str]: + async def get_latest_event_ids_in_room(self, room_id: str) -> StrSequence: return await self.db_pool.simple_select_onecol( table="event_forward_extremities", keyvalues={"room_id": room_id}, diff --git a/synapse/visibility.py b/synapse/visibility.py index eac10f6438..f15fdd8314 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -36,7 +36,7 @@ from synapse.events.utils import prune_event from synapse.logging.opentracing import trace from synapse.storage.controllers import StorageControllers from synapse.storage.databases.main import DataStore -from synapse.types import RetentionPolicy, StateMap, get_domain_from_id +from synapse.types import RetentionPolicy, StateMap, StrCollection, get_domain_from_id from synapse.types.state import StateFilter from synapse.util import Clock @@ -150,12 +150,12 @@ async def filter_events_for_client( async def filter_event_for_clients_with_state( store: DataStore, - user_ids: Collection[str], + user_ids: StrCollection, event: EventBase, context: EventContext, is_peeking: bool = False, filter_send_to_client: bool = True, -) -> Collection[str]: +) -> StrCollection: """ Checks to see if an event is visible to the users in the list at the time of the event. From 7afb5e041004bab8b0aaf7909ce3c7a9ef80077f Mon Sep 17 00:00:00 2001 From: Hanadi Date: Wed, 13 Sep 2023 14:33:39 +0200 Subject: [PATCH 469/562] Fix using dehydrated devices (MSC2697) & refresh tokens (#16288) Refresh tokens were not correctly moved to the rehydrated device (similar to how the access token is currently handled). This resulted in invalid refresh tokens after rehydration. 
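Editor's note: the essence of the fix below is that rehydrating a device must re-point every credential bound to the temporary login device, not just the access token. A condensed sketch of the repaired flow, paraphrasing the handler change in the diff that follows (the two store method names are taken from the diff; the rest is illustrative, not the verbatim implementation):

```python
from typing import Any

async def finish_rehydration(
    store: Any, user_id: str, access_token: str, dehydrated_device_id: str
) -> None:
    # Pre-existing behaviour: re-point the access token at the dehydrated
    # device. The store call returns the device ID the token belonged to.
    old_device_id = await store.set_device_for_access_token(
        access_token, dehydrated_device_id
    )
    # The step this patch adds: move any refresh tokens over as well. Without
    # it, a refresh token keeps referencing the old (soon to be destroyed)
    # login device and stops working after rehydration.
    await store.set_device_for_refresh_token(
        user_id, old_device_id, dehydrated_device_id
    )
```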
--- changelog.d/16288.bugfix | 1 + synapse/handlers/device.py | 7 ++++--- .../storage/databases/main/registration.py | 20 +++++++++++++++++++ tests/handlers/test_device.py | 10 +++++++++- 4 files changed, 34 insertions(+), 4 deletions(-) create mode 100644 changelog.d/16288.bugfix diff --git a/changelog.d/16288.bugfix b/changelog.d/16288.bugfix new file mode 100644 index 0000000000..f08d10d1f3 --- /dev/null +++ b/changelog.d/16288.bugfix @@ -0,0 +1 @@ +Fix bug introduced in Synapse 1.49.0 when using dehydrated devices ([MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697)) and refresh tokens. Contributed by Hanadi. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index e2ae3da67e..0d3d5ebc86 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -758,12 +758,13 @@ class DeviceHandler(DeviceWorkerHandler): # If the dehydrated device was successfully deleted (the device ID # matched the stored dehydrated device), then modify the access - # token to use the dehydrated device's ID and copy the old device - # display name to the dehydrated device, and destroy the old device - # ID + # token and refresh token to use the dehydrated device's ID and + # copy the old device display name to the dehydrated device, + # and destroy the old device ID old_device_id = await self.store.set_device_for_access_token( access_token, device_id ) + await self.store.set_device_for_refresh_token(user_id, old_device_id, device_id) old_device = await self.store.get_device(user_id, old_device_id) if old_device is None: raise errors.NotFoundError() diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 7e85b73e8e..e34156dc55 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -2312,6 +2312,26 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore): return next_id + async def set_device_for_refresh_token( + self, user_id: str, old_device_id: str, device_id: str + ) -> None: + """Moves refresh tokens from old device to current device + + Args: + user_id: The user of the devices. + old_device_id: The old device. + device_id: The new device ID. 
+        Returns:
+            None
+        """
+
+        await self.db_pool.simple_update(
+            "refresh_tokens",
+            keyvalues={"user_id": user_id, "device_id": old_device_id},
+            updatevalues={"device_id": device_id},
+            desc="set_device_for_refresh_token",
+        )
+
     def _set_device_for_access_token_txn(
         self, txn: LoggingTransaction, token: str, device_id: str
     ) -> str:
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
index 79d327499b..d4ed068357 100644
--- a/tests/handlers/test_device.py
+++ b/tests/handlers/test_device.py
@@ -461,6 +461,7 @@ class DehydrationTestCase(unittest.HomeserverTestCase):
         self.message_handler = hs.get_device_message_handler()
         self.registration = hs.get_registration_handler()
         self.auth = hs.get_auth()
+        self.auth_handler = hs.get_auth_handler()
         self.store = hs.get_datastores().main
         return hs

@@ -487,11 +488,12 @@ class DehydrationTestCase(unittest.HomeserverTestCase):
         self.assertEqual(device_data, {"device_data": {"foo": "bar"}})

         # Create a new login for the user and dehydrated the device
-        device_id, access_token, _expiration_time, _refresh_token = self.get_success(
+        device_id, access_token, _expiration_time, refresh_token = self.get_success(
             self.registration.register_device(
                 user_id=user_id,
                 device_id=None,
                 initial_display_name="new device",
+                should_issue_refresh_token=True,
             )
         )

@@ -522,6 +524,12 @@ class DehydrationTestCase(unittest.HomeserverTestCase):

         self.assertEqual(user_info.device_id, retrieved_device_id)

+        # make sure the user device has the refresh token
+        assert refresh_token is not None
+        self.get_success(
+            self.auth_handler.refresh_token(refresh_token, 5 * 60 * 1000, 5 * 60 * 1000)
+        )
+
         # make sure the device has the display name that was set from the login
         res = self.get_success(self.handler.get_device(user_id, retrieved_device_id))


From 032cf84f524a972f38977a67d61163f08d9dcf2a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 13 Sep 2023 16:17:06 +0100
Subject: [PATCH 470/562] Remove a reference cycle in background process
 (#16314)

---
 changelog.d/16314.misc | 1 +
 synapse/metrics/background_process_metrics.py | 21 ++++++++++++++++++-
 2 files changed, 21 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/16314.misc

diff --git a/changelog.d/16314.misc b/changelog.d/16314.misc
new file mode 100644
index 0000000000..a32b07112a
--- /dev/null
+++ b/changelog.d/16314.misc
@@ -0,0 +1 @@
+Remove a reference cycle in background processes.

diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
index 9ea4e23b31..f1f1f0cdf9 100644
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -322,13 +322,21 @@ class BackgroundProcessLoggingContext(LoggingContext):
         if instance_id is None:
             instance_id = id(self)
         super().__init__("%s-%s" % (name, instance_id))
-        self._proc = _BackgroundProcess(name, self)
+        self._proc: Optional[_BackgroundProcess] = _BackgroundProcess(name, self)

     def start(self, rusage: "Optional[resource.struct_rusage]") -> None:
         """Log context has started running (again)."""

         super().start(rusage)

+        if self._proc is None:
+            logger.error(
+                "Background process re-entered without a proc: %s",
+                self.name,
+                stack_info=True,
+            )
+            return
+
         # We've become active again so we make sure we're in the list of active
         # procs. (Note that "start" here means we've become active, as opposed
         # to starting for the first time.)
@@ -345,6 +353,14 @@ class BackgroundProcessLoggingContext(LoggingContext): super().__exit__(type, value, traceback) + if self._proc is None: + logger.error( + "Background process exited without a proc: %s", + self.name, + stack_info=True, + ) + return + # The background process has finished. We explicitly remove and manually # update the metrics here so that if nothing is scraping metrics the set # doesn't infinitely grow. @@ -352,3 +368,6 @@ class BackgroundProcessLoggingContext(LoggingContext): _background_processes_active_since_last_scrape.discard(self._proc) self._proc.update_metrics() + + # Set proc to None to break the reference cycle. + self._proc = None From 954921736b88de25c775c519a206449e46b3bf07 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 14 Sep 2023 12:46:30 +0100 Subject: [PATCH 471/562] Refactor `get_user_by_id` (#16316) --- changelog.d/16316.misc | 1 + synapse/api/auth/internal.py | 2 +- synapse/api/auth/msc3861_delegated.py | 2 +- synapse/handlers/account.py | 2 +- synapse/handlers/admin.py | 49 ++++++------ synapse/handlers/message.py | 6 +- synapse/module_api/__init__.py | 4 +- synapse/rest/consent/consent_resource.py | 2 +- .../server_notices/consent_server_notices.py | 6 +- synapse/storage/databases/main/client_ips.py | 11 +++ .../storage/databases/main/registration.py | 76 ++++++------------- synapse/types/__init__.py | 10 ++- tests/api/test_auth.py | 12 ++- tests/storage/test_registration.py | 48 ++++++------ 14 files changed, 108 insertions(+), 123 deletions(-) create mode 100644 changelog.d/16316.misc diff --git a/changelog.d/16316.misc b/changelog.d/16316.misc new file mode 100644 index 0000000000..aa0644f278 --- /dev/null +++ b/changelog.d/16316.misc @@ -0,0 +1 @@ +Refactor `get_user_by_id`. diff --git a/synapse/api/auth/internal.py b/synapse/api/auth/internal.py index 6a5fd44ec0..a75f6f2cc4 100644 --- a/synapse/api/auth/internal.py +++ b/synapse/api/auth/internal.py @@ -268,7 +268,7 @@ class InternalAuth(BaseAuth): stored_user = await self.store.get_user_by_id(user_id) if not stored_user: raise InvalidClientTokenError("Unknown user_id %s" % user_id) - if not stored_user["is_guest"]: + if not stored_user.is_guest: raise InvalidClientTokenError( "Guest access token used for regular user" ) diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index ef5d3f9b81..31bb035cc8 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -300,7 +300,7 @@ class MSC3861DelegatedAuth(BaseAuth): user_id = UserID(username, self._hostname) # First try to find a user from the username claim - user_info = await self.store.get_userinfo_by_id(user_id=user_id.to_string()) + user_info = await self.store.get_user_by_id(user_id=user_id.to_string()) if user_info is None: # If the user does not exist, we should create it on the fly # TODO: we could use SCIM to provision users ahead of time and listen diff --git a/synapse/handlers/account.py b/synapse/handlers/account.py index c05a14304c..fa043cca86 100644 --- a/synapse/handlers/account.py +++ b/synapse/handlers/account.py @@ -102,7 +102,7 @@ class AccountHandler: """ status = {"exists": False} - userinfo = await self._main_store.get_userinfo_by_id(user_id.to_string()) + userinfo = await self._main_store.get_user_by_id(user_id.to_string()) if userinfo is not None: status = { diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 2f0e5f3b0a..7092ff3449 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -18,7 +18,7 
@@ from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Set from synapse.api.constants import Direction, Membership from synapse.events import EventBase -from synapse.types import JsonDict, RoomStreamToken, StateMap, UserID +from synapse.types import JsonDict, RoomStreamToken, StateMap, UserID, UserInfo from synapse.visibility import filter_events_for_client if TYPE_CHECKING: @@ -57,38 +57,30 @@ class AdminHandler: async def get_user(self, user: UserID) -> Optional[JsonDict]: """Function to get user details""" - user_info_dict = await self._store.get_user_by_id(user.to_string()) - if user_info_dict is None: + user_info: Optional[UserInfo] = await self._store.get_user_by_id( + user.to_string() + ) + if user_info is None: return None - # Restrict returned information to a known set of fields. This prevents additional - # fields added to get_user_by_id from modifying Synapse's external API surface. - user_info_to_return = { - "name", - "admin", - "deactivated", - "locked", - "shadow_banned", - "creation_ts", - "appservice_id", - "consent_server_notice_sent", - "consent_version", - "consent_ts", - "user_type", - "is_guest", - "last_seen_ts", + user_info_dict = { + "name": user.to_string(), + "admin": user_info.is_admin, + "deactivated": user_info.is_deactivated, + "locked": user_info.locked, + "shadow_banned": user_info.is_shadow_banned, + "creation_ts": user_info.creation_ts, + "appservice_id": user_info.appservice_id, + "consent_server_notice_sent": user_info.consent_server_notice_sent, + "consent_version": user_info.consent_version, + "consent_ts": user_info.consent_ts, + "user_type": user_info.user_type, + "is_guest": user_info.is_guest, } if self._msc3866_enabled: # Only include the approved flag if support for MSC3866 is enabled. - user_info_to_return.add("approved") - - # Restrict returned keys to a known set. 
- user_info_dict = { - key: value - for key, value in user_info_dict.items() - if key in user_info_to_return - } + user_info_dict["approved"] = user_info.approved # Add additional user metadata profile = await self._store.get_profileinfo(user) @@ -105,6 +97,9 @@ class AdminHandler: user_info_dict["external_ids"] = external_ids user_info_dict["erased"] = await self._store.is_user_erased(user.to_string()) + last_seen_ts = await self._store.get_last_seen_for_user_id(user.to_string()) + user_info_dict["last_seen_ts"] = last_seen_ts + return user_info_dict async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> Any: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index d6be18cdef..c036578a3d 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -828,13 +828,13 @@ class EventCreationHandler: u = await self.store.get_user_by_id(user_id) assert u is not None - if u["user_type"] in (UserTypes.SUPPORT, UserTypes.BOT): + if u.user_type in (UserTypes.SUPPORT, UserTypes.BOT): # support and bot users are not required to consent return - if u["appservice_id"] is not None: + if u.appservice_id is not None: # users registered by an appservice are exempt return - if u["consent_version"] == self.config.consent.user_consent_version: + if u.consent_version == self.config.consent.user_consent_version: return consent_uri = self._consent_uri_builder.build_user_consent_uri(user.localpart) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index d6efe10a28..7ec202be23 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -572,7 +572,7 @@ class ModuleApi: Returns: UserInfo object if a user was found, otherwise None """ - return await self._store.get_userinfo_by_id(user_id) + return await self._store.get_user_by_id(user_id) async def get_user_by_req( self, @@ -1878,7 +1878,7 @@ class AccountDataManager: raise TypeError(f"new_data must be a dict; got {type(new_data).__name__}") # Ensure the user exists, so we don't just write to users that aren't there. 
- if await self._store.get_userinfo_by_id(user_id) is None: + if await self._store.get_user_by_id(user_id) is None: raise ValueError(f"User {user_id} does not exist on this server.") await self._handler.add_account_data_for_user(user_id, data_type, new_data) diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index 25f9ea285b..88d3ec1baf 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -129,7 +129,7 @@ class ConsentResource(DirectServeHtmlResource): if u is None: raise NotFoundError("Unknown user") - has_consented = u["consent_version"] == version + has_consented = u.consent_version == version userhmac = userhmac_bytes.decode("ascii") try: diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py index 94025ba41f..a879b6505e 100644 --- a/synapse/server_notices/consent_server_notices.py +++ b/synapse/server_notices/consent_server_notices.py @@ -79,15 +79,15 @@ class ConsentServerNotices: if u is None: return - if u["is_guest"] and not self._send_to_guests: + if u.is_guest and not self._send_to_guests: # don't send to guests return - if u["consent_version"] == self._current_consent_version: + if u.consent_version == self._current_consent_version: # user has already consented return - if u["consent_server_notice_sent"] == self._current_consent_version: + if u.consent_server_notice_sent == self._current_consent_version: # we've already sent a notice to the user return diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index d8d333e11d..7da47c3dd7 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -764,3 +764,14 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke } return list(results.values()) + + async def get_last_seen_for_user_id(self, user_id: str) -> Optional[int]: + """Get the last seen timestamp for a user, if we have it.""" + + return await self.db_pool.simple_select_one_onecol( + table="user_ips", + keyvalues={"user_id": user_id}, + retcol="MAX(last_seen)", + allow_none=True, + desc="get_last_seen_for_user_id", + ) diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index e34156dc55..cc964604e2 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -16,7 +16,7 @@ import logging import random import re -from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union, cast +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, cast import attr @@ -192,8 +192,8 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): ) @cached() - async def get_user_by_id(self, user_id: str) -> Optional[Mapping[str, Any]]: - """Deprecated: use get_userinfo_by_id instead""" + async def get_user_by_id(self, user_id: str) -> Optional[UserInfo]: + """Returns info about the user account, if it exists.""" def get_user_by_id_txn(txn: LoggingTransaction) -> Optional[Dict[str, Any]]: # We could technically use simple_select_one here, but it would not perform @@ -202,16 +202,12 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): txn.execute( """ SELECT - name, password_hash, is_guest, admin, consent_version, consent_ts, + name, is_guest, admin, consent_version, consent_ts, consent_server_notice_sent, appservice_id, creation_ts, user_type, 
deactivated, COALESCE(shadow_banned, FALSE) AS shadow_banned, COALESCE(approved, TRUE) AS approved, - COALESCE(locked, FALSE) AS locked, last_seen_ts + COALESCE(locked, FALSE) AS locked FROM users - LEFT JOIN ( - SELECT user_id, MAX(last_seen) AS last_seen_ts - FROM user_ips GROUP BY user_id - ) ls ON users.name = ls.user_id WHERE name = ? """, (user_id,), @@ -228,51 +224,23 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): desc="get_user_by_id", func=get_user_by_id_txn, ) - - if row is not None: - # If we're using SQLite our boolean values will be integers. Because we - # present some of this data as is to e.g. server admins via REST APIs, we - # want to make sure we're returning the right type of data. - # Note: when adding a column name to this list, be wary of NULLable columns, - # since NULL values will be turned into False. - boolean_columns = [ - "admin", - "deactivated", - "shadow_banned", - "approved", - "locked", - ] - for column in boolean_columns: - row[column] = bool(row[column]) - - return row - - async def get_userinfo_by_id(self, user_id: str) -> Optional[UserInfo]: - """Get a UserInfo object for a user by user ID. - - Note! Currently uses the cache of `get_user_by_id`. Once that deprecated method is removed, - this method should be cached. - - Args: - user_id: The user to fetch user info for. - Returns: - `UserInfo` object if user found, otherwise `None`. - """ - user_data = await self.get_user_by_id(user_id) - if not user_data: + if row is None: return None + return UserInfo( - appservice_id=user_data["appservice_id"], - consent_server_notice_sent=user_data["consent_server_notice_sent"], - consent_version=user_data["consent_version"], - creation_ts=user_data["creation_ts"], - is_admin=bool(user_data["admin"]), - is_deactivated=bool(user_data["deactivated"]), - is_guest=bool(user_data["is_guest"]), - is_shadow_banned=bool(user_data["shadow_banned"]), - user_id=UserID.from_string(user_data["name"]), - user_type=user_data["user_type"], - last_seen_ts=user_data["last_seen_ts"], + appservice_id=row["appservice_id"], + consent_server_notice_sent=row["consent_server_notice_sent"], + consent_version=row["consent_version"], + consent_ts=row["consent_ts"], + creation_ts=row["creation_ts"], + is_admin=bool(row["admin"]), + is_deactivated=bool(row["deactivated"]), + is_guest=bool(row["is_guest"]), + is_shadow_banned=bool(row["shadow_banned"]), + user_id=UserID.from_string(row["name"]), + user_type=row["user_type"], + approved=bool(row["approved"]), + locked=bool(row["locked"]), ) async def is_trial_user(self, user_id: str) -> bool: @@ -290,10 +258,10 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): now = self._clock.time_msec() days = self.config.server.mau_appservice_trial_days.get( - info["appservice_id"], self.config.server.mau_trial_days + info.appservice_id, self.config.server.mau_trial_days ) trial_duration_ms = days * 24 * 60 * 60 * 1000 - is_trial = (now - info["creation_ts"] * 1000) < trial_duration_ms + is_trial = (now - info.creation_ts * 1000) < trial_duration_ms return is_trial @cached() diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 488714f60c..76b0e3e694 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -933,33 +933,37 @@ def get_verify_key_from_cross_signing_key( @attr.s(auto_attribs=True, frozen=True, slots=True) class UserInfo: - """Holds information about a user. Result of get_userinfo_by_id. + """Holds information about a user. Result of get_user_by_id. 
Attributes: user_id: ID of the user. appservice_id: Application service ID that created this user. consent_server_notice_sent: Version of policy documents the user has been sent. consent_version: Version of policy documents the user has consented to. + consent_ts: Time the user consented creation_ts: Creation timestamp of the user. is_admin: True if the user is an admin. is_deactivated: True if the user has been deactivated. is_guest: True if the user is a guest user. is_shadow_banned: True if the user has been shadow-banned. user_type: User type (None for normal user, 'support' and 'bot' other options). - last_seen_ts: Last activity timestamp of the user. + approved: If the user has been "approved" to register on the server. + locked: Whether the user's account has been locked """ user_id: UserID appservice_id: Optional[int] consent_server_notice_sent: Optional[str] consent_version: Optional[str] + consent_ts: Optional[int] user_type: Optional[str] creation_ts: int is_admin: bool is_deactivated: bool is_guest: bool is_shadow_banned: bool - last_seen_ts: Optional[int] + approved: bool + locked: bool class UserProfile(TypedDict): diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index dcd01d5688..e00d7215df 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -188,8 +188,11 @@ class AuthTestCase(unittest.HomeserverTestCase): ) app_service.is_interested_in_user = Mock(return_value=True) self.store.get_app_service_by_token = Mock(return_value=app_service) - # This just needs to return a truth-y value. - self.store.get_user_by_id = AsyncMock(return_value={"is_guest": False}) + + class FakeUserInfo: + is_guest = False + + self.store.get_user_by_id = AsyncMock(return_value=FakeUserInfo()) self.store.get_user_by_access_token = AsyncMock(return_value=None) request = Mock(args={}) @@ -341,7 +344,10 @@ class AuthTestCase(unittest.HomeserverTestCase): ) def test_get_guest_user_from_macaroon(self) -> None: - self.store.get_user_by_id = AsyncMock(return_value={"is_guest": True}) + class FakeUserInfo: + is_guest = True + + self.store.get_user_by_id = AsyncMock(return_value=FakeUserInfo()) self.store.get_user_by_access_token = AsyncMock(return_value=None) user_id = "@baldrick:matrix.org" diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index 95c9792d54..0cca34d355 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -16,7 +16,7 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import UserTypes from synapse.api.errors import ThreepidValidationError from synapse.server import HomeServer -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, UserID, UserInfo from synapse.util import Clock from tests.unittest import HomeserverTestCase, override_config @@ -35,24 +35,22 @@ class RegistrationStoreTestCase(HomeserverTestCase): self.get_success(self.store.register_user(self.user_id, self.pwhash)) self.assertEqual( - { + UserInfo( # TODO(paul): Surely this field should be 'user_id', not 'name' - "name": self.user_id, - "password_hash": self.pwhash, - "admin": 0, - "is_guest": 0, - "consent_version": None, - "consent_ts": None, - "consent_server_notice_sent": None, - "appservice_id": None, - "creation_ts": 0, - "user_type": None, - "deactivated": 0, - "locked": 0, - "shadow_banned": 0, - "approved": 1, - "last_seen_ts": None, - }, + user_id=UserID.from_string(self.user_id), + is_admin=False, + is_guest=False, + consent_server_notice_sent=None, + 
consent_ts=None, + consent_version=None, + appservice_id=None, + creation_ts=0, + user_type=None, + is_deactivated=False, + locked=False, + is_shadow_banned=False, + approved=True, + ), (self.get_success(self.store.get_user_by_id(self.user_id))), ) @@ -65,9 +63,11 @@ class RegistrationStoreTestCase(HomeserverTestCase): user = self.get_success(self.store.get_user_by_id(self.user_id)) assert user - self.assertEqual(user["consent_version"], "1") - self.assertGreater(user["consent_ts"], before_consent) - self.assertLess(user["consent_ts"], self.clock.time_msec()) + self.assertEqual(user.consent_version, "1") + self.assertIsNotNone(user.consent_ts) + assert user.consent_ts is not None + self.assertGreater(user.consent_ts, before_consent) + self.assertLess(user.consent_ts, self.clock.time_msec()) def test_add_tokens(self) -> None: self.get_success(self.store.register_user(self.user_id, self.pwhash)) @@ -215,7 +215,7 @@ class ApprovalRequiredRegistrationTestCase(HomeserverTestCase): user = self.get_success(self.store.get_user_by_id(self.user_id)) assert user is not None - self.assertTrue(user["approved"]) + self.assertTrue(user.approved) approved = self.get_success(self.store.is_user_approved(self.user_id)) self.assertTrue(approved) @@ -228,7 +228,7 @@ class ApprovalRequiredRegistrationTestCase(HomeserverTestCase): user = self.get_success(self.store.get_user_by_id(self.user_id)) assert user is not None - self.assertFalse(user["approved"]) + self.assertFalse(user.approved) approved = self.get_success(self.store.is_user_approved(self.user_id)) self.assertFalse(approved) @@ -248,7 +248,7 @@ class ApprovalRequiredRegistrationTestCase(HomeserverTestCase): user = self.get_success(self.store.get_user_by_id(self.user_id)) self.assertIsNotNone(user) assert user is not None - self.assertEqual(user["approved"], 1) + self.assertEqual(user.approved, 1) approved = self.get_success(self.store.is_user_approved(self.user_id)) self.assertTrue(approved) From 39dc5de39912828372acc4ecbaa46c9e2b3de97e Mon Sep 17 00:00:00 2001 From: 6543 <6543@obermui.de> Date: Thu, 14 Sep 2023 15:13:48 +0200 Subject: [PATCH 472/562] docs: Link to the Alpine Linux community package for Synapse (#16304) --- changelog.d/16304.doc | 1 + docs/setup/installation.md | 8 ++++++++ 2 files changed, 9 insertions(+) create mode 100644 changelog.d/16304.doc diff --git a/changelog.d/16304.doc b/changelog.d/16304.doc new file mode 100644 index 0000000000..53660ec9a4 --- /dev/null +++ b/changelog.d/16304.doc @@ -0,0 +1 @@ +Link to the Alpine Linux community package for Synapse. diff --git a/docs/setup/installation.md b/docs/setup/installation.md index 0357d2a0fb..1f13864a8f 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -155,6 +155,14 @@ sudo pip uninstall py-bcrypt sudo pip install py-bcrypt ``` +#### Alpine Linux + +6543 maintains [Synapse packages for Alpine Linux](https://pkgs.alpinelinux.org/packages?name=synapse&branch=edge) in the community repository. 
Install with:
+
+```sh
+sudo apk add synapse
+```
+
 #### Void Linux

 Synapse can be found in the void repositories as

From e9e2904eb2c0b73eb4154faf41bd360e6168cc92 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 14 Sep 2023 14:56:07 +0100
Subject: [PATCH 473/562] Speed up deleting to-device messages task (#16318)

---
 changelog.d/16318.misc | 1 +
 synapse/handlers/device.py | 27 ++++++++++++++-------------
 2 files changed, 15 insertions(+), 13 deletions(-)
 create mode 100644 changelog.d/16318.misc

diff --git a/changelog.d/16318.misc b/changelog.d/16318.misc
new file mode 100644
index 0000000000..1433a2f246
--- /dev/null
+++ b/changelog.d/16318.misc
@@ -0,0 +1 @@
+Speed up task to delete to-device messages.

diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 0d3d5ebc86..86ad96d030 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -388,7 +388,8 @@ class DeviceWorkerHandler:
             "Trying handling device list state for partial join: not supported on workers."
         )

-    DEVICE_MSGS_DELETE_BATCH_LIMIT = 100
+    DEVICE_MSGS_DELETE_BATCH_LIMIT = 1000
+    DEVICE_MSGS_DELETE_SLEEP_MS = 1000

     async def _delete_device_messages(
         self,
@@ -400,19 +401,19 @@
         device_id = task.params["device_id"]
         up_to_stream_id = task.params["up_to_stream_id"]

-        res = await self.store.delete_messages_for_device(
-            user_id=user_id,
-            device_id=device_id,
-            up_to_stream_id=up_to_stream_id,
-            limit=DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT,
-        )
+        # Delete the messages in batches to avoid too much DB load.
+        while True:
+            res = await self.store.delete_messages_for_device(
+                user_id=user_id,
+                device_id=device_id,
+                up_to_stream_id=up_to_stream_id,
+                limit=DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT,
+            )

-        if res < DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT:
-            return TaskStatus.COMPLETE, None, None
-        else:
-            # There is probably still device messages to be deleted, let's keep the task active and it will be run
-            # again in a subsequent scheduler loop run (probably the next one, if not too many tasks are running).
-            return TaskStatus.ACTIVE, None, None
+            if res < DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT:
+                return TaskStatus.COMPLETE, None, None
+
+            await self.clock.sleep(DeviceHandler.DEVICE_MSGS_DELETE_SLEEP_MS / 1000.0)


 class DeviceHandler(DeviceWorkerHandler):

From 329597022ee02516e5cbee11fcd566e05609b724 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 14 Sep 2023 16:20:47 +0100
Subject: [PATCH 474/562] Some minor performance fixes for task scheduler
 (#16313)

---
 changelog.d/16313.misc | 1 +
 synapse/replication/tcp/handler.py | 6 +-
 .../storage/databases/main/task_scheduler.py | 6 ++
 .../delta/82/02_scheduled_tasks_index.sql | 16 +++
 synapse/util/task_scheduler.py | 100 ++++++++++++------
 5 files changed, 95 insertions(+), 34 deletions(-)
 create mode 100644 changelog.d/16313.misc
 create mode 100644 synapse/storage/schema/main/delta/82/02_scheduled_tasks_index.sql

diff --git a/changelog.d/16313.misc b/changelog.d/16313.misc
new file mode 100644
index 0000000000..4f266c1fb0
--- /dev/null
+++ b/changelog.d/16313.misc
@@ -0,0 +1 @@
+Delete device messages asynchronously and in staged batches using the task scheduler.
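Editor's note: patch 473 above turns "delete one batch per scheduler pass" into a loop that drains the whole backlog in a single task run. The underlying pattern is generic; here is a minimal, self-contained sketch in which `delete_batch` stands in for `delete_messages_for_device` and `asyncio.sleep` for Synapse's `Clock.sleep` (both stand-ins are illustrative, not Synapse APIs):

```python
import asyncio
from typing import Awaitable, Callable

BATCH_LIMIT = 1000   # rows deleted per database round-trip
PAUSE_SECONDS = 1.0  # breather between batches so deletion cannot hog the DB

async def drain_in_batches(delete_batch: Callable[[int], Awaitable[int]]) -> None:
    """Delete in bounded batches until a short batch shows the backlog is gone."""
    while True:
        deleted = await delete_batch(BATCH_LIMIT)
        if deleted < BATCH_LIMIT:
            return  # fewer rows than the limit: nothing left to delete
        await asyncio.sleep(PAUSE_SECONDS)
```

Compared with returning `TaskStatus.ACTIVE` and waiting for the next scheduler pass (the behaviour the diff removes), draining in place clears a large backlog in one run, while the sleep between batches keeps database load bounded.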
diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 5642666411..b668bb5da1 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -672,14 +672,12 @@ class ReplicationCommandHandler: cmd.instance_name, cmd.lock_name, cmd.lock_key ) - async def on_NEW_ACTIVE_TASK( + def on_NEW_ACTIVE_TASK( self, conn: IReplicationConnection, cmd: NewActiveTaskCommand ) -> None: """Called when get a new NEW_ACTIVE_TASK command.""" if self._task_scheduler: - task = await self._task_scheduler.get_task(cmd.data) - if task: - await self._task_scheduler._launch_task(task) + self._task_scheduler.launch_task_by_id(cmd.data) def new_connection(self, connection: IReplicationConnection) -> None: """Called when we have a new connection.""" diff --git a/synapse/storage/databases/main/task_scheduler.py b/synapse/storage/databases/main/task_scheduler.py index 9ab120eea9..5c5372a825 100644 --- a/synapse/storage/databases/main/task_scheduler.py +++ b/synapse/storage/databases/main/task_scheduler.py @@ -53,6 +53,7 @@ class TaskSchedulerWorkerStore(SQLBaseStore): resource_id: Optional[str] = None, statuses: Optional[List[TaskStatus]] = None, max_timestamp: Optional[int] = None, + limit: Optional[int] = None, ) -> List[ScheduledTask]: """Get a list of scheduled tasks from the DB. @@ -62,6 +63,7 @@ class TaskSchedulerWorkerStore(SQLBaseStore): statuses: Limit the returned tasks to the specific statuses max_timestamp: Limit the returned tasks to the ones that have a timestamp inferior to the specified one + limit: Only return `limit` number of rows if set. Returns: a list of `ScheduledTask`, ordered by increasing timestamps """ @@ -94,6 +96,10 @@ class TaskSchedulerWorkerStore(SQLBaseStore): sql = sql + " ORDER BY timestamp" + if limit is not None: + sql += " LIMIT ?" + args.append(limit) + txn.execute(sql, args) return self.db_pool.cursor_to_dict(txn) diff --git a/synapse/storage/schema/main/delta/82/02_scheduled_tasks_index.sql b/synapse/storage/schema/main/delta/82/02_scheduled_tasks_index.sql new file mode 100644 index 0000000000..6b90275139 --- /dev/null +++ b/synapse/storage/schema/main/delta/82/02_scheduled_tasks_index.sql @@ -0,0 +1,16 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +CREATE INDEX IF NOT EXISTS scheduled_tasks_timestamp ON scheduled_tasks(timestamp); diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py index b7de201bde..caf13b3474 100644 --- a/synapse/util/task_scheduler.py +++ b/synapse/util/task_scheduler.py @@ -15,12 +15,14 @@ import logging from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Set, Tuple -from prometheus_client import Gauge - from twisted.python.failure import Failure from synapse.logging.context import nested_logging_context -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics import LaterGauge +from synapse.metrics.background_process_metrics import ( + run_as_background_process, + wrap_as_background_process, +) from synapse.types import JsonMapping, ScheduledTask, TaskStatus from synapse.util.stringutils import random_string @@ -30,12 +32,6 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -running_tasks_gauge = Gauge( - "synapse_scheduler_running_tasks", - "The number of concurrent running tasks handled by the TaskScheduler", -) - - class TaskScheduler: """ This is a simple task sheduler aimed at resumable tasks: usually we use `run_in_background` @@ -70,6 +66,8 @@ class TaskScheduler: # Precision of the scheduler, evaluation of tasks to run will only happen # every `SCHEDULE_INTERVAL_MS` ms SCHEDULE_INTERVAL_MS = 1 * 60 * 1000 # 1mn + # How often to clean up old tasks. + CLEANUP_INTERVAL_MS = 30 * 60 * 1000 # Time before a complete or failed task is deleted from the DB KEEP_TASKS_FOR_MS = 7 * 24 * 60 * 60 * 1000 # 1 week # Maximum number of tasks that can run at the same time @@ -92,13 +90,25 @@ class TaskScheduler: ] = {} self._run_background_tasks = hs.config.worker.run_background_tasks + # Flag to make sure we only try and launch new tasks once at a time. + self._launching_new_tasks = False + if self._run_background_tasks: self._clock.looping_call( - run_as_background_process, + self._launch_scheduled_tasks, TaskScheduler.SCHEDULE_INTERVAL_MS, - "handle_scheduled_tasks", - self._handle_scheduled_tasks, ) + self._clock.looping_call( + self._clean_scheduled_tasks, + TaskScheduler.SCHEDULE_INTERVAL_MS, + ) + + LaterGauge( + "synapse_scheduler_running_tasks", + "The number of concurrent running tasks handled by the TaskScheduler", + labels=None, + caller=lambda: len(self._running_tasks), + ) def register_action( self, @@ -234,6 +244,7 @@ class TaskScheduler: resource_id: Optional[str] = None, statuses: Optional[List[TaskStatus]] = None, max_timestamp: Optional[int] = None, + limit: Optional[int] = None, ) -> List[ScheduledTask]: """Get a list of tasks. Returns all the tasks if no args is provided. @@ -247,6 +258,7 @@ class TaskScheduler: statuses: Limit the returned tasks to the specific statuses max_timestamp: Limit the returned tasks to the ones that have a timestamp inferior to the specified one + limit: Only return `limit` number of rows if set. 
Returns A list of `ScheduledTask`, ordered by increasing timestamps @@ -256,6 +268,7 @@ class TaskScheduler: resource_id=resource_id, statuses=statuses, max_timestamp=max_timestamp, + limit=limit, ) async def delete_task(self, id: str) -> None: @@ -273,34 +286,58 @@ class TaskScheduler: raise Exception(f"Task {id} is currently ACTIVE and can't be deleted") await self._store.delete_scheduled_task(id) - async def _handle_scheduled_tasks(self) -> None: - """Main loop taking care of launching tasks and cleaning up old ones.""" - await self._launch_scheduled_tasks() - await self._clean_scheduled_tasks() + def launch_task_by_id(self, id: str) -> None: + """Try launching the task with the given ID.""" + # Don't bother trying to launch new tasks if we're already at capacity. + if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS: + return + run_as_background_process("launch_task_by_id", self._launch_task_by_id, id) + + async def _launch_task_by_id(self, id: str) -> None: + """Helper async function for `launch_task_by_id`.""" + task = await self.get_task(id) + if task: + await self._launch_task(task) + + @wrap_as_background_process("launch_scheduled_tasks") async def _launch_scheduled_tasks(self) -> None: """Retrieve and launch scheduled tasks that should be running at that time.""" - for task in await self.get_tasks(statuses=[TaskStatus.ACTIVE]): - await self._launch_task(task) - for task in await self.get_tasks( - statuses=[TaskStatus.SCHEDULED], max_timestamp=self._clock.time_msec() - ): - await self._launch_task(task) + # Don't bother trying to launch new tasks if we're already at capacity. + if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS: + return - running_tasks_gauge.set(len(self._running_tasks)) + if self._launching_new_tasks: + return + self._launching_new_tasks = True + + try: + for task in await self.get_tasks( + statuses=[TaskStatus.ACTIVE], limit=self.MAX_CONCURRENT_RUNNING_TASKS + ): + await self._launch_task(task) + for task in await self.get_tasks( + statuses=[TaskStatus.SCHEDULED], + max_timestamp=self._clock.time_msec(), + limit=self.MAX_CONCURRENT_RUNNING_TASKS, + ): + await self._launch_task(task) + + finally: + self._launching_new_tasks = False + + @wrap_as_background_process("clean_scheduled_tasks") async def _clean_scheduled_tasks(self) -> None: """Clean old complete or failed jobs to avoid clutter the DB.""" + now = self._clock.time_msec() for task in await self._store.get_scheduled_tasks( - statuses=[TaskStatus.FAILED, TaskStatus.COMPLETE] + statuses=[TaskStatus.FAILED, TaskStatus.COMPLETE], + max_timestamp=now - TaskScheduler.KEEP_TASKS_FOR_MS, ): # FAILED and COMPLETE tasks should never be running assert task.id not in self._running_tasks - if ( - self._clock.time_msec() - > task.timestamp + TaskScheduler.KEEP_TASKS_FOR_MS - ): - await self._store.delete_scheduled_task(task.id) + await self._store.delete_scheduled_task(task.id) async def _launch_task(self, task: ScheduledTask) -> None: """Launch a scheduled task now. @@ -339,6 +376,9 @@ class TaskScheduler: ) self._running_tasks.remove(task.id) + # Try launch a new task since we've finished with this one. 
+ self._clock.call_later(1, self._launch_scheduled_tasks) + if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS: return @@ -355,4 +395,4 @@ class TaskScheduler: self._running_tasks.add(task.id) await self.update_task(task.id, status=TaskStatus.ACTIVE) - run_as_background_process(task.action, wrapper) + run_as_background_process(f"task-{task.action}", wrapper) From edec0b93cabbe5e03d658a2aa4c2c1b79cf8e85e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 15 Sep 2023 09:10:24 +0100 Subject: [PATCH 475/562] Only use literal strings for process names (#16315) --- changelog.d/16315.misc | 1 + synapse/appservice/scheduler.py | 13 ++++--------- synapse/metrics/background_process_metrics.py | 7 +++++-- synapse/util/caches/expiringcache.py | 4 +--- 4 files changed, 11 insertions(+), 14 deletions(-) create mode 100644 changelog.d/16315.misc diff --git a/changelog.d/16315.misc b/changelog.d/16315.misc new file mode 100644 index 0000000000..d88782c979 --- /dev/null +++ b/changelog.d/16315.misc @@ -0,0 +1 @@ +Only use literal strings for background process names. diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 3a319b0d42..79f95f7653 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -200,9 +200,7 @@ class _ServiceQueuer: if service.id in self.requests_in_flight: return - run_as_background_process( - "as-sender-%s" % (service.id,), self._send_request, service - ) + run_as_background_process("as-sender", self._send_request, service) async def _send_request(self, service: ApplicationService) -> None: # sanity-check: we shouldn't get here if this service already has a sender @@ -478,14 +476,11 @@ class _Recoverer: self.backoff_counter = 1 def recover(self) -> None: - def _retry() -> None: - run_as_background_process( - "as-recoverer-%s" % (self.service.id,), self.retry - ) - delay = 2**self.backoff_counter logger.info("Scheduling retries on %s in %fs", self.service.id, delay) - self.clock.call_later(delay, _retry) + self.clock.call_later( + delay, run_as_background_process, "as-recoverer", self.retry + ) def _backoff(self) -> None: # cap the backoff to be around 8.5min => (2^9) = 512 secs diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index f1f1f0cdf9..fceb7a9f3c 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -48,6 +48,9 @@ from synapse.metrics._types import Collector if TYPE_CHECKING: import resource + # Old versions don't have `LiteralString` + from typing_extensions import LiteralString + logger = logging.getLogger(__name__) @@ -191,7 +194,7 @@ R = TypeVar("R") def run_as_background_process( - desc: str, + desc: "LiteralString", func: Callable[..., Awaitable[Optional[R]]], *args: Any, bg_start_span: bool = True, @@ -259,7 +262,7 @@ P = ParamSpec("P") def wrap_as_background_process( - desc: str, + desc: "LiteralString", ) -> Callable[ [Callable[P, Awaitable[Optional[R]]]], Callable[P, "defer.Deferred[Optional[R]]"], diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 8e4c34039d..e73cf66080 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -84,9 +84,7 @@ class ExpiringCache(Generic[KT, VT]): return def f() -> "defer.Deferred[None]": - return run_as_background_process( - "prune_cache_%s" % self._cache_name, self._prune_cache - ) + return run_as_background_process("prune_cache", self._prune_cache) 
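+        # NB: a fixed literal description; the old per-cache name ("prune_cache_<cache_name>")
+        # would create a separate set of background-process metrics for every cache.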
self._clock.looping_call(f, self._expiry_ms / 2) From 2a0f86f88fdb3d450212541ba7db57df6a184ae3 Mon Sep 17 00:00:00 2001 From: Jason Little Date: Fri, 15 Sep 2023 03:16:45 -0500 Subject: [PATCH 476/562] Convert `_insert_graph_receipts_txn` to `simple_upsert` (#16299) --- changelog.d/16299.misc | 1 + synapse/storage/database.py | 3 +++ synapse/storage/databases/main/receipts.py | 23 +++++++++------------- 3 files changed, 13 insertions(+), 14 deletions(-) create mode 100644 changelog.d/16299.misc diff --git a/changelog.d/16299.misc b/changelog.d/16299.misc new file mode 100644 index 0000000000..d454669151 --- /dev/null +++ b/changelog.d/16299.misc @@ -0,0 +1 @@ +Refactor `receipts_graph` Postgres transactions to stop error messages. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 6c5fcdcec3..697bc5651c 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -1193,6 +1193,7 @@ class DatabasePool: keyvalues: Dict[str, Any], values: Dict[str, Any], insertion_values: Optional[Dict[str, Any]] = None, + where_clause: Optional[str] = None, desc: str = "simple_upsert", ) -> bool: """Insert a row with values + insertion_values; on conflict, update with values. @@ -1243,6 +1244,7 @@ class DatabasePool: keyvalues: The unique key columns and their new values values: The nonunique columns and their new values insertion_values: additional key/values to use only when inserting + where_clause: An index predicate to apply to the upsert. desc: description of the transaction, for logging and metrics Returns: Returns True if a row was inserted or updated (i.e. if `values` is @@ -1263,6 +1265,7 @@ class DatabasePool: keyvalues, values, insertion_values, + where_clause, db_autocommit=autocommit, ) except self.engine.module.IntegrityError as e: diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index e4d10ff250..a074c43989 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -795,9 +795,7 @@ class ReceiptsWorkerStore(SQLBaseStore): now - event_ts, ) - await self.db_pool.runInteraction( - "insert_graph_receipt", - self._insert_graph_receipt_txn, + await self._insert_graph_receipt( room_id, receipt_type, user_id, @@ -810,9 +808,8 @@ class ReceiptsWorkerStore(SQLBaseStore): return stream_id, max_persisted_id - def _insert_graph_receipt_txn( + async def _insert_graph_receipt( self, - txn: LoggingTransaction, room_id: str, receipt_type: str, user_id: str, @@ -822,13 +819,6 @@ class ReceiptsWorkerStore(SQLBaseStore): ) -> None: assert self._can_write_to_receipts - txn.call_after( - self._get_receipts_for_user_with_orderings.invalidate, - (user_id, receipt_type), - ) - # FIXME: This shouldn't invalidate the whole cache - txn.call_after(self._get_linearized_receipts_for_room.invalidate, (room_id,)) - keyvalues = { "room_id": room_id, "receipt_type": receipt_type, @@ -840,8 +830,8 @@ class ReceiptsWorkerStore(SQLBaseStore): else: keyvalues["thread_id"] = thread_id - self.db_pool.simple_upsert_txn( - txn, + await self.db_pool.simple_upsert( + desc="insert_graph_receipt", table="receipts_graph", keyvalues=keyvalues, values={ @@ -851,6 +841,11 @@ class ReceiptsWorkerStore(SQLBaseStore): where_clause=where_clause, ) + self._get_receipts_for_user_with_orderings.invalidate((user_id, receipt_type)) + + # FIXME: This shouldn't invalidate the whole cache + self._get_linearized_receipts_for_room.invalidate((room_id,)) + class ReceiptsBackgroundUpdateStore(SQLBaseStore): 
POPULATE_RECEIPT_EVENT_STREAM_ORDERING = "populate_event_stream_ordering"

From 3cf1a3aa17423f954a9d46de4cced906670d7500 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 15 Sep 2023 13:14:10 +0100
Subject: [PATCH 477/562] Use bookworm as docker base image (#16324)

---
 changelog.d/16324.docker     |  1 +
 docker/Dockerfile            | 14 +++++++-------
 docker/Dockerfile-workers    |  4 ++--
 docker/complement/Dockerfile |  4 ++--
 docker/editable.Dockerfile   |  8 ++++----
 5 files changed, 16 insertions(+), 15 deletions(-)
 create mode 100644 changelog.d/16324.docker

diff --git a/changelog.d/16324.docker b/changelog.d/16324.docker
new file mode 100644
index 0000000000..43b31c6601
--- /dev/null
+++ b/changelog.d/16324.docker
@@ -0,0 +1 @@
+Update docker image to use Debian bookworm as the base.
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 12cff84131..b58e518ec1 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -25,9 +25,9 @@ ARG PYTHON_VERSION=3.11
 ###
 ### Stage 0: generate requirements.txt
 ###
-# We hardcode the use of Debian bullseye here because this could change upstream
-# and other Dockerfiles used for testing are expecting bullseye.
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye as requirements
+# We hardcode the use of Debian bookworm here because this could change upstream
+# and other Dockerfiles used for testing are expecting bookworm.
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as requirements
 
 # RUN --mount is specific to buildkit and is documented at
 # https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
@@ -87,7 +87,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
 ###
 ### Stage 1: builder
 ###
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye as builder
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as builder
 
 # install the OS build deps
 RUN \
@@ -158,7 +158,7 @@ RUN --mount=type=cache,target=/synapse/target,sharing=locked \
 ### Stage 2: runtime
 ###
 
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm
 
 LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
 LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
@@ -173,10 +173,10 @@ RUN \
     gosu \
     libjpeg62-turbo \
     libpq5 \
-    libwebp6 \
+    libwebp7 \
     xmlsec1 \
     libjemalloc2 \
-    libicu67 \
+    libicu72 \
     libssl-dev \
     openssl \
     && rm -rf /var/lib/apt/lists/*
diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
index 31d6d33407..2ceb6ab67c 100644
--- a/docker/Dockerfile-workers
+++ b/docker/Dockerfile-workers
@@ -7,7 +7,7 @@ ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
 # target image. For repeated rebuilds, this is much faster than apt installing
 # each time.
 
-FROM docker.io/library/debian:bullseye-slim AS deps_base
+FROM docker.io/library/debian:bookworm-slim AS deps_base
     RUN \
        --mount=type=cache,target=/var/cache/apt,sharing=locked \
        --mount=type=cache,target=/var/lib/apt,sharing=locked \
@@ -21,7 +21,7 @@ FROM docker.io/library/debian:bullseye-slim AS deps_base
 # which makes it much easier to copy (but we need to make sure we use an image
 # based on the same debian version as the synapse image, to make sure we get
 # the expected version of libc.
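+# (for reference: bullseye ships glibc 2.31, bookworm ships glibc 2.36)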
-FROM docker.io/library/redis:7-bullseye AS redis_base
+FROM docker.io/library/redis:7-bookworm AS redis_base
 
 # now build the final image, based on the regular Synapse docker image
 FROM $FROM
diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile
index 5103068a49..b511e2ab23 100644
--- a/docker/complement/Dockerfile
+++ b/docker/complement/Dockerfile
@@ -20,8 +20,8 @@ FROM $FROM
 # the same debian version as Synapse's docker image (so the versions of the
 # shared libraries match).
 RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
-COPY --from=docker.io/library/postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
-COPY --from=docker.io/library/postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
+COPY --from=docker.io/library/postgres:13-bookworm /usr/lib/postgresql /usr/lib/postgresql
+COPY --from=docker.io/library/postgres:13-bookworm /usr/share/postgresql /usr/share/postgresql
 RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
 ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
 ENV PGDATA=/var/lib/postgresql/data
diff --git a/docker/editable.Dockerfile b/docker/editable.Dockerfile
index c53ce1c718..f18cf6a5d9 100644
--- a/docker/editable.Dockerfile
+++ b/docker/editable.Dockerfile
@@ -8,9 +8,9 @@ ARG PYTHON_VERSION=3.9
 ###
 ### Stage 0: generate requirements.txt
 ###
-# We hardcode the use of Debian bullseye here because this could change upstream
-# and other Dockerfiles used for testing are expecting bullseye.
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye
+# We hardcode the use of Debian bookworm here because this could change upstream
+# and other Dockerfiles used for testing are expecting bookworm.
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm
 
 # Install Rust and other dependencies (stolen from normal Dockerfile)
 # install the OS build deps
@@ -33,7 +33,7 @@ RUN \
     gosu \
     libjpeg62-turbo \
     libpq5 \
-    libwebp6 \
+    libwebp7 \
     xmlsec1 \
     libjemalloc2 \
     && rm -rf /var/lib/apt/lists/*

From edec0b93cabbe5e03d658a2aa4c2c1b79cf8e85e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 15 Sep 2023 13:30:16 +0100
Subject: [PATCH 478/562] 1.92.2

---
 CHANGES.md               | 10 ++++++++++
 changelog.d/16324.docker |  1 -
 debian/changelog         |  6 ++++++
 pyproject.toml           |  2 +-
 4 files changed, 17 insertions(+), 2 deletions(-)
 delete mode 100644 changelog.d/16324.docker

diff --git a/CHANGES.md b/CHANGES.md
index 13c53d2606..077588459a 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,13 @@
+# Synapse 1.92.2 (2023-09-15)
+
+This is a Docker only update to mitigate [CVE-2023-4863](https://cve.org/CVERecord?id=CVE-2023-4863), a critical vulnerability in `libewebp`. Server admins not using Docker should ensure that their `libwebp` is up to date (if installed). We encourage admins to upgrade as soon as possible.
+
+
+### Updates to the Docker image
+
+- Update docker image to use Debian bookworm as the base. ([\#16324](https://github.com/matrix-org/synapse/issues/16324))
+
+
 # Synapse 1.92.1 (2023-09-12)
 
 Stop building Ubuntu Kinetic since it is EOL and repos seem to be dead.
diff --git a/changelog.d/16324.docker b/changelog.d/16324.docker
deleted file mode 100644
index 43b31c6601..0000000000
--- a/changelog.d/16324.docker
+++ /dev/null
@@ -1 +0,0 @@
-Update docker image to use Debian bookworm as the base.
diff --git a/debian/changelog b/debian/changelog
index 9553967098..79e7fccfca 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.92.2) stable; urgency=medium
+
+  * New Synapse release 1.92.2.
+
+ -- Synapse Packaging team  Fri, 15 Sep 2023 13:17:41 +0100
+
 matrix-synapse-py3 (1.92.1) stable; urgency=medium
 
   * New Synapse release 1.92.1.
diff --git a/pyproject.toml b/pyproject.toml
index 821b13f5c4..1144114041 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.92.1"
+version = "1.92.2"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors "]
 license = "Apache-2.0"

From e7274f764b057e5cd4f96e9d67eb011367564411 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 15 Sep 2023 13:34:44 +0100
Subject: [PATCH 479/562] Fix Changelog

---
 CHANGES.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index 077588459a..f913c2069b 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,6 +1,6 @@
 # Synapse 1.92.2 (2023-09-15)
 
-This is a Docker only update to mitigate [CVE-2023-4863](https://cve.org/CVERecord?id=CVE-2023-4863), a critical vulnerability in `libewebp`. Server admins not using Docker should ensure that their `libwebp` is up to date (if installed). We encourage admins to upgrade as soon as possible.
+This is a Docker-only update to mitigate [CVE-2023-4863](https://cve.org/CVERecord?id=CVE-2023-4863), a critical vulnerability in `libwebp`. Server admins not using Docker should ensure that their `libwebp` is up to date (if installed). We encourage admins to upgrade as soon as possible.
 
 
 ### Updates to the Docker image

From dd44ee00b6cf4d900e56857039320660400cff37 Mon Sep 17 00:00:00 2001
From: Mathieu Velten
Date: Fri, 15 Sep 2023 15:37:44 +0200
Subject: [PATCH 480/562] Add automatic purge after all users forget a room
 (#15488)

Also restore purges/shutdowns of rooms after a Synapse restart.

Co-authored-by: Eric Eastwood
Co-authored-by: Erik Johnston
---
 changelog.d/15488.feature                     |   1 +
 .../configuration/config_documentation.md    |  11 +
 synapse/app/generic_worker.py                 |   2 +
 synapse/config/server.py                      |  11 +
 synapse/handlers/pagination.py                | 474 ++++++------------
 synapse/handlers/room.py                      | 173 ++++---
 synapse/handlers/room_member.py               |  30 +-
 synapse/module_api/__init__.py                |  13 +-
 synapse/rest/admin/__init__.py                |  20 +-
 synapse/rest/admin/rooms.py                   |  78 +--
 tests/rest/admin/test_room.py                 | 159 +++++-
 tests/rest/admin/test_server_notice.py        |  18 +-
 tests/rest/client/test_rooms.py               |   6 +-
 13 files changed, 544 insertions(+), 452 deletions(-)
 create mode 100644 changelog.d/15488.feature

diff --git a/changelog.d/15488.feature b/changelog.d/15488.feature
new file mode 100644
index 0000000000..8684d84192
--- /dev/null
+++ b/changelog.d/15488.feature
@@ -0,0 +1 @@
+Add an automatic purge after all users have forgotten a room. Also restore purges/shutdowns of rooms after a Synapse restart.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index a06b3d8a06..885a7bf0a3 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -936,6 +936,17 @@ Example configuration:
 redaction_retention_period: 28d
 ```
 ---
+### `forgotten_room_retention_period`
+
+How long to keep locally forgotten rooms before purging them from the DB.
+
+Defaults to `null`, meaning it's disabled.
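+
+Like other duration settings, it accepts time-unit suffixes such as `m`, `h`, `d`, `w` and `y`.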
+ +Example configuration: +```yaml +forgotten_room_retention_period: 28d +``` +--- ### `user_ips_max_age` How long to track users' last seen time and IPs in the database. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index d25e3548e0..f7c80eee21 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -77,6 +77,7 @@ from synapse.storage.databases.main.monthly_active_users import ( ) from synapse.storage.databases.main.presence import PresenceStore from synapse.storage.databases.main.profile import ProfileWorkerStore +from synapse.storage.databases.main.purge_events import PurgeEventsStore from synapse.storage.databases.main.push_rule import PushRulesWorkerStore from synapse.storage.databases.main.pusher import PusherWorkerStore from synapse.storage.databases.main.receipts import ReceiptsWorkerStore @@ -134,6 +135,7 @@ class GenericWorkerStore( RelationsWorkerStore, EventFederationWorkerStore, EventPushActionsWorkerStore, + PurgeEventsStore, StateGroupWorkerStore, SignatureWorkerStore, UserErasureWorkerStore, diff --git a/synapse/config/server.py b/synapse/config/server.py index b46fa51593..72d30da300 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -486,6 +486,17 @@ class ServerConfig(Config): else: self.redaction_retention_period = None + # How long to keep locally forgotten rooms before purging them from the DB. + forgotten_room_retention_period = config.get( + "forgotten_room_retention_period", None + ) + if forgotten_room_retention_period is not None: + self.forgotten_room_retention_period: Optional[int] = self.parse_duration( + forgotten_room_retention_period + ) + else: + self.forgotten_room_retention_period = None + # How long to keep entries in the `users_ips` table. user_ips_max_age = config.get("user_ips_max_age", "28d") if user_ips_max_age is not None: diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 19cf5a2b43..878f267a4e 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -13,9 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Dict, List, Optional, Set - -import attr +from typing import TYPE_CHECKING, List, Optional, Set, Tuple, cast from twisted.python.failure import Failure @@ -23,16 +21,22 @@ from synapse.api.constants import Direction, EventTypes, Membership from synapse.api.errors import SynapseError from synapse.api.filtering import Filter from synapse.events.utils import SerializeEventConfig -from synapse.handlers.room import ShutdownRoomResponse +from synapse.handlers.room import ShutdownRoomParams, ShutdownRoomResponse from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME from synapse.logging.opentracing import trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.rest.admin._base import assert_user_is_admin from synapse.streams.config import PaginationConfig -from synapse.types import JsonDict, Requester, StrCollection, StreamKeyType +from synapse.types import ( + JsonDict, + JsonMapping, + Requester, + ScheduledTask, + StreamKeyType, + TaskStatus, +) from synapse.types.state import StateFilter from synapse.util.async_helpers import ReadWriteLock -from synapse.util.stringutils import random_string from synapse.visibility import filter_events_for_client if TYPE_CHECKING: @@ -53,80 +57,11 @@ BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD = 3 PURGE_PAGINATION_LOCK_NAME = "purge_pagination_lock" -@attr.s(slots=True, auto_attribs=True) -class PurgeStatus: - """Object tracking the status of a purge request +PURGE_HISTORY_ACTION_NAME = "purge_history" - This class contains information on the progress of a purge request, for - return by get_purge_status. - """ +PURGE_ROOM_ACTION_NAME = "purge_room" - STATUS_ACTIVE = 0 - STATUS_COMPLETE = 1 - STATUS_FAILED = 2 - - STATUS_TEXT = { - STATUS_ACTIVE: "active", - STATUS_COMPLETE: "complete", - STATUS_FAILED: "failed", - } - - # Save the error message if an error occurs - error: str = "" - - # Tracks whether this request has completed. One of STATUS_{ACTIVE,COMPLETE,FAILED}. - status: int = STATUS_ACTIVE - - def asdict(self) -> JsonDict: - ret = {"status": PurgeStatus.STATUS_TEXT[self.status]} - if self.error: - ret["error"] = self.error - return ret - - -@attr.s(slots=True, auto_attribs=True) -class DeleteStatus: - """Object tracking the status of a delete room request - - This class contains information on the progress of a delete room request, for - return by get_delete_status. - """ - - STATUS_PURGING = 0 - STATUS_COMPLETE = 1 - STATUS_FAILED = 2 - STATUS_SHUTTING_DOWN = 3 - - STATUS_TEXT = { - STATUS_PURGING: "purging", - STATUS_COMPLETE: "complete", - STATUS_FAILED: "failed", - STATUS_SHUTTING_DOWN: "shutting_down", - } - - # Tracks whether this request has completed. - # One of STATUS_{PURGING,COMPLETE,FAILED,SHUTTING_DOWN}. - status: int = STATUS_PURGING - - # Save the error message if an error occurs - error: str = "" - - # Saves the result of an action to give it back to REST API - shutdown_room: ShutdownRoomResponse = { - "kicked_users": [], - "failed_to_kick_users": [], - "local_aliases": [], - "new_room_id": None, - } - - def asdict(self) -> JsonDict: - ret = { - "status": DeleteStatus.STATUS_TEXT[self.status], - "shutdown_room": self.shutdown_room, - } - if self.error: - ret["error"] = self.error - return ret +SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME = "shutdown_and_purge_room" class PaginationHandler: @@ -136,9 +71,6 @@ class PaginationHandler: paginating during a purge. 
""" - # when to remove a completed deletion/purge from the results map - CLEAR_PURGE_AFTER_MS = 1000 * 3600 * 24 # 24 hours - def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() @@ -150,17 +82,11 @@ class PaginationHandler: self._room_shutdown_handler = hs.get_room_shutdown_handler() self._relations_handler = hs.get_relations_handler() self._worker_locks = hs.get_worker_locks_handler() + self._task_scheduler = hs.get_task_scheduler() self.pagination_lock = ReadWriteLock() # IDs of rooms in which there currently an active purge *or delete* operation. self._purges_in_progress_by_room: Set[str] = set() - # map from purge id to PurgeStatus - self._purges_by_id: Dict[str, PurgeStatus] = {} - # map from purge id to DeleteStatus - self._delete_by_id: Dict[str, DeleteStatus] = {} - # map from room id to delete ids - # Dict[`room_id`, List[`delete_id`]] - self._delete_by_room: Dict[str, List[str]] = {} self._event_serializer = hs.get_event_client_serializer() self._retention_default_max_lifetime = ( @@ -173,6 +99,9 @@ class PaginationHandler: self._retention_allowed_lifetime_max = ( hs.config.retention.retention_allowed_lifetime_max ) + self._forgotten_room_retention_period = ( + hs.config.server.forgotten_room_retention_period + ) self._is_master = hs.config.worker.worker_app is None if hs.config.retention.retention_enabled and self._is_master: @@ -189,6 +118,14 @@ class PaginationHandler: job.longest_max_lifetime, ) + self._task_scheduler.register_action( + self._purge_history, PURGE_HISTORY_ACTION_NAME + ) + self._task_scheduler.register_action(self._purge_room, PURGE_ROOM_ACTION_NAME) + self._task_scheduler.register_action( + self._shutdown_and_purge_room, SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME + ) + async def purge_history_for_rooms_in_range( self, min_ms: Optional[int], max_ms: Optional[int] ) -> None: @@ -224,7 +161,7 @@ class PaginationHandler: include_null = False logger.info( - "[purge] Running purge job for %s < max_lifetime <= %s (include NULLs = %s)", + "[purge] Running retention purge job for %s < max_lifetime <= %s (include NULLs = %s)", min_ms, max_ms, include_null, @@ -239,10 +176,10 @@ class PaginationHandler: for room_id, retention_policy in rooms.items(): logger.info("[purge] Attempting to purge messages in room %s", room_id) - if room_id in self._purges_in_progress_by_room: + if len(await self.get_delete_tasks_by_room(room_id, only_active=True)) > 0: logger.warning( - "[purge] not purging room %s as there's an ongoing purge running" - " for this room", + "[purge] not purging room %s for retention as there's an ongoing purge" + " running for this room", room_id, ) continue @@ -295,27 +232,20 @@ class PaginationHandler: (stream, topo, _event_id) = r token = "t%d-%d" % (topo, stream) - purge_id = random_string(16) - - self._purges_by_id[purge_id] = PurgeStatus() - - logger.info( - "Starting purging events in room %s (purge_id %s)" % (room_id, purge_id) - ) + logger.info("Starting purging events in room %s", room_id) # We want to purge everything, including local events, and to run the purge in # the background so that it's not blocking any other operation apart from # other purges in the same room. run_as_background_process( - "_purge_history", - self._purge_history, - purge_id, + PURGE_HISTORY_ACTION_NAME, + self.purge_history, room_id, token, True, ) - def start_purge_history( + async def start_purge_history( self, room_id: str, token: str, delete_local_events: bool = False ) -> str: """Start off a history purge on a room. 
@@ -329,40 +259,58 @@ class PaginationHandler:
         Returns:
             unique ID for this purge transaction.
         """
-        if room_id in self._purges_in_progress_by_room:
-            raise SynapseError(
-                400, "History purge already in progress for %s" % (room_id,)
-            )
-
-        purge_id = random_string(16)
+        purge_id = await self._task_scheduler.schedule_task(
+            PURGE_HISTORY_ACTION_NAME,
+            resource_id=room_id,
+            params={"token": token, "delete_local_events": delete_local_events},
+        )
 
         # we log the purge_id here so that it can be tied back to the
         # request id in the log lines.
         logger.info("[purge] starting purge_id %s", purge_id)
 
-        self._purges_by_id[purge_id] = PurgeStatus()
-        run_as_background_process(
-            "purge_history",
-            self._purge_history,
-            purge_id,
-            room_id,
-            token,
-            delete_local_events,
-        )
         return purge_id
 
     async def _purge_history(
-        self, purge_id: str, room_id: str, token: str, delete_local_events: bool
-    ) -> None:
+        self,
+        task: ScheduledTask,
+    ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
+        """
+        Scheduler action to purge some history of a room.
+        """
+        if (
+            task.resource_id is None
+            or task.params is None
+            or "token" not in task.params
+            or "delete_local_events" not in task.params
+        ):
+            return (
+                TaskStatus.FAILED,
+                None,
+                "Not enough parameters passed to _purge_history",
+            )
+        err = await self.purge_history(
+            task.resource_id,
+            task.params["token"],
+            task.params["delete_local_events"],
+        )
+        if err is not None:
+            return TaskStatus.FAILED, None, err
+        return TaskStatus.COMPLETE, None, None
+
+    async def purge_history(
+        self,
+        room_id: str,
+        token: str,
+        delete_local_events: bool,
+    ) -> Optional[str]:
         """Carry out a history purge on a room.
 
         Args:
-            purge_id: The ID for this purge.
             room_id: The room to purge from
             token: topological token to delete events before
             delete_local_events: True to delete local events as well as remote ones
         """
-        self._purges_in_progress_by_room.add(room_id)
         try:
             async with self._worker_locks.acquire_read_write_lock(
                 PURGE_PAGINATION_LOCK_NAME, room_id, write=True
@@ -371,57 +319,68 @@ class PaginationHandler:
                     room_id, token, delete_local_events
                 )
             logger.info("[purge] complete")
-            self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
+            return None
         except Exception:
             f = Failure()
             logger.error(
                 "[purge] failed", exc_info=(f.type, f.value, f.getTracebackObject())
             )
-            self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
-            self._purges_by_id[purge_id].error = f.getErrorMessage()
-        finally:
-            self._purges_in_progress_by_room.discard(room_id)
+            return f.getErrorMessage()
 
-        # remove the purge from the list 24 hours after it completes
-        def clear_purge() -> None:
-            del self._purges_by_id[purge_id]
-
-        self.hs.get_reactor().callLater(
-            PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000, clear_purge
-        )
-
-    def get_purge_status(self, purge_id: str) -> Optional[PurgeStatus]:
-        """Get the current status of an active purge
-
-        Args:
-            purge_id: purge_id returned by start_purge_history
-        """
-        return self._purges_by_id.get(purge_id)
-
-    def get_delete_status(self, delete_id: str) -> Optional[DeleteStatus]:
+    async def get_delete_task(self, delete_id: str) -> Optional[ScheduledTask]:
         """Get the current status of an active deletion
 
         Args:
             delete_id: delete_id returned by start_shutdown_and_purge_room
+                or start_purge_history.
""" - return self._delete_by_id.get(delete_id) + return await self._task_scheduler.get_task(delete_id) - def get_delete_ids_by_room(self, room_id: str) -> Optional[StrCollection]: - """Get all active delete ids by room + async def get_delete_tasks_by_room( + self, room_id: str, only_active: Optional[bool] = False + ) -> List[ScheduledTask]: + """Get complete, failed or active delete tasks by room Args: room_id: room_id that is deleted + only_active: if True, completed&failed tasks will be omitted """ - return self._delete_by_room.get(room_id) + statuses = [TaskStatus.ACTIVE] + if not only_active: + statuses += [TaskStatus.COMPLETE, TaskStatus.FAILED] - async def purge_room(self, room_id: str, force: bool = False) -> None: + return await self._task_scheduler.get_tasks( + actions=[PURGE_ROOM_ACTION_NAME, SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME], + resource_id=room_id, + statuses=statuses, + ) + + async def _purge_room( + self, + task: ScheduledTask, + ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + """ + Scheduler action to purge a room. + """ + if not task.resource_id: + raise Exception("No room id passed to purge_room task") + params = task.params if task.params else {} + await self.purge_room(task.resource_id, params.get("force", False)) + return TaskStatus.COMPLETE, None, None + + async def purge_room( + self, + room_id: str, + force: bool, + ) -> None: """Purge the given room from the database. - This function is part the delete room v1 API. Args: room_id: room to be purged force: set true to skip checking for joined users. """ + logger.info("starting purge room_id=%s force=%s", room_id, force) + async with self._worker_locks.acquire_multi_read_write_lock( [ (PURGE_PAGINATION_LOCK_NAME, room_id), @@ -430,13 +389,20 @@ class PaginationHandler: write=True, ): # first check that we have no users in this room - if not force: - joined = await self.store.is_host_joined(room_id, self._server_name) - if joined: + joined = await self.store.is_host_joined(room_id, self._server_name) + if joined: + if force: + logger.info( + "force-purging room %s with some local users still joined", + room_id, + ) + else: raise SynapseError(400, "Users are still joined to this room") await self._storage_controllers.purge_events.purge_room(room_id) + logger.info("purge complete for room_id %s", room_id) + @trace async def get_messages( self, @@ -711,177 +677,72 @@ class PaginationHandler: async def _shutdown_and_purge_room( self, - delete_id: str, - room_id: str, - requester_user_id: Optional[str], - new_room_user_id: Optional[str] = None, - new_room_name: Optional[str] = None, - message: Optional[str] = None, - block: bool = False, - purge: bool = True, - force_purge: bool = False, - ) -> None: + task: ScheduledTask, + ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: """ - Shuts down and purges a room. - - See `RoomShutdownHandler.shutdown_room` for details of creation of the new room - - Args: - delete_id: The ID for this delete. - room_id: The ID of the room to shut down. - requester_user_id: - User who requested the action. Will be recorded as putting the room on the - blocking list. - If None, the action was not manually requested but instead - triggered automatically, e.g. through a Synapse module - or some other policy. - MUST NOT be None if block=True. - new_room_user_id: - If set, a new room will be created with this user ID - as the creator and admin, and all users in the old room will be - moved into that room. 
If not set, no new room will be created - and the users will just be removed from the old room. - new_room_name: - A string representing the name of the room that new users will - be invited to. Defaults to `Content Violation Notification` - message: - A string containing the first message that will be sent as - `new_room_user_id` in the new room. Ideally this will clearly - convey why the original room was shut down. - Defaults to `Sharing illegal content on this server is not - permitted and rooms in violation will be blocked.` - block: - If set to `true`, this room will be added to a blocking list, - preventing future attempts to join the room. Defaults to `false`. - purge: - If set to `true`, purge the given room from the database. - force_purge: - If set to `true`, the room will be purged from database - also if it fails to remove some users from room. - - Saves a `RoomShutdownHandler.ShutdownRoomResponse` in `DeleteStatus`: + Scheduler action to shutdown and purge a room. """ - - self._purges_in_progress_by_room.add(room_id) - try: - async with self._worker_locks.acquire_read_write_lock( - PURGE_PAGINATION_LOCK_NAME, room_id, write=True - ): - self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN - self._delete_by_id[ - delete_id - ].shutdown_room = await self._room_shutdown_handler.shutdown_room( - room_id=room_id, - requester_user_id=requester_user_id, - new_room_user_id=new_room_user_id, - new_room_name=new_room_name, - message=message, - block=block, - ) - self._delete_by_id[delete_id].status = DeleteStatus.STATUS_PURGING - - if purge: - logger.info("starting purge room_id %s", room_id) - - # first check that we have no users in this room - if not force_purge: - joined = await self.store.is_host_joined( - room_id, self._server_name - ) - if joined: - raise SynapseError( - 400, "Users are still joined to this room" - ) - - await self._storage_controllers.purge_events.purge_room(room_id) - - logger.info("purge complete for room_id %s", room_id) - self._delete_by_id[delete_id].status = DeleteStatus.STATUS_COMPLETE - except Exception: - f = Failure() - logger.error( - "failed", - exc_info=(f.type, f.value, f.getTracebackObject()), - ) - self._delete_by_id[delete_id].status = DeleteStatus.STATUS_FAILED - self._delete_by_id[delete_id].error = f.getErrorMessage() - finally: - self._purges_in_progress_by_room.discard(room_id) - - # remove the delete from the list 24 hours after it completes - def clear_delete() -> None: - del self._delete_by_id[delete_id] - self._delete_by_room[room_id].remove(delete_id) - if not self._delete_by_room[room_id]: - del self._delete_by_room[room_id] - - self.hs.get_reactor().callLater( - PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000, clear_delete + if task.resource_id is None or task.params is None: + raise Exception( + "No room id and/or no parameters passed to shutdown_and_purge_room task" ) - def start_shutdown_and_purge_room( + room_id = task.resource_id + + async def update_result(result: Optional[JsonMapping]) -> None: + await self._task_scheduler.update_task(task.id, result=result) + + shutdown_result = ( + cast(ShutdownRoomResponse, task.result) if task.result else None + ) + + shutdown_result = await self._room_shutdown_handler.shutdown_room( + room_id, + cast(ShutdownRoomParams, task.params), + shutdown_result, + update_result, + ) + + if task.params.get("purge", False): + await self.purge_room( + room_id, + task.params.get("force_purge", False), + ) + + return (TaskStatus.COMPLETE, shutdown_result, None) + + async def 
start_shutdown_and_purge_room(
         self,
         room_id: str,
-        requester_user_id: Optional[str],
-        new_room_user_id: Optional[str] = None,
-        new_room_name: Optional[str] = None,
-        message: Optional[str] = None,
-        block: bool = False,
-        purge: bool = True,
-        force_purge: bool = False,
+        shutdown_params: ShutdownRoomParams,
     ) -> str:
         """Start off the shutdown and purge of a room.
 
         Args:
             room_id: The ID of the room to shut down.
-            requester_user_id:
-                User who requested the action and put the room on the
-                blocking list.
-                If None, the action was not manually requested but instead
-                triggered automatically, e.g. through a Synapse module
-                or some other policy.
-                MUST NOT be None if block=True.
-            new_room_user_id:
-                If set, a new room will be created with this user ID
-                as the creator and admin, and all users in the old room will be
-                moved into that room. If not set, no new room will be created
-                and the users will just be removed from the old room.
-            new_room_name:
-                A string representing the name of the room that new users will
-                be invited to. Defaults to `Content Violation Notification`
-            message:
-                A string containing the first message that will be sent as
-                `new_room_user_id` in the new room. Ideally this will clearly
-                convey why the original room was shut down.
-                Defaults to `Sharing illegal content on this server is not
-                permitted and rooms in violation will be blocked.`
-            block:
-                If set to `true`, this room will be added to a blocking list,
-                preventing future attempts to join the room. Defaults to `false`.
-            purge:
-                If set to `true`, purge the given room from the database.
-            force_purge:
-                If set to `true`, the room will be purged from database
-                also if it fails to remove some users from room.
+            shutdown_params: parameters for the shutdown
 
         Returns:
             unique ID for this delete transaction.
         """
-        if room_id in self._purges_in_progress_by_room:
-            raise SynapseError(
-                400, "History purge already in progress for %s" % (room_id,)
-            )
+        if len(await self.get_delete_tasks_by_room(room_id, only_active=True)) > 0:
+            raise SynapseError(400, "Purge already in progress for %s" % (room_id,))
 
         # This check duplicates the one in `RoomShutdownHandler.shutdown_room`,
         # but here the requester gets a direct response / error from the HTTP request
         # and does not have to check the purge status
+        new_room_user_id = shutdown_params["new_room_user_id"]
         if new_room_user_id is not None:
             if not self.hs.is_mine_id(new_room_user_id):
                 raise SynapseError(
                     400, "User must be our own: %s" % (new_room_user_id,)
                 )
 
-        delete_id = random_string(16)
+        delete_id = await self._task_scheduler.schedule_task(
+            SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME,
+            resource_id=room_id,
+            params=shutdown_params,
+        )
 
         # we log the delete_id here so that it can be tied back to the
         # request id in the log lines.
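
The purge and shutdown handlers above all follow the same task-scheduler contract: an action coroutine receives a `ScheduledTask` and reports back a `(TaskStatus, result, error)` triple. A minimal sketch, assuming only the names visible in the diffs in this series (`register_action`, `schedule_task`, `ScheduledTask`, `TaskStatus`, `JsonMapping`, `hs.get_task_scheduler()`); the handler class and the action itself are hypothetical:

```python
from typing import Any, Optional, Tuple

from synapse.types import JsonMapping, ScheduledTask, TaskStatus


class ExampleHandler:
    """Hypothetical handler illustrating the TaskScheduler contract."""

    def __init__(self, hs: Any):
        self._task_scheduler = hs.get_task_scheduler()
        # Actions are registered at startup so that tasks persisted in the DB
        # can be picked up and resumed after a restart.
        self._task_scheduler.register_action(self._example_action, "example_action")

    async def start_example(self, room_id: str) -> str:
        # `schedule_task` persists the task and returns its ID, which callers
        # can later use to poll the task's status.
        return await self._task_scheduler.schedule_task(
            "example_action",
            resource_id=room_id,
            params={"force": False},
        )

    async def _example_action(
        self, task: ScheduledTask
    ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
        # The target lives in `task.resource_id`, extra arguments in `task.params`.
        if task.resource_id is None:
            return TaskStatus.FAILED, None, "no resource_id supplied"
        # ... do the (resumable) work here ...
        return TaskStatus.COMPLETE, {"handled": task.resource_id}, None
```

Because the task, its parameters, and its intermediate `result` are stored in the database, an `ACTIVE` task can be launched again by `_launch_scheduled_tasks` after a restart, which is what lets this patch resume interrupted purges and shutdowns.
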
@@ -891,19 +752,4 @@ class PaginationHandler: delete_id, ) - self._delete_by_id[delete_id] = DeleteStatus() - self._delete_by_room.setdefault(room_id, []).append(delete_id) - run_as_background_process( - "shutdown_and_purge_room", - self._shutdown_and_purge_room, - delete_id, - room_id, - requester_user_id, - new_room_user_id, - new_room_name, - message, - block, - purge, - force_purge, - ) return delete_id diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 7a762c8511..a0c3b16819 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -20,7 +20,7 @@ import random import string from collections import OrderedDict from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Awaitable, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Optional, Tuple import attr from typing_extensions import TypedDict @@ -54,11 +54,11 @@ from synapse.events import EventBase from synapse.events.snapshot import UnpersistedEventContext from synapse.events.utils import copy_and_fixup_power_levels_contents from synapse.handlers.relations import BundledAggregations -from synapse.module_api import NOT_SPAM from synapse.rest.admin._base import assert_user_is_admin from synapse.streams import EventSource from synapse.types import ( JsonDict, + JsonMapping, MutableStateMap, Requester, RoomAlias, @@ -454,7 +454,7 @@ class RoomCreationHandler: spam_check = await self._spam_checker_module_callbacks.user_may_create_room( user_id ) - if spam_check != NOT_SPAM: + if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: raise SynapseError( 403, "You are not permitted to create rooms", @@ -768,7 +768,7 @@ class RoomCreationHandler: spam_check = await self._spam_checker_module_callbacks.user_may_create_room( user_id ) - if spam_check != NOT_SPAM: + if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: raise SynapseError( 403, "You are not permitted to create rooms", @@ -1750,6 +1750,45 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]): return self.store.get_current_room_stream_token_for_room_id(room_id) +class ShutdownRoomParams(TypedDict): + """ + Attributes: + requester_user_id: + User who requested the action. Will be recorded as putting the room on the + blocking list. + new_room_user_id: + If set, a new room will be created with this user ID + as the creator and admin, and all users in the old room will be + moved into that room. If not set, no new room will be created + and the users will just be removed from the old room. + new_room_name: + A string representing the name of the room that new users will + be invited to. Defaults to `Content Violation Notification` + message: + A string containing the first message that will be sent as + `new_room_user_id` in the new room. Ideally this will clearly + convey why the original room was shut down. + Defaults to `Sharing illegal content on this server is not + permitted and rooms in violation will be blocked.` + block: + If set to `true`, this room will be added to a blocking list, + preventing future attempts to join the room. Defaults to `false`. + purge: + If set to `true`, purge the given room from the database. + force_purge: + If set to `true`, the room will be purged from database + even if there are still users joined to the room. 
+ """ + + requester_user_id: Optional[str] + new_room_user_id: Optional[str] + new_room_name: Optional[str] + message: Optional[str] + block: bool + purge: bool + force_purge: bool + + class ShutdownRoomResponse(TypedDict): """ Attributes: @@ -1787,12 +1826,12 @@ class RoomShutdownHandler: async def shutdown_room( self, room_id: str, - requester_user_id: Optional[str], - new_room_user_id: Optional[str] = None, - new_room_name: Optional[str] = None, - message: Optional[str] = None, - block: bool = False, - ) -> ShutdownRoomResponse: + params: ShutdownRoomParams, + result: Optional[ShutdownRoomResponse] = None, + update_result_fct: Optional[ + Callable[[Optional[JsonMapping]], Awaitable[None]] + ] = None, + ) -> Optional[ShutdownRoomResponse]: """ Shuts down a room. Moves all local users and room aliases automatically to a new room if `new_room_user_id` is set. Otherwise local users only @@ -1808,52 +1847,23 @@ class RoomShutdownHandler: Args: room_id: The ID of the room to shut down. - requester_user_id: - User who requested the action and put the room on the - blocking list. - If None, the action was not manually requested but instead - triggered automatically, e.g. through a Synapse module - or some other policy. - MUST NOT be None if block=True. - new_room_user_id: - If set, a new room will be created with this user ID - as the creator and admin, and all users in the old room will be - moved into that room. If not set, no new room will be created - and the users will just be removed from the old room. - new_room_name: - A string representing the name of the room that new users will - be invited to. Defaults to `Content Violation Notification` - message: - A string containing the first message that will be sent as - `new_room_user_id` in the new room. Ideally this will clearly - convey why the original room was shut down. - Defaults to `Sharing illegal content on this server is not - permitted and rooms in violation will be blocked.` - block: - If set to `True`, users will be prevented from joining the old - room. This option can also be used to pre-emptively block a room, - even if it's unknown to this homeserver. In this case, the room - will be blocked, and no further action will be taken. If `False`, - attempting to delete an unknown room is invalid. + delete_id: The delete ID identifying this delete request + params: parameters for the shutdown, cf `ShutdownRoomParams` + result: current status of the shutdown, if it was interrupted + update_result_fct: function called when `result` is updated locally - Defaults to `False`. - - Returns: a dict containing the following keys: - kicked_users: An array of users (`user_id`) that were kicked. - failed_to_kick_users: - An array of users (`user_id`) that that were not kicked. - local_aliases: - An array of strings representing the local aliases that were - migrated from the old room to the new. - new_room_id: - A string representing the room ID of the new room, or None if - no such room was created. + Returns: a dict matching `ShutdownRoomResponse`. 
""" + requester_user_id = params["requester_user_id"] + new_room_user_id = params["new_room_user_id"] + block = params["block"] - if not new_room_name: - new_room_name = self.DEFAULT_ROOM_NAME - if not message: - message = self.DEFAULT_MESSAGE + new_room_name = ( + params["new_room_name"] + if params["new_room_name"] + else self.DEFAULT_ROOM_NAME + ) + message = params["message"] if params["message"] else self.DEFAULT_MESSAGE if not RoomID.is_valid(room_id): raise SynapseError(400, "%s is not a legal room ID" % (room_id,)) @@ -1865,6 +1875,17 @@ class RoomShutdownHandler: 403, "Shutdown of this room is forbidden", Codes.FORBIDDEN ) + result = ( + result + if result + else { + "kicked_users": [], + "failed_to_kick_users": [], + "local_aliases": [], + "new_room_id": None, + } + ) + # Action the block first (even if the room doesn't exist yet) if block: if requester_user_id is None: @@ -1877,14 +1898,10 @@ class RoomShutdownHandler: if not await self.store.get_room(room_id): # if we don't know about the room, there is nothing left to do. - return { - "kicked_users": [], - "failed_to_kick_users": [], - "local_aliases": [], - "new_room_id": None, - } + return result - if new_room_user_id is not None: + new_room_id = result.get("new_room_id") + if new_room_user_id is not None and new_room_id is None: if not self.hs.is_mine_id(new_room_user_id): raise SynapseError( 400, "User must be our own: %s" % (new_room_user_id,) @@ -1904,6 +1921,10 @@ class RoomShutdownHandler: ratelimit=False, ) + result["new_room_id"] = new_room_id + if update_result_fct: + await update_result_fct(result) + logger.info( "Shutting down room %r, joining to new room: %r", room_id, new_room_id ) @@ -1917,12 +1938,9 @@ class RoomShutdownHandler: stream_id, ) else: - new_room_id = None logger.info("Shutting down room %r", room_id) users = await self.store.get_users_in_room(room_id) - kicked_users = [] - failed_to_kick_users = [] for user_id in users: if not self.hs.is_mine_id(user_id): continue @@ -1951,7 +1969,9 @@ class RoomShutdownHandler: stream_id, ) - await self.room_member_handler.forget(target_requester.user, room_id) + await self.room_member_handler.forget( + target_requester.user, room_id, do_not_schedule_purge=True + ) # Join users to new room if new_room_user_id: @@ -1966,15 +1986,23 @@ class RoomShutdownHandler: require_consent=False, ) - kicked_users.append(user_id) + result["kicked_users"].append(user_id) + if update_result_fct: + await update_result_fct(result) except Exception: logger.exception( "Failed to leave old room and join new room for %r", user_id ) - failed_to_kick_users.append(user_id) + result["failed_to_kick_users"].append(user_id) + if update_result_fct: + await update_result_fct(result) # Send message in new room and move aliases if new_room_user_id: + room_creator_requester = create_requester( + new_room_user_id, authenticated_entity=requester_user_id + ) + await self.event_creation_handler.create_and_send_nonmember_event( room_creator_requester, { @@ -1986,18 +2014,15 @@ class RoomShutdownHandler: ratelimit=False, ) - aliases_for_room = await self.store.get_aliases_for_room(room_id) + result["local_aliases"] = list( + await self.store.get_aliases_for_room(room_id) + ) assert new_room_id is not None await self.store.update_aliases_for_room( room_id, new_room_id, requester_user_id ) else: - aliases_for_room = [] + result["local_aliases"] = [] - return { - "kicked_users": kicked_users, - "failed_to_kick_users": failed_to_kick_users, - "local_aliases": list(aliases_for_room), - "new_room_id": 
new_room_id, - } + return result diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index de0f04e3fe..90343c2306 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -37,13 +37,13 @@ from synapse.api.ratelimiting import Ratelimiter from synapse.event_auth import get_named_level, get_power_level_event from synapse.events import EventBase from synapse.events.snapshot import EventContext +from synapse.handlers.pagination import PURGE_ROOM_ACTION_NAME from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME from synapse.logging import opentracing from synapse.metrics import event_processing_positions from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.module_api import NOT_SPAM from synapse.types import ( JsonDict, Requester, @@ -169,6 +169,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): self.request_ratelimiter = hs.get_request_ratelimiter() hs.get_notifier().add_new_join_in_room_callback(self._on_user_joined_room) + self._forgotten_room_retention_period = ( + hs.config.server.forgotten_room_retention_period + ) + def _on_user_joined_room(self, event_id: str, room_id: str) -> None: """Notify the rate limiter that a room join has occurred. @@ -278,7 +282,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): """ raise NotImplementedError() - async def forget(self, user: UserID, room_id: str) -> None: + async def forget( + self, user: UserID, room_id: str, do_not_schedule_purge: bool = False + ) -> None: user_id = user.to_string() member = await self._storage_controllers.state.get_current_state_event( @@ -298,6 +304,20 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # the table `current_state_events` and `get_current_state_events` is `None`. 
await self.store.forget(user_id, room_id)
 
+        # If everyone locally has left the room, then there is no reason for us to
+        # keep the room around, so we automatically purge it after a little while.
+        if (
+            not do_not_schedule_purge
+            and self._forgotten_room_retention_period
+            and await self.store.is_locally_forgotten_room(room_id)
+        ):
+            await self.hs.get_task_scheduler().schedule_task(
+                PURGE_ROOM_ACTION_NAME,
+                resource_id=room_id,
+                timestamp=self.clock.time_msec()
+                + self._forgotten_room_retention_period,
+            )
+
     async def ratelimit_multiple_invites(
         self,
         requester: Optional[Requester],
@@ -804,7 +824,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
             spam_check = await self._spam_checker_module_callbacks.user_may_invite(
                 requester.user.to_string(), target_id, room_id
             )
-            if spam_check != NOT_SPAM:
+            if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
                 logger.info("Blocking invite due to spam checker")
 
                 block_invite_result = spam_check
@@ -939,7 +959,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                     target.to_string(), room_id, is_invited=inviter is not None
                 )
             )
-            if spam_check != NOT_SPAM:
+            if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
                 raise SynapseError(
                     403,
                     "Not allowed to join this room",
@@ -1557,7 +1577,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                     room_id=room_id,
                 )
             )
-            if spam_check != NOT_SPAM:
+            if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
                 raise SynapseError(
                     403,
                     "Cannot send threepid invite",
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 7ec202be23..65e2aca456 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -1741,7 +1741,18 @@ class ModuleApi:
         """
         # Future extensions to this method might want to e.g. allow use of `force_purge`.
         # TODO In the future we should make sure this is persistent.
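+        # (as of this patch the scheduler persists the task, so the purge survives restarts)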
- self._hs.get_pagination_handler().start_shutdown_and_purge_room(room_id, None) + await self._hs.get_pagination_handler().start_shutdown_and_purge_room( + room_id, + { + "new_room_user_id": None, + "new_room_name": None, + "message": None, + "requester_user_id": None, + "block": False, + "purge": True, + "force_purge": False, + }, + ) async def set_displayname( self, diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 0d42c89ff7..7d0b4b55a0 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -21,6 +21,7 @@ from http import HTTPStatus from typing import TYPE_CHECKING, Optional, Tuple from synapse.api.errors import Codes, NotFoundError, SynapseError +from synapse.handlers.pagination import PURGE_HISTORY_ACTION_NAME from synapse.http.server import HttpServer, JsonResource from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseRequest @@ -93,7 +94,7 @@ from synapse.rest.admin.users import ( UserTokenRestServlet, WhoisRestServlet, ) -from synapse.types import JsonDict, RoomStreamToken +from synapse.types import JsonDict, RoomStreamToken, TaskStatus from synapse.util import SYNAPSE_VERSION if TYPE_CHECKING: @@ -196,7 +197,7 @@ class PurgeHistoryRestServlet(RestServlet): errcode=Codes.BAD_JSON, ) - purge_id = self.pagination_handler.start_purge_history( + purge_id = await self.pagination_handler.start_purge_history( room_id, token, delete_local_events=delete_local_events ) @@ -215,11 +216,20 @@ class PurgeHistoryStatusRestServlet(RestServlet): ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) - purge_status = self.pagination_handler.get_purge_status(purge_id) - if purge_status is None: + purge_task = await self.pagination_handler.get_delete_task(purge_id) + if purge_task is None or purge_task.action != PURGE_HISTORY_ACTION_NAME: raise NotFoundError("purge id '%s' not found" % purge_id) - return HTTPStatus.OK, purge_status.asdict() + result: JsonDict = { + "status": purge_task.status + if purge_task.status == TaskStatus.COMPLETE + or purge_task.status == TaskStatus.FAILED + else "active", + } + if purge_task.error: + result["error"] = purge_task.error + + return HTTPStatus.OK, result ######################################################################################## diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 1d65560265..436718c8b2 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -19,6 +19,10 @@ from urllib import parse as urlparse from synapse.api.constants import Direction, EventTypes, JoinRules, Membership from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.api.filtering import Filter +from synapse.handlers.pagination import ( + PURGE_ROOM_ACTION_NAME, + SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME, +) from synapse.http.servlet import ( ResolveRoomIdMixin, RestServlet, @@ -36,7 +40,7 @@ from synapse.rest.admin._base import ( ) from synapse.storage.databases.main.room import RoomSortOrder from synapse.streams.config import PaginationConfig -from synapse.types import JsonDict, RoomID, UserID, create_requester +from synapse.types import JsonDict, RoomID, ScheduledTask, UserID, create_requester from synapse.types.state import StateFilter from synapse.util import json_decoder @@ -117,20 +121,30 @@ class RoomRestV2Servlet(RestServlet): 403, "Shutdown of this room is forbidden", Codes.FORBIDDEN ) - delete_id = 
self._pagination_handler.start_shutdown_and_purge_room( + delete_id = await self._pagination_handler.start_shutdown_and_purge_room( room_id=room_id, - new_room_user_id=content.get("new_room_user_id"), - new_room_name=content.get("room_name"), - message=content.get("message"), - requester_user_id=requester.user.to_string(), - block=block, - purge=purge, - force_purge=force_purge, + shutdown_params={ + "new_room_user_id": content.get("new_room_user_id"), + "new_room_name": content.get("room_name"), + "message": content.get("message"), + "requester_user_id": requester.user.to_string(), + "block": block, + "purge": purge, + "force_purge": force_purge, + }, ) return HTTPStatus.OK, {"delete_id": delete_id} +def _convert_delete_task_to_response(task: ScheduledTask) -> JsonDict: + return { + "delete_id": task.id, + "status": task.status, + "shutdown_room": task.result, + } + + class DeleteRoomStatusByRoomIdRestServlet(RestServlet): """Get the status of the delete room background task.""" @@ -150,21 +164,16 @@ class DeleteRoomStatusByRoomIdRestServlet(RestServlet): HTTPStatus.BAD_REQUEST, "%s is not a legal room ID" % (room_id,) ) - delete_ids = self._pagination_handler.get_delete_ids_by_room(room_id) - if delete_ids is None: - raise NotFoundError("No delete task for room_id '%s' found" % room_id) + delete_tasks = await self._pagination_handler.get_delete_tasks_by_room(room_id) - response = [] - for delete_id in delete_ids: - delete = self._pagination_handler.get_delete_status(delete_id) - if delete: - response += [ - { - "delete_id": delete_id, - **delete.asdict(), - } - ] - return HTTPStatus.OK, {"results": cast(JsonDict, response)} + if delete_tasks: + return HTTPStatus.OK, { + "results": [ + _convert_delete_task_to_response(task) for task in delete_tasks + ], + } + else: + raise NotFoundError("No delete task for room_id '%s' found" % room_id) class DeleteRoomStatusByDeleteIdRestServlet(RestServlet): @@ -181,11 +190,14 @@ class DeleteRoomStatusByDeleteIdRestServlet(RestServlet): ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) - delete_status = self._pagination_handler.get_delete_status(delete_id) - if delete_status is None: + delete_task = await self._pagination_handler.get_delete_task(delete_id) + if delete_task is None or ( + delete_task.action != PURGE_ROOM_ACTION_NAME + and delete_task.action != SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME + ): raise NotFoundError("delete id '%s' not found" % delete_id) - return HTTPStatus.OK, cast(JsonDict, delete_status.asdict()) + return HTTPStatus.OK, _convert_delete_task_to_response(delete_task) class ListRoomRestServlet(RestServlet): @@ -349,11 +361,15 @@ class RoomRestServlet(RestServlet): ret = await room_shutdown_handler.shutdown_room( room_id=room_id, - new_room_user_id=content.get("new_room_user_id"), - new_room_name=content.get("room_name"), - message=content.get("message"), - requester_user_id=requester.user.to_string(), - block=block, + params={ + "new_room_user_id": content.get("new_room_user_id"), + "new_room_name": content.get("room_name"), + "message": content.get("message"), + "requester_user_id": requester.user.to_string(), + "block": block, + "purge": purge, + "force_purge": force_purge, + }, ) # Purge room diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index eb50086c50..6ed451d7c4 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -15,26 +15,34 @@ import json import time import urllib.parse from typing import List, Optional -from unittest.mock import Mock 
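+# AsyncMock is used to stub out async methods such as
+# PaginationHandler.purge_room in the tests below.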
+from unittest.mock import AsyncMock, Mock

 from parameterized import parameterized

+from twisted.internet.task import deferLater
 from twisted.test.proto_helpers import MemoryReactor

 import synapse.rest.admin
 from synapse.api.constants import EventTypes, Membership, RoomTypes
 from synapse.api.errors import Codes
-from synapse.handlers.pagination import PaginationHandler, PurgeStatus
+from synapse.handlers.pagination import (
+    PURGE_ROOM_ACTION_NAME,
+    SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME,
+)
 from synapse.rest.client import directory, events, login, room
 from synapse.server import HomeServer
+from synapse.types import UserID
 from synapse.util import Clock
-from synapse.util.stringutils import random_string
+from synapse.util.task_scheduler import TaskScheduler

 from tests import unittest

 """Tests admin REST events for /rooms paths."""

+ONE_HOUR_IN_S = 3600
+
+
 class DeleteRoomTestCase(unittest.HomeserverTestCase):
     servlets = [
         synapse.rest.admin.register_servlets,
@@ -46,6 +54,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):

     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.event_creation_handler = hs.get_event_creation_handler()
+        self.task_scheduler = hs.get_task_scheduler()
         hs.config.consent.user_consent_version = "1"

         consent_uri_builder = Mock()
@@ -476,6 +485,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):

     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.event_creation_handler = hs.get_event_creation_handler()
+        self.task_scheduler = hs.get_task_scheduler()
         hs.config.consent.user_consent_version = "1"

         consent_uri_builder = Mock()
@@ -502,6 +512,9 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         )
         self.url_status_by_delete_id = "/_synapse/admin/v2/rooms/delete_status/"

+        self.room_member_handler = hs.get_room_member_handler()
+        self.pagination_handler = hs.get_pagination_handler()
+
     @parameterized.expand(
         [
             ("DELETE", "/_synapse/admin/v2/rooms/%s"),
@@ -661,7 +674,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         delete_id1 = channel.json_body["delete_id"]

         # advance the reactor by half of the task retention time
-        self.reactor.advance(PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000 / 2)
+        self.reactor.advance(TaskScheduler.KEEP_TASKS_FOR_MS / 1000 / 2)

         # second task
         channel = self.make_request(
@@ -686,12 +699,14 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         self.assertEqual(2, len(channel.json_body["results"]))
         self.assertEqual("complete", channel.json_body["results"][0]["status"])
         self.assertEqual("complete", channel.json_body["results"][1]["status"])
-        self.assertEqual(delete_id1, channel.json_body["results"][0]["delete_id"])
-        self.assertEqual(delete_id2, channel.json_body["results"][1]["delete_id"])
+        delete_ids = {delete_id1, delete_id2}
+        self.assertIn(channel.json_body["results"][0]["delete_id"], delete_ids)
+        delete_ids.remove(channel.json_body["results"][0]["delete_id"])
+        self.assertIn(channel.json_body["results"][1]["delete_id"], delete_ids)

         # get status after the clearing time has passed for the first task;
         # the second task is not yet cleared
-        self.reactor.advance(PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000 / 2)
+        self.reactor.advance(TaskScheduler.KEEP_TASKS_FOR_MS / 1000 / 2)

         channel = self.make_request(
             "GET",
@@ -705,7 +720,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         self.assertEqual(delete_id2, channel.json_body["results"][0]["delete_id"])

         # get status after the clearing time has passed for all tasks
-        self.reactor.advance(PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000 / 2)
+        self.reactor.advance(TaskScheduler.KEEP_TASKS_FOR_MS / 1000 / 2)

         channel = self.make_request(
             "GET",
@@ -721,6 +736,13 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):

         body = {"new_room_user_id": self.admin_user}

+        # Mock PaginationHandler.purge_room to sleep for 100s, so that we have time
+        # to make a second call before the first purge has finished. Note that the
+        # mocked method no longer actually purges anything, which does not matter here.
+        async def purge_room(room_id: str, force: bool) -> None:
+            await deferLater(self.hs.get_reactor(), 100, lambda: None)
+
+        self.pagination_handler.purge_room = AsyncMock(side_effect=purge_room)  # type: ignore[method-assign]
+
         # first call to delete room
         # and do not wait for the task to finish
         first_channel = self.make_request(
             "PUT",
             self.url.encode("ascii"),
             content=body,
             access_token=self.admin_user_tok,
-            await_result=False,
         )

         # second call to delete room
@@ -742,7 +763,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         self.assertEqual(400, second_channel.code, msg=second_channel.json_body)
         self.assertEqual(Codes.UNKNOWN, second_channel.json_body["errcode"])
         self.assertEqual(
-            f"History purge already in progress for {self.room_id}",
+            f"Purge already in progress for {self.room_id}",
             second_channel.json_body["error"],
         )

@@ -751,6 +772,9 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         self.assertEqual(200, first_channel.code, msg=first_channel.json_body)
         self.assertIn("delete_id", first_channel.json_body)

+        # wait for purge_room to finish
+        self.pump(1)
+
         # check status after the task has finished
         self._test_result(
             first_channel.json_body["delete_id"],
@@ -972,6 +996,115 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         # Assert we can no longer peek into the room
         self._assert_peek(self.room_id, expect_code=403)

+    @unittest.override_config({"forgotten_room_retention_period": "1d"})
+    def test_purge_forgotten_room(self) -> None:
+        # Create a test room
+        room_id = self.helper.create_room_as(
+            self.admin_user,
+            tok=self.admin_user_tok,
+        )
+
+        self.helper.leave(room_id, user=self.admin_user, tok=self.admin_user_tok)
+        self.get_success(
+            self.room_member_handler.forget(
+                UserID.from_string(self.admin_user), room_id
+            )
+        )
+
+        # Test that the room is not yet purged
+        with self.assertRaises(AssertionError):
+            self._is_purged(room_id)
+
+        # Advance 24 hours into the future, past the `forgotten_room_retention_period`
+        self.reactor.advance(24 * ONE_HOUR_IN_S)
+
+        self._is_purged(room_id)
+
+    def test_scheduled_purge_room(self) -> None:
+        # Create a test room
+        room_id = self.helper.create_room_as(
+            self.admin_user,
+            tok=self.admin_user_tok,
+        )
+        self.helper.leave(room_id, user=self.admin_user, tok=self.admin_user_tok)
+
+        # Schedule a purge 10 seconds in the future
+        self.get_success(
+            self.task_scheduler.schedule_task(
+                PURGE_ROOM_ACTION_NAME,
+                resource_id=room_id,
+                timestamp=self.clock.time_msec() + 10 * 1000,
+            )
+        )
+
+        # Test that the room is not yet purged
+        with self.assertRaises(AssertionError):
+            self._is_purged(room_id)
+
+        # Wait for the next scheduler run
+        self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS)
+
+        self._is_purged(room_id)
+
+    def test_schedule_shutdown_room(self) -> None:
+        # Create a test room
+        room_id = self.helper.create_room_as(
+            self.other_user,
+            tok=self.other_user_tok,
+        )
+
+        # Schedule a shutdown 10 seconds in the future
+        delete_id = self.get_success(
+            self.task_scheduler.schedule_task(
+                SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME,
+                resource_id=room_id,
+                params={
"requester_user_id": self.admin_user, + "new_room_user_id": self.admin_user, + "new_room_name": None, + "message": None, + "block": False, + "purge": True, + "force_purge": True, + }, + timestamp=self.clock.time_msec() + 10 * 1000, + ) + ) + + # Test that room is not yet shutdown + self._is_member(room_id, self.other_user) + + # Test that room is not yet purged + with self.assertRaises(AssertionError): + self._is_purged(room_id) + + # Wait for next scheduler run + self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS) + + # Test that all users has been kicked (room is shutdown) + self._has_no_members(room_id) + + self._is_purged(room_id) + + # Retrieve delete results + result = self.make_request( + "GET", + self.url_status_by_delete_id + delete_id, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, result.code, msg=result.json_body) + + # Check that the user is in kicked_users + self.assertIn( + self.other_user, result.json_body["shutdown_room"]["kicked_users"] + ) + + new_room_id = result.json_body["shutdown_room"]["new_room_id"] + self.assertTrue(new_room_id) + + # Check that the user is actually in the new room + self._is_member(new_room_id, self.other_user) + def _is_blocked(self, room_id: str, expect: bool = True) -> None: """Assert that the room is blocked or not""" d = self.store.is_room_blocked(room_id) @@ -1034,7 +1167,6 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): kicked_user: a user_id which is kicked from the room expect_new_room: if we expect that a new room was created """ - # get information by room_id channel_room_id = self.make_request( "GET", @@ -1957,11 +2089,8 @@ class RoomMessagesTestCase(unittest.HomeserverTestCase): self.assertEqual(len(chunk), 2, [event["content"] for event in chunk]) # Purge every event before the second event. 
-        purge_id = random_string(16)
-        pagination_handler._purges_by_id[purge_id] = PurgeStatus()
         self.get_success(
-            pagination_handler._purge_history(
-                purge_id=purge_id,
+            pagination_handler.purge_history(
                 room_id=self.room_id,
                 token=second_token_str,
                 delete_local_events=True,
diff --git a/tests/rest/admin/test_server_notice.py b/tests/rest/admin/test_server_notice.py
index 28b999573e..dfd14f5751 100644
--- a/tests/rest/admin/test_server_notice.py
+++ b/tests/rest/admin/test_server_notice.py
@@ -413,11 +413,23 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
         self.assertEqual(messages[0]["content"]["body"], "test msg one")
         self.assertEqual(messages[0]["sender"], "@notices:test")

+        # shut down and purge room
         self.get_success(
-            self.room_shutdown_handler.shutdown_room(first_room_id, self.admin_user)
+            self.room_shutdown_handler.shutdown_room(
+                first_room_id,
+                {
+                    "requester_user_id": self.admin_user,
+                    "new_room_user_id": None,
+                    "new_room_name": None,
+                    "message": None,
+                    "block": False,
+                    "purge": True,
+                    "force_purge": False,
+                },
+            )
         )
-        self.get_success(self.pagination_handler.purge_room(first_room_id))
+        self.get_success(self.pagination_handler.purge_room(first_room_id, force=False))

         # user is not a member anymore
         self._check_invite_and_join_status(self.other_user, 0, 0)
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index 47c1d38ad7..7627823d3f 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -41,7 +41,6 @@ from synapse.api.errors import Codes, HttpResponseException
 from synapse.appservice import ApplicationService
 from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
-from synapse.handlers.pagination import PurgeStatus
 from synapse.rest import admin
 from synapse.rest.client import account, directory, login, profile, register, room, sync
 from synapse.server import HomeServer
@@ -2086,11 +2085,8 @@ class RoomMessageListTestCase(RoomBase):
         self.assertEqual(len(chunk), 2, [event["content"] for event in chunk])

         # Purge every event before the second event.
- purge_id = random_string(16) - pagination_handler._purges_by_id[purge_id] = PurgeStatus() self.get_success( - pagination_handler._purge_history( - purge_id=purge_id, + pagination_handler.purge_history( room_id=self.room_id, token=second_token_str, delete_local_events=True, From d2eacfe051d21be8174f1accb92263150654b6b9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 10:17:48 +0100 Subject: [PATCH 481/562] Bump docker/build-push-action from 4 to 5 (#16336) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 8a69dc4986..f6772993dd 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -68,7 +68,7 @@ jobs: type=pep440,pattern={{raw}} - name: Build and push all platforms - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: push: true labels: | From 47f9837008e438297741ba6c0d6ce3a417211df6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 10:19:05 +0100 Subject: [PATCH 482/562] Bump docker/setup-qemu-action from 2 to 3 (#16338) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 2 +- .github/workflows/release-artifacts.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index f6772993dd..f9d2ce67d2 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -18,7 +18,7 @@ jobs: steps: - name: Set up QEMU id: qemu - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 with: platforms: arm64 diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index f331f67d97..f031127cce 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -134,7 +134,7 @@ jobs: - name: Set up QEMU to emulate aarch64 if: matrix.arch == 'aarch64' - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 with: platforms: arm64 From 4cdc2ed7dc2bc1234964e5f405858ce14e96eb68 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 10:20:50 +0100 Subject: [PATCH 483/562] Bump docker/metadata-action from 4 to 5 (#16337) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/push_complement_image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/push_complement_image.yml b/.github/workflows/push_complement_image.yml index b76c4cb323..8e41611f2c 100644 --- a/.github/workflows/push_complement_image.yml +++ b/.github/workflows/push_complement_image.yml @@ -55,7 +55,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Work out labels for complement image id: meta - uses: docker/metadata-action@v4 + uses: docker/metadata-action@v5 with: images: ghcr.io/${{ github.repository }}/complement-synapse tags: | From ab5f4f906d7e0219ebf872cc6853b4f69af11dc9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 10:21:14 +0100 Subject: [PATCH 484/562] Bump docker/login-action from 2 to 3 (#16339) Co-authored-by: dependabot[bot] 
<49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 4 ++-- .github/workflows/push_complement_image.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index f9d2ce67d2..a1793b9010 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -40,13 +40,13 @@ jobs: echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV - name: Log in to DockerHub - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Log in to GHCR - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.repository_owner }} diff --git a/.github/workflows/push_complement_image.yml b/.github/workflows/push_complement_image.yml index 8e41611f2c..e994b122cd 100644 --- a/.github/workflows/push_complement_image.yml +++ b/.github/workflows/push_complement_image.yml @@ -48,7 +48,7 @@ jobs: with: ref: master - name: Login to registry - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.actor }} From fedaebc440da3a57c4df97787a3629df85a35e38 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 10:21:32 +0100 Subject: [PATCH 485/562] Bump typing-extensions from 4.7.1 to 4.8.0 (#16341) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index c01312579e..9d5ec84535 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2077,6 +2077,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -2084,8 +2085,15 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -2102,6 +2110,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -2109,6 +2118,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = 
"PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -3070,13 +3080,13 @@ files = [ [[package]] name = "typing-extensions" -version = "4.7.1" -description = "Backported and Experimental Type Hints for Python 3.7+" +version = "4.8.0" +description = "Backported and Experimental Type Hints for Python 3.8+" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, - {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, + {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"}, + {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, ] [[package]] From 6946209e671ec278d7648434500aeb2639c8c3c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Joaqu=C3=ADn=20Atria?= Date: Mon, 18 Sep 2023 12:32:01 +0100 Subject: [PATCH 486/562] Set email charset as utf-8 rather than utf8 (#16329) --- changelog.d/16329.bugfix | 1 + synapse/handlers/send_email.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16329.bugfix diff --git a/changelog.d/16329.bugfix b/changelog.d/16329.bugfix new file mode 100644 index 0000000000..2f1f7e8ffe --- /dev/null +++ b/changelog.d/16329.bugfix @@ -0,0 +1 @@ +Use standard name for UTF-8 charset in emails. diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py index 4f5fe62fe8..657d9b3559 100644 --- a/synapse/handlers/send_email.py +++ b/synapse/handlers/send_email.py @@ -174,8 +174,8 @@ class SendEmailHandler: if raw_to == "": raise RuntimeError("Invalid 'to' address") - html_part = MIMEText(html, "html", "utf8") - text_part = MIMEText(text, "plain", "utf8") + html_part = MIMEText(html, "html", "utf-8") + text_part = MIMEText(text, "plain", "utf-8") multipart_msg = MIMEMultipart("alternative") multipart_msg["Subject"] = subject From 5ad1714d420445ec55e600ee52c5561e82a6d516 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 13:46:02 +0200 Subject: [PATCH 487/562] Bump pillow from 10.0.0 to 10.0.1 (#16344) Bumps [pillow](https://github.com/python-pillow/Pillow) from 10.0.0 to 10.0.1. - [Release notes](https://github.com/python-pillow/Pillow/releases) - [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst) - [Commits](https://github.com/python-pillow/Pillow/compare/10.0.0...10.0.1) --- updated-dependencies: - dependency-name: pillow dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 112 ++++++++++++++++++++++++++-------------------------- 1 file changed, 55 insertions(+), 57 deletions(-) diff --git a/poetry.lock b/poetry.lock index 9d5ec84535..17d0993a8b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1618,67 +1618,65 @@ files = [ [[package]] name = "pillow" -version = "10.0.0" +version = "10.0.1" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.8" files = [ - {file = "Pillow-10.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891"}, - {file = "Pillow-10.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d5db32e2a6ccbb3d34d87c87b432959e0db29755727afb37290e10f6e8e62614"}, - {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edf4392b77bdc81f36e92d3a07a5cd072f90253197f4a52a55a8cec48a12483b"}, - {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:520f2a520dc040512699f20fa1c363eed506e94248d71f85412b625026f6142c"}, - {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:8c11160913e3dd06c8ffdb5f233a4f254cb449f4dfc0f8f4549eda9e542c93d1"}, - {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a74ba0c356aaa3bb8e3eb79606a87669e7ec6444be352870623025d75a14a2bf"}, - {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5d0dae4cfd56969d23d94dc8e89fb6a217be461c69090768227beb8ed28c0a3"}, - {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22c10cc517668d44b211717fd9775799ccec4124b9a7f7b3635fc5386e584992"}, - {file = "Pillow-10.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:dffe31a7f47b603318c609f378ebcd57f1554a3a6a8effbc59c3c69f804296de"}, - {file = "Pillow-10.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:9fb218c8a12e51d7ead2a7c9e101a04982237d4855716af2e9499306728fb485"}, - {file = "Pillow-10.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d35e3c8d9b1268cbf5d3670285feb3528f6680420eafe35cccc686b73c1e330f"}, - {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ed64f9ca2f0a95411e88a4efbd7a29e5ce2cea36072c53dd9d26d9c76f753b3"}, - {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b6eb5502f45a60a3f411c63187db83a3d3107887ad0d036c13ce836f8a36f1d"}, - {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:c1fbe7621c167ecaa38ad29643d77a9ce7311583761abf7836e1510c580bf3dd"}, - {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cd25d2a9d2b36fcb318882481367956d2cf91329f6892fe5d385c346c0649629"}, - {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3b08d4cc24f471b2c8ca24ec060abf4bebc6b144cb89cba638c720546b1cf538"}, - {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737a602fbd82afd892ca746392401b634e278cb65d55c4b7a8f48e9ef8d008d"}, - {file = "Pillow-10.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:3a82c40d706d9aa9734289740ce26460a11aeec2d9c79b7af87bb35f0073c12f"}, - {file = "Pillow-10.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:bc2ec7c7b5d66b8ec9ce9f720dbb5fa4bace0f545acd34870eff4a369b44bf37"}, - {file = "Pillow-10.0.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:d80cf684b541685fccdd84c485b31ce73fc5c9b5d7523bf1394ce134a60c6883"}, - 
{file = "Pillow-10.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76de421f9c326da8f43d690110f0e79fe3ad1e54be811545d7d91898b4c8493e"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81ff539a12457809666fef6624684c008e00ff6bf455b4b89fd00a140eecd640"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce543ed15570eedbb85df19b0a1a7314a9c8141a36ce089c0a894adbfccb4568"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:685ac03cc4ed5ebc15ad5c23bc555d68a87777586d970c2c3e216619a5476223"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d72e2ecc68a942e8cf9739619b7f408cc7b272b279b56b2c83c6123fcfa5cdff"}, - {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d50b6aec14bc737742ca96e85d6d0a5f9bfbded018264b3b70ff9d8c33485551"}, - {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:00e65f5e822decd501e374b0650146063fbb30a7264b4d2744bdd7b913e0cab5"}, - {file = "Pillow-10.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f31f9fdbfecb042d046f9d91270a0ba28368a723302786c0009ee9b9f1f60199"}, - {file = "Pillow-10.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:1ce91b6ec08d866b14413d3f0bbdea7e24dfdc8e59f562bb77bc3fe60b6144ca"}, - {file = "Pillow-10.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:349930d6e9c685c089284b013478d6f76e3a534e36ddfa912cde493f235372f3"}, - {file = "Pillow-10.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a684105f7c32488f7153905a4e3015a3b6c7182e106fe3c37fbb5ef3e6994c3"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4f69b3700201b80bb82c3a97d5e9254084f6dd5fb5b16fc1a7b974260f89f43"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f07ea8d2f827d7d2a49ecf1639ec02d75ffd1b88dcc5b3a61bbb37a8759ad8d"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:040586f7d37b34547153fa383f7f9aed68b738992380ac911447bb78f2abe530"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f88a0b92277de8e3ca715a0d79d68dc82807457dae3ab8699c758f07c20b3c51"}, - {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c7cf14a27b0d6adfaebb3ae4153f1e516df54e47e42dcc073d7b3d76111a8d86"}, - {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3400aae60685b06bb96f99a21e1ada7bc7a413d5f49bce739828ecd9391bb8f7"}, - {file = "Pillow-10.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:dbc02381779d412145331789b40cc7b11fdf449e5d94f6bc0b080db0a56ea3f0"}, - {file = "Pillow-10.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9211e7ad69d7c9401cfc0e23d49b69ca65ddd898976d660a2fa5904e3d7a9baa"}, - {file = "Pillow-10.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:faaf07ea35355b01a35cb442dd950d8f1bb5b040a7787791a535de13db15ed90"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f72a021fbb792ce98306ffb0c348b3c9cb967dce0f12a49aa4c3d3fdefa967"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f7c16705f44e0504a3a2a14197c1f0b32a95731d251777dcb060aa83022cb2d"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:76edb0a1fa2b4745fb0c99fb9fb98f8b180a1bbceb8be49b087e0b21867e77d3"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = 
"sha256:368ab3dfb5f49e312231b6f27b8820c823652b7cd29cfbd34090565a015e99ba"}, - {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:608bfdee0d57cf297d32bcbb3c728dc1da0907519d1784962c5f0c68bb93e5a3"}, - {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5c6e3df6bdd396749bafd45314871b3d0af81ff935b2d188385e970052091017"}, - {file = "Pillow-10.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:7be600823e4c8631b74e4a0d38384c73f680e6105a7d3c6824fcf226c178c7e6"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:92be919bbc9f7d09f7ae343c38f5bb21c973d2576c1d45600fce4b74bafa7ac0"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8182b523b2289f7c415f589118228d30ac8c355baa2f3194ced084dac2dbba"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:38250a349b6b390ee6047a62c086d3817ac69022c127f8a5dc058c31ccef17f3"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:88af2003543cc40c80f6fca01411892ec52b11021b3dc22ec3bc9d5afd1c5334"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c189af0545965fa8d3b9613cfdb0cd37f9d71349e0f7750e1fd704648d475ed2"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7b031a6fc11365970e6a5686d7ba8c63e4c1cf1ea143811acbb524295eabed"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db24668940f82321e746773a4bc617bfac06ec831e5c88b643f91f122a785684"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:efe8c0681042536e0d06c11f48cebe759707c9e9abf880ee213541c5b46c5bf3"}, - {file = "Pillow-10.0.0.tar.gz", hash = "sha256:9c82b5b3e043c7af0d95792d0d20ccf68f61a1fec6b3530e718b688422727396"}, + {file = "Pillow-10.0.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:8f06be50669087250f319b706decf69ca71fdecd829091a37cc89398ca4dc17a"}, + {file = "Pillow-10.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50bd5f1ebafe9362ad622072a1d2f5850ecfa44303531ff14353a4059113b12d"}, + {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6a90167bcca1216606223a05e2cf991bb25b14695c518bc65639463d7db722d"}, + {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f11c9102c56ffb9ca87134bd025a43d2aba3f1155f508eff88f694b33a9c6d19"}, + {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:186f7e04248103482ea6354af6d5bcedb62941ee08f7f788a1c7707bc720c66f"}, + {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:0462b1496505a3462d0f35dc1c4d7b54069747d65d00ef48e736acda2c8cbdff"}, + {file = "Pillow-10.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d889b53ae2f030f756e61a7bff13684dcd77e9af8b10c6048fb2c559d6ed6eaf"}, + {file = "Pillow-10.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:552912dbca585b74d75279a7570dd29fa43b6d93594abb494ebb31ac19ace6bd"}, + {file = "Pillow-10.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:787bb0169d2385a798888e1122c980c6eff26bf941a8ea79747d35d8f9210ca0"}, + {file = "Pillow-10.0.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fd2a5403a75b54661182b75ec6132437a181209b901446ee5724b589af8edef1"}, + {file = "Pillow-10.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2d7e91b4379f7a76b31c2dda84ab9e20c6220488e50f7822e59dac36b0cd92b1"}, + {file = 
"Pillow-10.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e9adb3f22d4c416e7cd79b01375b17159d6990003633ff1d8377e21b7f1b21"}, + {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93139acd8109edcdeffd85e3af8ae7d88b258b3a1e13a038f542b79b6d255c54"}, + {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:92a23b0431941a33242b1f0ce6c88a952e09feeea9af4e8be48236a68ffe2205"}, + {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cbe68deb8580462ca0d9eb56a81912f59eb4542e1ef8f987405e35a0179f4ea2"}, + {file = "Pillow-10.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:522ff4ac3aaf839242c6f4e5b406634bfea002469656ae8358644fc6c4856a3b"}, + {file = "Pillow-10.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:84efb46e8d881bb06b35d1d541aa87f574b58e87f781cbba8d200daa835b42e1"}, + {file = "Pillow-10.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:898f1d306298ff40dc1b9ca24824f0488f6f039bc0e25cfb549d3195ffa17088"}, + {file = "Pillow-10.0.1-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:bcf1207e2f2385a576832af02702de104be71301c2696d0012b1b93fe34aaa5b"}, + {file = "Pillow-10.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d6c9049c6274c1bb565021367431ad04481ebb54872edecfcd6088d27edd6ed"}, + {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28444cb6ad49726127d6b340217f0627abc8732f1194fd5352dec5e6a0105635"}, + {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de596695a75496deb3b499c8c4f8e60376e0516e1a774e7bc046f0f48cd620ad"}, + {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:2872f2d7846cf39b3dbff64bc1104cc48c76145854256451d33c5faa55c04d1a"}, + {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:4ce90f8a24e1c15465048959f1e94309dfef93af272633e8f37361b824532e91"}, + {file = "Pillow-10.0.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ee7810cf7c83fa227ba9125de6084e5e8b08c59038a7b2c9045ef4dde61663b4"}, + {file = "Pillow-10.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b1be1c872b9b5fcc229adeadbeb51422a9633abd847c0ff87dc4ef9bb184ae08"}, + {file = "Pillow-10.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:98533fd7fa764e5f85eebe56c8e4094db912ccbe6fbf3a58778d543cadd0db08"}, + {file = "Pillow-10.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:764d2c0daf9c4d40ad12fbc0abd5da3af7f8aa11daf87e4fa1b834000f4b6b0a"}, + {file = "Pillow-10.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fcb59711009b0168d6ee0bd8fb5eb259c4ab1717b2f538bbf36bacf207ef7a68"}, + {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:697a06bdcedd473b35e50a7e7506b1d8ceb832dc238a336bd6f4f5aa91a4b500"}, + {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f665d1e6474af9f9da5e86c2a3a2d2d6204e04d5af9c06b9d42afa6ebde3f21"}, + {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:2fa6dd2661838c66f1a5473f3b49ab610c98a128fc08afbe81b91a1f0bf8c51d"}, + {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:3a04359f308ebee571a3127fdb1bd01f88ba6f6fb6d087f8dd2e0d9bff43f2a7"}, + {file = "Pillow-10.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:723bd25051454cea9990203405fa6b74e043ea76d4968166dfd2569b0210886a"}, + {file = "Pillow-10.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:71671503e3015da1b50bd18951e2f9daf5b6ffe36d16f1eb2c45711a301521a7"}, + {file = "Pillow-10.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:44e7e4587392953e5e251190a964675f61e4dae88d1e6edbe9f36d6243547ff3"}, + {file = "Pillow-10.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:3855447d98cced8670aaa63683808df905e956f00348732448b5a6df67ee5849"}, + {file = "Pillow-10.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ed2d9c0704f2dc4fa980b99d565c0c9a543fe5101c25b3d60488b8ba80f0cce1"}, + {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5bb289bb835f9fe1a1e9300d011eef4d69661bb9b34d5e196e5e82c4cb09b37"}, + {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a0d3e54ab1df9df51b914b2233cf779a5a10dfd1ce339d0421748232cea9876"}, + {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:2cc6b86ece42a11f16f55fe8903595eff2b25e0358dec635d0a701ac9586588f"}, + {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:ca26ba5767888c84bf5a0c1a32f069e8204ce8c21d00a49c90dabeba00ce0145"}, + {file = "Pillow-10.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f0b4b06da13275bc02adfeb82643c4a6385bd08d26f03068c2796f60d125f6f2"}, + {file = "Pillow-10.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bc2e3069569ea9dbe88d6b8ea38f439a6aad8f6e7a6283a38edf61ddefb3a9bf"}, + {file = "Pillow-10.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:8b451d6ead6e3500b6ce5c7916a43d8d8d25ad74b9102a629baccc0808c54971"}, + {file = "Pillow-10.0.1-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:32bec7423cdf25c9038fef614a853c9d25c07590e1a870ed471f47fb80b244db"}, + {file = "Pillow-10.0.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cf63d2c6928b51d35dfdbda6f2c1fddbe51a6bc4a9d4ee6ea0e11670dd981e"}, + {file = "Pillow-10.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f6d3d4c905e26354e8f9d82548475c46d8e0889538cb0657aa9c6f0872a37aa4"}, + {file = "Pillow-10.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:847e8d1017c741c735d3cd1883fa7b03ded4f825a6e5fcb9378fd813edee995f"}, + {file = "Pillow-10.0.1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:7f771e7219ff04b79e231d099c0a28ed83aa82af91fd5fa9fdb28f5b8d5addaf"}, + {file = "Pillow-10.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459307cacdd4138edee3875bbe22a2492519e060660eaf378ba3b405d1c66317"}, + {file = "Pillow-10.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b059ac2c4c7a97daafa7dc850b43b2d3667def858a4f112d1aa082e5c3d6cf7d"}, + {file = "Pillow-10.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d6caf3cd38449ec3cd8a68b375e0c6fe4b6fd04edb6c9766b55ef84a6e8ddf2d"}, + {file = "Pillow-10.0.1.tar.gz", hash = "sha256:d72967b06be9300fed5cfbc8b5bafceec48bf7cdc7dab66b1d2549035287191d"}, ] [package.extras] From 53b7d9ccf2562616eac4bd77215054dc8a54b600 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 13:46:02 +0200 Subject: [PATCH 488/562] Bump pillow from 10.0.0 to 10.0.1 (#16344) Bumps [pillow](https://github.com/python-pillow/Pillow) from 10.0.0 to 10.0.1. 
- [Release notes](https://github.com/python-pillow/Pillow/releases) - [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst) - [Commits](https://github.com/python-pillow/Pillow/compare/10.0.0...10.0.1) --- updated-dependencies: - dependency-name: pillow dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 112 ++++++++++++++++++++++++++-------------------------- 1 file changed, 55 insertions(+), 57 deletions(-) diff --git a/poetry.lock b/poetry.lock index 1cefabb358..97926336ca 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1618,67 +1618,65 @@ files = [ [[package]] name = "pillow" -version = "10.0.0" +version = "10.0.1" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.8" files = [ - {file = "Pillow-10.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891"}, - {file = "Pillow-10.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d5db32e2a6ccbb3d34d87c87b432959e0db29755727afb37290e10f6e8e62614"}, - {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edf4392b77bdc81f36e92d3a07a5cd072f90253197f4a52a55a8cec48a12483b"}, - {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:520f2a520dc040512699f20fa1c363eed506e94248d71f85412b625026f6142c"}, - {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:8c11160913e3dd06c8ffdb5f233a4f254cb449f4dfc0f8f4549eda9e542c93d1"}, - {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a74ba0c356aaa3bb8e3eb79606a87669e7ec6444be352870623025d75a14a2bf"}, - {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5d0dae4cfd56969d23d94dc8e89fb6a217be461c69090768227beb8ed28c0a3"}, - {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22c10cc517668d44b211717fd9775799ccec4124b9a7f7b3635fc5386e584992"}, - {file = "Pillow-10.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:dffe31a7f47b603318c609f378ebcd57f1554a3a6a8effbc59c3c69f804296de"}, - {file = "Pillow-10.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:9fb218c8a12e51d7ead2a7c9e101a04982237d4855716af2e9499306728fb485"}, - {file = "Pillow-10.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d35e3c8d9b1268cbf5d3670285feb3528f6680420eafe35cccc686b73c1e330f"}, - {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ed64f9ca2f0a95411e88a4efbd7a29e5ce2cea36072c53dd9d26d9c76f753b3"}, - {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b6eb5502f45a60a3f411c63187db83a3d3107887ad0d036c13ce836f8a36f1d"}, - {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:c1fbe7621c167ecaa38ad29643d77a9ce7311583761abf7836e1510c580bf3dd"}, - {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cd25d2a9d2b36fcb318882481367956d2cf91329f6892fe5d385c346c0649629"}, - {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3b08d4cc24f471b2c8ca24ec060abf4bebc6b144cb89cba638c720546b1cf538"}, - {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737a602fbd82afd892ca746392401b634e278cb65d55c4b7a8f48e9ef8d008d"}, - {file = "Pillow-10.0.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:3a82c40d706d9aa9734289740ce26460a11aeec2d9c79b7af87bb35f0073c12f"}, - {file = "Pillow-10.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:bc2ec7c7b5d66b8ec9ce9f720dbb5fa4bace0f545acd34870eff4a369b44bf37"}, - {file = "Pillow-10.0.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:d80cf684b541685fccdd84c485b31ce73fc5c9b5d7523bf1394ce134a60c6883"}, - {file = "Pillow-10.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76de421f9c326da8f43d690110f0e79fe3ad1e54be811545d7d91898b4c8493e"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81ff539a12457809666fef6624684c008e00ff6bf455b4b89fd00a140eecd640"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce543ed15570eedbb85df19b0a1a7314a9c8141a36ce089c0a894adbfccb4568"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:685ac03cc4ed5ebc15ad5c23bc555d68a87777586d970c2c3e216619a5476223"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d72e2ecc68a942e8cf9739619b7f408cc7b272b279b56b2c83c6123fcfa5cdff"}, - {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d50b6aec14bc737742ca96e85d6d0a5f9bfbded018264b3b70ff9d8c33485551"}, - {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:00e65f5e822decd501e374b0650146063fbb30a7264b4d2744bdd7b913e0cab5"}, - {file = "Pillow-10.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f31f9fdbfecb042d046f9d91270a0ba28368a723302786c0009ee9b9f1f60199"}, - {file = "Pillow-10.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:1ce91b6ec08d866b14413d3f0bbdea7e24dfdc8e59f562bb77bc3fe60b6144ca"}, - {file = "Pillow-10.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:349930d6e9c685c089284b013478d6f76e3a534e36ddfa912cde493f235372f3"}, - {file = "Pillow-10.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a684105f7c32488f7153905a4e3015a3b6c7182e106fe3c37fbb5ef3e6994c3"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4f69b3700201b80bb82c3a97d5e9254084f6dd5fb5b16fc1a7b974260f89f43"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f07ea8d2f827d7d2a49ecf1639ec02d75ffd1b88dcc5b3a61bbb37a8759ad8d"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:040586f7d37b34547153fa383f7f9aed68b738992380ac911447bb78f2abe530"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f88a0b92277de8e3ca715a0d79d68dc82807457dae3ab8699c758f07c20b3c51"}, - {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c7cf14a27b0d6adfaebb3ae4153f1e516df54e47e42dcc073d7b3d76111a8d86"}, - {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3400aae60685b06bb96f99a21e1ada7bc7a413d5f49bce739828ecd9391bb8f7"}, - {file = "Pillow-10.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:dbc02381779d412145331789b40cc7b11fdf449e5d94f6bc0b080db0a56ea3f0"}, - {file = "Pillow-10.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9211e7ad69d7c9401cfc0e23d49b69ca65ddd898976d660a2fa5904e3d7a9baa"}, - {file = "Pillow-10.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:faaf07ea35355b01a35cb442dd950d8f1bb5b040a7787791a535de13db15ed90"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f72a021fbb792ce98306ffb0c348b3c9cb967dce0f12a49aa4c3d3fdefa967"}, - {file = 
"Pillow-10.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f7c16705f44e0504a3a2a14197c1f0b32a95731d251777dcb060aa83022cb2d"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:76edb0a1fa2b4745fb0c99fb9fb98f8b180a1bbceb8be49b087e0b21867e77d3"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:368ab3dfb5f49e312231b6f27b8820c823652b7cd29cfbd34090565a015e99ba"}, - {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:608bfdee0d57cf297d32bcbb3c728dc1da0907519d1784962c5f0c68bb93e5a3"}, - {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5c6e3df6bdd396749bafd45314871b3d0af81ff935b2d188385e970052091017"}, - {file = "Pillow-10.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:7be600823e4c8631b74e4a0d38384c73f680e6105a7d3c6824fcf226c178c7e6"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:92be919bbc9f7d09f7ae343c38f5bb21c973d2576c1d45600fce4b74bafa7ac0"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8182b523b2289f7c415f589118228d30ac8c355baa2f3194ced084dac2dbba"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:38250a349b6b390ee6047a62c086d3817ac69022c127f8a5dc058c31ccef17f3"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:88af2003543cc40c80f6fca01411892ec52b11021b3dc22ec3bc9d5afd1c5334"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c189af0545965fa8d3b9613cfdb0cd37f9d71349e0f7750e1fd704648d475ed2"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7b031a6fc11365970e6a5686d7ba8c63e4c1cf1ea143811acbb524295eabed"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db24668940f82321e746773a4bc617bfac06ec831e5c88b643f91f122a785684"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:efe8c0681042536e0d06c11f48cebe759707c9e9abf880ee213541c5b46c5bf3"}, - {file = "Pillow-10.0.0.tar.gz", hash = "sha256:9c82b5b3e043c7af0d95792d0d20ccf68f61a1fec6b3530e718b688422727396"}, + {file = "Pillow-10.0.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:8f06be50669087250f319b706decf69ca71fdecd829091a37cc89398ca4dc17a"}, + {file = "Pillow-10.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50bd5f1ebafe9362ad622072a1d2f5850ecfa44303531ff14353a4059113b12d"}, + {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6a90167bcca1216606223a05e2cf991bb25b14695c518bc65639463d7db722d"}, + {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f11c9102c56ffb9ca87134bd025a43d2aba3f1155f508eff88f694b33a9c6d19"}, + {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:186f7e04248103482ea6354af6d5bcedb62941ee08f7f788a1c7707bc720c66f"}, + {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:0462b1496505a3462d0f35dc1c4d7b54069747d65d00ef48e736acda2c8cbdff"}, + {file = "Pillow-10.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d889b53ae2f030f756e61a7bff13684dcd77e9af8b10c6048fb2c559d6ed6eaf"}, + {file = "Pillow-10.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:552912dbca585b74d75279a7570dd29fa43b6d93594abb494ebb31ac19ace6bd"}, + {file = "Pillow-10.0.1-cp310-cp310-win_amd64.whl", hash = 
"sha256:787bb0169d2385a798888e1122c980c6eff26bf941a8ea79747d35d8f9210ca0"}, + {file = "Pillow-10.0.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fd2a5403a75b54661182b75ec6132437a181209b901446ee5724b589af8edef1"}, + {file = "Pillow-10.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2d7e91b4379f7a76b31c2dda84ab9e20c6220488e50f7822e59dac36b0cd92b1"}, + {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e9adb3f22d4c416e7cd79b01375b17159d6990003633ff1d8377e21b7f1b21"}, + {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93139acd8109edcdeffd85e3af8ae7d88b258b3a1e13a038f542b79b6d255c54"}, + {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:92a23b0431941a33242b1f0ce6c88a952e09feeea9af4e8be48236a68ffe2205"}, + {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cbe68deb8580462ca0d9eb56a81912f59eb4542e1ef8f987405e35a0179f4ea2"}, + {file = "Pillow-10.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:522ff4ac3aaf839242c6f4e5b406634bfea002469656ae8358644fc6c4856a3b"}, + {file = "Pillow-10.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:84efb46e8d881bb06b35d1d541aa87f574b58e87f781cbba8d200daa835b42e1"}, + {file = "Pillow-10.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:898f1d306298ff40dc1b9ca24824f0488f6f039bc0e25cfb549d3195ffa17088"}, + {file = "Pillow-10.0.1-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:bcf1207e2f2385a576832af02702de104be71301c2696d0012b1b93fe34aaa5b"}, + {file = "Pillow-10.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d6c9049c6274c1bb565021367431ad04481ebb54872edecfcd6088d27edd6ed"}, + {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28444cb6ad49726127d6b340217f0627abc8732f1194fd5352dec5e6a0105635"}, + {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de596695a75496deb3b499c8c4f8e60376e0516e1a774e7bc046f0f48cd620ad"}, + {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:2872f2d7846cf39b3dbff64bc1104cc48c76145854256451d33c5faa55c04d1a"}, + {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:4ce90f8a24e1c15465048959f1e94309dfef93af272633e8f37361b824532e91"}, + {file = "Pillow-10.0.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ee7810cf7c83fa227ba9125de6084e5e8b08c59038a7b2c9045ef4dde61663b4"}, + {file = "Pillow-10.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b1be1c872b9b5fcc229adeadbeb51422a9633abd847c0ff87dc4ef9bb184ae08"}, + {file = "Pillow-10.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:98533fd7fa764e5f85eebe56c8e4094db912ccbe6fbf3a58778d543cadd0db08"}, + {file = "Pillow-10.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:764d2c0daf9c4d40ad12fbc0abd5da3af7f8aa11daf87e4fa1b834000f4b6b0a"}, + {file = "Pillow-10.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fcb59711009b0168d6ee0bd8fb5eb259c4ab1717b2f538bbf36bacf207ef7a68"}, + {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:697a06bdcedd473b35e50a7e7506b1d8ceb832dc238a336bd6f4f5aa91a4b500"}, + {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f665d1e6474af9f9da5e86c2a3a2d2d6204e04d5af9c06b9d42afa6ebde3f21"}, + {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = 
"sha256:2fa6dd2661838c66f1a5473f3b49ab610c98a128fc08afbe81b91a1f0bf8c51d"}, + {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:3a04359f308ebee571a3127fdb1bd01f88ba6f6fb6d087f8dd2e0d9bff43f2a7"}, + {file = "Pillow-10.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:723bd25051454cea9990203405fa6b74e043ea76d4968166dfd2569b0210886a"}, + {file = "Pillow-10.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:71671503e3015da1b50bd18951e2f9daf5b6ffe36d16f1eb2c45711a301521a7"}, + {file = "Pillow-10.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:44e7e4587392953e5e251190a964675f61e4dae88d1e6edbe9f36d6243547ff3"}, + {file = "Pillow-10.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:3855447d98cced8670aaa63683808df905e956f00348732448b5a6df67ee5849"}, + {file = "Pillow-10.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ed2d9c0704f2dc4fa980b99d565c0c9a543fe5101c25b3d60488b8ba80f0cce1"}, + {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5bb289bb835f9fe1a1e9300d011eef4d69661bb9b34d5e196e5e82c4cb09b37"}, + {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a0d3e54ab1df9df51b914b2233cf779a5a10dfd1ce339d0421748232cea9876"}, + {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:2cc6b86ece42a11f16f55fe8903595eff2b25e0358dec635d0a701ac9586588f"}, + {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:ca26ba5767888c84bf5a0c1a32f069e8204ce8c21d00a49c90dabeba00ce0145"}, + {file = "Pillow-10.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f0b4b06da13275bc02adfeb82643c4a6385bd08d26f03068c2796f60d125f6f2"}, + {file = "Pillow-10.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bc2e3069569ea9dbe88d6b8ea38f439a6aad8f6e7a6283a38edf61ddefb3a9bf"}, + {file = "Pillow-10.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:8b451d6ead6e3500b6ce5c7916a43d8d8d25ad74b9102a629baccc0808c54971"}, + {file = "Pillow-10.0.1-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:32bec7423cdf25c9038fef614a853c9d25c07590e1a870ed471f47fb80b244db"}, + {file = "Pillow-10.0.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cf63d2c6928b51d35dfdbda6f2c1fddbe51a6bc4a9d4ee6ea0e11670dd981e"}, + {file = "Pillow-10.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f6d3d4c905e26354e8f9d82548475c46d8e0889538cb0657aa9c6f0872a37aa4"}, + {file = "Pillow-10.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:847e8d1017c741c735d3cd1883fa7b03ded4f825a6e5fcb9378fd813edee995f"}, + {file = "Pillow-10.0.1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:7f771e7219ff04b79e231d099c0a28ed83aa82af91fd5fa9fdb28f5b8d5addaf"}, + {file = "Pillow-10.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459307cacdd4138edee3875bbe22a2492519e060660eaf378ba3b405d1c66317"}, + {file = "Pillow-10.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b059ac2c4c7a97daafa7dc850b43b2d3667def858a4f112d1aa082e5c3d6cf7d"}, + {file = "Pillow-10.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d6caf3cd38449ec3cd8a68b375e0c6fe4b6fd04edb6c9766b55ef84a6e8ddf2d"}, + {file = "Pillow-10.0.1.tar.gz", hash = "sha256:d72967b06be9300fed5cfbc8b5bafceec48bf7cdc7dab66b1d2549035287191d"}, ] [package.extras] From 4663d555021ce53c57e418eb1ee4445eab276bc4 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Mon, 18 Sep 2023 15:01:23 +0200 Subject: [PATCH 489/562] Mandate Pillow>=10.0.1 because 
of libwebp CVE (#16347) --- changelog.d/16347.misc | 1 + pyproject.toml | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16347.misc diff --git a/changelog.d/16347.misc b/changelog.d/16347.misc new file mode 100644 index 0000000000..f4f5bfb2de --- /dev/null +++ b/changelog.d/16347.misc @@ -0,0 +1 @@ +Pillow 10.0.1 is now mandatory because of libwebp CVE-2023-4863, since Pillow provides libwebp in the wheels. diff --git a/pyproject.toml b/pyproject.toml index ea2d75fa8c..9c9a5dc2bc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -180,7 +180,9 @@ PyYAML = ">=3.13" pyasn1 = ">=0.1.9" pyasn1-modules = ">=0.0.7" bcrypt = ">=3.1.7" -Pillow = ">=5.4.0" +# 10.0.1 minimum is mandatory here because of libwebp CVE-2023-4863. +# Packagers that already took care of libwebp can lower that down to 5.4.0. +Pillow = ">=10.0.1" # We use SortedDict.peekitem(), which was added in sortedcontainers 1.5.2. sortedcontainers = ">=1.5.2" pymacaroons = ">=0.13.0" From 63d28a88c1d18c64ea7e23b6dd7483e6d5dcf881 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 18 Sep 2023 09:02:12 -0400 Subject: [PATCH 490/562] Additional validation of receipts (#16327) Reject invalid receipts with a reasonable error message & expand tests for receipts. --- changelog.d/16327.bugfix | 1 + synapse/handlers/receipts.py | 26 +++- synapse/rest/client/read_marker.py | 2 +- synapse/rest/client/receipts.py | 2 +- tests/rest/client/test_receipts.py | 221 +++++++++++++++++++++++++++-- tests/rest/client/test_sync.py | 154 +------------------- 6 files changed, 241 insertions(+), 165 deletions(-) create mode 100644 changelog.d/16327.bugfix diff --git a/changelog.d/16327.bugfix b/changelog.d/16327.bugfix new file mode 100644 index 0000000000..be3d1b4f21 --- /dev/null +++ b/changelog.d/16327.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where invalid receipts would be accepted. diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 2bacdebfb5..c7edada353 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -37,6 +37,8 @@ class ReceiptsHandler: self.server_name = hs.config.server.server_name self.store = hs.get_datastores().main self.event_auth_handler = hs.get_event_auth_handler() + self.event_handler = hs.get_event_handler() + self._storage_controllers = hs.get_storage_controllers() self.hs = hs @@ -81,6 +83,20 @@ class ReceiptsHandler: ) continue + # Let's check that the origin server is in the room before accepting the receipt. + # We don't want to block waiting on a partial state so take an + # approximation if needed. + domains = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation( + room_id + ) + if origin not in domains: + logger.info( + "Ignoring receipt for room %r from server %s as they're not in the room", + room_id, + origin, + ) + continue + for receipt_type, users in room_values.items(): for user_id, user_values in users.items(): if get_domain_from_id(user_id) != origin: @@ -158,17 +174,23 @@ class ReceiptsHandler: self, room_id: str, receipt_type: str, - user_id: str, + user_id: UserID, event_id: str, thread_id: Optional[str], ) -> None: """Called when a client tells us a local user has read up to the given event_id in the room. """ + + # Ensure the room/event exists, this will raise an error if the user + # cannot view the event.
+ if not await self.event_handler.get_event(user_id, room_id, event_id): + return + receipt = ReadReceipt( room_id=room_id, receipt_type=receipt_type, - user_id=user_id, + user_id=user_id.to_string(), event_ids=[event_id], thread_id=thread_id, data={"ts": int(self.clock.time_msec())}, diff --git a/synapse/rest/client/read_marker.py b/synapse/rest/client/read_marker.py index 1707e51972..15e4d56cdb 100644 --- a/synapse/rest/client/read_marker.py +++ b/synapse/rest/client/read_marker.py @@ -84,7 +84,7 @@ class ReadMarkerRestServlet(RestServlet): await self.receipts_handler.received_client_receipt( room_id, receipt_type, - user_id=requester.user.to_string(), + user_id=requester.user, event_id=event_id, # Setting the thread ID is not possible with the /read_markers endpoint. thread_id=None, diff --git a/synapse/rest/client/receipts.py b/synapse/rest/client/receipts.py index 869a374459..814d075faf 100644 --- a/synapse/rest/client/receipts.py +++ b/synapse/rest/client/receipts.py @@ -108,7 +108,7 @@ class ReceiptRestServlet(RestServlet): await self.receipts_handler.received_client_receipt( room_id, receipt_type, - user_id=requester.user.to_string(), + user_id=requester.user, event_id=event_id, thread_id=thread_id, ) diff --git a/tests/rest/client/test_receipts.py b/tests/rest/client/test_receipts.py index 2a7fcea386..ec638c89b7 100644 --- a/tests/rest/client/test_receipts.py +++ b/tests/rest/client/test_receipts.py @@ -11,11 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from http import HTTPStatus +from typing import Optional + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin -from synapse.rest.client import login, receipts, register +from synapse.api.constants import EduTypes, EventTypes, HistoryVisibility, ReceiptTypes +from synapse.rest.client import login, receipts, room, sync from synapse.server import HomeServer +from synapse.types import JsonDict from synapse.util import Clock from tests import unittest @@ -24,30 +29,113 @@ from tests import unittest class ReceiptsTestCase(unittest.HomeserverTestCase): servlets = [ login.register_servlets, - register.register_servlets, receipts.register_servlets, synapse.rest.admin.register_servlets, + room.register_servlets, + sync.register_servlets, ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.owner = self.register_user("owner", "pass") - self.owner_tok = self.login("owner", "pass") + self.url = "/sync?since=%s" + self.next_batch = "s0" + + # Register the first user + self.user_id = self.register_user("kermit", "monkey") + self.tok = self.login("kermit", "monkey") + + # Create the room + self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) + + # Register the second user + self.user2 = self.register_user("kermit2", "monkey") + self.tok2 = self.login("kermit2", "monkey") + + # Join the second user + self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2) def test_send_receipt(self) -> None: + # Send a message. 
+ res = self.helper.send(self.room_id, body="hello", tok=self.tok) + + # Send a read receipt + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}", + {}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + self.assertNotEqual(self._get_read_receipt(), None) + + def test_send_receipt_unknown_event(self) -> None: + """Receipts sent for unknown events are ignored to not break message retention.""" + # Attempt to send a receipt to an unknown room. channel = self.make_request( "POST", "/rooms/!abc:beep/receipt/m.read/$def", content={}, - access_token=self.owner_tok, + access_token=self.tok2, ) self.assertEqual(channel.code, 200, channel.result) + self.assertIsNone(self._get_read_receipt()) + + # Attempt to send a receipt to an unknown event. + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/m.read/$def", + content={}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200, channel.result) + self.assertIsNone(self._get_read_receipt()) + + def test_send_receipt_unviewable_event(self) -> None: + """Receipts sent for unviewable events are errors.""" + # Create a room where new users can't see events from before their join + # & send events into it. + room_id = self.helper.create_room_as( + self.user_id, + tok=self.tok, + extra_content={ + "preset": "private_chat", + "initial_state": [ + { + "content": {"history_visibility": HistoryVisibility.JOINED}, + "state_key": "", + "type": EventTypes.RoomHistoryVisibility, + } + ], + }, + ) + res = self.helper.send(room_id, body="hello", tok=self.tok) + + # Attempt to send a receipt from the wrong user. + channel = self.make_request( + "POST", + f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}", + content={}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 403, channel.result) + + # Join the user to the room, but they still can't see the event. 
+ self.helper.invite(room_id, self.user_id, self.user2, tok=self.tok) + self.helper.join(room=room_id, user=self.user2, tok=self.tok2) + + channel = self.make_request( + "POST", + f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}", + content={}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 403, channel.result) def test_send_receipt_invalid_room_id(self) -> None: channel = self.make_request( "POST", "/rooms/not-a-room-id/receipt/m.read/$def", content={}, - access_token=self.owner_tok, + access_token=self.tok, ) self.assertEqual(channel.code, 400, channel.result) self.assertEqual( @@ -59,7 +147,7 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): "POST", "/rooms/!abc:beep/receipt/m.read/not-an-event-id", content={}, - access_token=self.owner_tok, + access_token=self.tok, ) self.assertEqual(channel.code, 400, channel.result) self.assertEqual( @@ -71,6 +159,123 @@ class ReceiptsTestCase(unittest.HomeserverTestCase): "POST", "/rooms/!abc:beep/receipt/invalid-receipt-type/$def", content={}, - access_token=self.owner_tok, + access_token=self.tok, ) self.assertEqual(channel.code, 400, channel.result) + + def test_private_read_receipts(self) -> None: + # Send a message as the first user + res = self.helper.send(self.room_id, body="hello", tok=self.tok) + + # Send a private read receipt to tell the server the first user's message was read + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", + {}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + + # Test that the first user can't see the other user's private read receipt + self.assertIsNone(self._get_read_receipt()) + + def test_public_receipt_can_override_private(self) -> None: + """ + Sending a public read receipt to the same event which has a private read + receipt should cause that receipt to become public. + """ + # Send a message as the first user + res = self.helper.send(self.room_id, body="hello", tok=self.tok) + + # Send a private read receipt + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", + {}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + self.assertIsNone(self._get_read_receipt()) + + # Send a public read receipt + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}", + {}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + + # Test that we did override the private read receipt + self.assertNotEqual(self._get_read_receipt(), None) + + def test_private_receipt_cannot_override_public(self) -> None: + """ + Sending a private read receipt to the same event which has a public read + receipt should cause no change. 
+ """ + # Send a message as the first user + res = self.helper.send(self.room_id, body="hello", tok=self.tok) + + # Send a public read receipt + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}", + {}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + self.assertNotEqual(self._get_read_receipt(), None) + + # Send a private read receipt + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", + {}, + access_token=self.tok2, + ) + self.assertEqual(channel.code, 200) + + # Test that we didn't override the public read receipt + self.assertIsNone(self._get_read_receipt()) + + def test_read_receipt_with_empty_body_is_rejected(self) -> None: + # Send a message as the first user + res = self.helper.send(self.room_id, body="hello", tok=self.tok) + + # Send a read receipt for this message with an empty body + channel = self.make_request( + "POST", + f"/rooms/{self.room_id}/receipt/m.read/{res['event_id']}", + access_token=self.tok2, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST) + self.assertEqual(channel.json_body["errcode"], "M_NOT_JSON", channel.json_body) + + def _get_read_receipt(self) -> Optional[JsonDict]: + """Syncs and returns the read receipt.""" + + # Checks if event is a read receipt + def is_read_receipt(event: JsonDict) -> bool: + return event["type"] == EduTypes.RECEIPT + + # Sync + channel = self.make_request( + "GET", + self.url % self.next_batch, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200) + + # Store the next batch for the next request. + self.next_batch = channel.json_body["next_batch"] + + if channel.json_body.get("rooms", None) is None: + return None + + # Return the read receipt + ephemeral_events = channel.json_body["rooms"]["join"][self.room_id][ + "ephemeral" + ]["events"] + receipt_event = filter(is_read_receipt, ephemeral_events) + return next(receipt_event, None) diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 9c876c7a32..d60665254e 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -13,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import json -from http import HTTPStatus -from typing import List, Optional +from typing import List from parameterized import parameterized @@ -22,7 +21,6 @@ from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.api.constants import ( - EduTypes, EventContentFields, EventTypes, ReceiptTypes, @@ -376,156 +374,6 @@ class SyncKnockTestCase(KnockingStrippedStateEventHelperMixin): ) -class ReadReceiptsTestCase(unittest.HomeserverTestCase): - servlets = [ - synapse.rest.admin.register_servlets, - login.register_servlets, - receipts.register_servlets, - room.register_servlets, - sync.register_servlets, - ] - - def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() - - return self.setup_test_homeserver(config=config) - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.url = "/sync?since=%s" - self.next_batch = "s0" - - # Register the first user - self.user_id = self.register_user("kermit", "monkey") - self.tok = self.login("kermit", "monkey") - - # Create the room - self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) - - # Register the second user - self.user2 = self.register_user("kermit2", "monkey") - self.tok2 = self.login("kermit2", "monkey") - - # Join the second user - self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2) - - def test_private_read_receipts(self) -> None: - # Send a message as the first user - res = self.helper.send(self.room_id, body="hello", tok=self.tok) - - # Send a private read receipt to tell the server the first user's message was read - channel = self.make_request( - "POST", - f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", - {}, - access_token=self.tok2, - ) - self.assertEqual(channel.code, 200) - - # Test that the first user can't see the other user's private read receipt - self.assertIsNone(self._get_read_receipt()) - - def test_public_receipt_can_override_private(self) -> None: - """ - Sending a public read receipt to the same event which has a private read - receipt should cause that receipt to become public. - """ - # Send a message as the first user - res = self.helper.send(self.room_id, body="hello", tok=self.tok) - - # Send a private read receipt - channel = self.make_request( - "POST", - f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", - {}, - access_token=self.tok2, - ) - self.assertEqual(channel.code, 200) - self.assertIsNone(self._get_read_receipt()) - - # Send a public read receipt - channel = self.make_request( - "POST", - f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}", - {}, - access_token=self.tok2, - ) - self.assertEqual(channel.code, 200) - - # Test that we did override the private read receipt - self.assertNotEqual(self._get_read_receipt(), None) - - def test_private_receipt_cannot_override_public(self) -> None: - """ - Sending a private read receipt to the same event which has a public read - receipt should cause no change. 
- """ - # Send a message as the first user - res = self.helper.send(self.room_id, body="hello", tok=self.tok) - - # Send a public read receipt - channel = self.make_request( - "POST", - f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ}/{res['event_id']}", - {}, - access_token=self.tok2, - ) - self.assertEqual(channel.code, 200) - self.assertNotEqual(self._get_read_receipt(), None) - - # Send a private read receipt - channel = self.make_request( - "POST", - f"/rooms/{self.room_id}/receipt/{ReceiptTypes.READ_PRIVATE}/{res['event_id']}", - {}, - access_token=self.tok2, - ) - self.assertEqual(channel.code, 200) - - # Test that we didn't override the public read receipt - self.assertIsNone(self._get_read_receipt()) - - def test_read_receipt_with_empty_body_is_rejected(self) -> None: - # Send a message as the first user - res = self.helper.send(self.room_id, body="hello", tok=self.tok) - - # Send a read receipt for this message with an empty body - channel = self.make_request( - "POST", - f"/rooms/{self.room_id}/receipt/m.read/{res['event_id']}", - access_token=self.tok2, - ) - self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST) - self.assertEqual(channel.json_body["errcode"], "M_NOT_JSON", channel.json_body) - - def _get_read_receipt(self) -> Optional[JsonDict]: - """Syncs and returns the read receipt.""" - - # Checks if event is a read receipt - def is_read_receipt(event: JsonDict) -> bool: - return event["type"] == EduTypes.RECEIPT - - # Sync - channel = self.make_request( - "GET", - self.url % self.next_batch, - access_token=self.tok, - ) - self.assertEqual(channel.code, 200) - - # Store the next batch for the next request. - self.next_batch = channel.json_body["next_batch"] - - if channel.json_body.get("rooms", None) is None: - return None - - # Return the read receipt - ephemeral_events = channel.json_body["rooms"]["join"][self.room_id][ - "ephemeral" - ]["events"] - receipt_event = filter(is_read_receipt, ephemeral_events) - return next(receipt_event, None) - - class UnreadMessagesTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, From 053155a2af52aa66910e4a22dad60109607b1098 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Mon, 18 Sep 2023 15:01:23 +0200 Subject: [PATCH 491/562] Mandate Pillow>=10.0.1 because of libwebp CVE (#16347) --- changelog.d/16347.misc | 1 + pyproject.toml | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16347.misc diff --git a/changelog.d/16347.misc b/changelog.d/16347.misc new file mode 100644 index 0000000000..f4f5bfb2de --- /dev/null +++ b/changelog.d/16347.misc @@ -0,0 +1 @@ +Pillow 10.0.1 is now mandatory because of libwebp CVE-2023-4863, since Pillow provides libwebp in the wheels. diff --git a/pyproject.toml b/pyproject.toml index 1144114041..d66089a67d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -174,7 +174,9 @@ PyYAML = ">=3.13" pyasn1 = ">=0.1.9" pyasn1-modules = ">=0.0.7" bcrypt = ">=3.1.7" -Pillow = ">=5.4.0" +# 10.0.1 minimum is mandatory here because of libwebp CVE-2023-4863. +# Packagers that already took care of libwebp can lower that down to 5.4.0. +Pillow = ">=10.0.1" # We use SortedDict.peekitem(), which was added in sortedcontainers 1.5.2. sortedcontainers = ">=1.5.2" pymacaroons = ">=0.13.0" From 85bfd4735e0b6e31d530f692d7113b4fec89e6b3 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 18 Sep 2023 09:29:05 -0400 Subject: [PATCH 492/562] Return an immutable value from get_latest_event_ids_in_room. 
(#16326) --- changelog.d/16326.misc | 1 + synapse/events/builder.py | 2 +- synapse/handlers/federation_event.py | 8 +++--- synapse/storage/controllers/persist_events.py | 9 +++---- .../databases/main/event_federation.py | 8 +++--- synapse/storage/databases/main/events.py | 2 +- tests/handlers/test_presence.py | 2 +- tests/replication/storage/test_events.py | 4 +-- tests/replication/tcp/streams/test_events.py | 10 +++---- .../test_federation_sender_shard.py | 2 +- tests/storage/test_cleanup_extrems.py | 14 +++++----- tests/test_federation.py | 26 ++++++++++++------- 12 files changed, 48 insertions(+), 40 deletions(-) create mode 100644 changelog.d/16326.misc diff --git a/changelog.d/16326.misc b/changelog.d/16326.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16326.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 1165c017ba..43469b170f 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -103,7 +103,7 @@ class EventBuilder: async def build( self, - prev_event_ids: StrCollection, + prev_event_ids: List[str], auth_event_ids: Optional[List[str]], depth: Optional[int] = None, ) -> EventBase: diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index d32d224d56..eedde97ab0 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -723,12 +723,11 @@ class FederationEventHandler: if not prevs - seen: return - latest_list = await self._store.get_latest_event_ids_in_room(room_id) + latest_frozen = await self._store.get_latest_event_ids_in_room(room_id) # We add the prev events that we have seen to the latest # list to ensure the remote server doesn't give them to us - latest = set(latest_list) - latest |= seen + latest = seen | latest_frozen logger.info( "Requesting missing events between %s and %s", @@ -1976,8 +1975,7 @@ class FederationEventHandler: # partial and full state and may not be accurate. return - extrem_ids_list = await self._store.get_latest_event_ids_in_room(event.room_id) - extrem_ids = set(extrem_ids_list) + extrem_ids = await self._store.get_latest_event_ids_in_room(event.room_id) prev_event_ids = set(event.prev_event_ids()) if extrem_ids == prev_event_ids: diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index 6864f93090..f39ae2d635 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -19,6 +19,7 @@ import logging from collections import deque from typing import ( TYPE_CHECKING, + AbstractSet, Any, Awaitable, Callable, @@ -618,7 +619,7 @@ class EventsPersistenceStorageController: ) for room_id, ev_ctx_rm in events_by_room.items(): - latest_event_ids = set( + latest_event_ids = ( await self.main_store.get_latest_event_ids_in_room(room_id) ) new_latest_event_ids = await self._calculate_new_extremities( @@ -740,7 +741,7 @@ class EventsPersistenceStorageController: self, room_id: str, event_contexts: List[Tuple[EventBase, EventContext]], - latest_event_ids: Collection[str], + latest_event_ids: AbstractSet[str], ) -> Set[str]: """Calculates the new forward extremities for a room given events to persist. 
@@ -758,8 +759,6 @@ class EventsPersistenceStorageController: and not event.internal_metadata.is_soft_failed() ] - latest_event_ids = set(latest_event_ids) - # start with the existing forward extremities result = set(latest_event_ids) @@ -798,7 +797,7 @@ class EventsPersistenceStorageController: self, room_id: str, events_context: List[Tuple[EventBase, EventContext]], - old_latest_event_ids: Set[str], + old_latest_event_ids: AbstractSet[str], new_latest_event_ids: Set[str], ) -> Tuple[Optional[StateMap[str]], Optional[StateMap[str]], Set[str]]: """Calculate the current state dict after adding some new events to diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 09de8f55e2..afffa54985 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -19,6 +19,7 @@ from typing import ( TYPE_CHECKING, Collection, Dict, + FrozenSet, Iterable, List, Optional, @@ -47,7 +48,7 @@ from synapse.storage.database import ( from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.databases.main.signatures import SignatureWorkerStore from synapse.storage.engines import PostgresEngine, Sqlite3Engine -from synapse.types import JsonDict, StrCollection, StrSequence +from synapse.types import JsonDict, StrCollection from synapse.util import json_encoder from synapse.util.caches.descriptors import cached from synapse.util.caches.lrucache import LruCache @@ -1179,13 +1180,14 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas ) @cached(max_entries=5000, iterable=True) - async def get_latest_event_ids_in_room(self, room_id: str) -> StrSequence: - return await self.db_pool.simple_select_onecol( + async def get_latest_event_ids_in_room(self, room_id: str) -> FrozenSet[str]: + event_ids = await self.db_pool.simple_select_onecol( table="event_forward_extremities", keyvalues={"room_id": room_id}, retcol="event_id", desc="get_latest_event_ids_in_room", ) + return frozenset(event_ids) async def get_min_depth(self, room_id: str) -> Optional[int]: """For the given room, get the minimum depth we have seen for it.""" diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 0c1ed75240..bc8474a589 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -222,7 +222,7 @@ class PersistEventsStore: for room_id, latest_event_ids in new_forward_extremities.items(): self.store.get_latest_event_ids_in_room.prefill( - (room_id,), list(latest_event_ids) + (room_id,), frozenset(latest_event_ids) ) async def _get_events_which_are_prevs(self, event_ids: Iterable[str]) -> List[str]: diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 638787b029..41c8c44e02 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -1858,7 +1858,7 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): ) event = self.get_success( - builder.build(prev_event_ids=prev_event_ids, auth_event_ids=None) + builder.build(prev_event_ids=list(prev_event_ids), auth_event_ids=None) ) self.get_success(self.federation_event_handler.on_receive_pdu(hostname, event)) diff --git a/tests/replication/storage/test_events.py b/tests/replication/storage/test_events.py index af25815fa5..33c277a38a 100644 --- a/tests/replication/storage/test_events.py +++ b/tests/replication/storage/test_events.py @@ -90,7 +90,7 @@ class 
EventsWorkerStoreTestCase(BaseWorkerStoreTestCase): def test_get_latest_event_ids_in_room(self) -> None: create = self.persist(type="m.room.create", key="", creator=USER_ID) self.replicate() - self.check("get_latest_event_ids_in_room", (ROOM_ID,), [create.event_id]) + self.check("get_latest_event_ids_in_room", (ROOM_ID,), {create.event_id}) join = self.persist( type="m.room.member", @@ -99,7 +99,7 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase): prev_events=[(create.event_id, {})], ) self.replicate() - self.check("get_latest_event_ids_in_room", (ROOM_ID,), [join.event_id]) + self.check("get_latest_event_ids_in_room", (ROOM_ID,), {join.event_id}) def test_redactions(self) -> None: self.persist(type="m.room.create", key="", creator=USER_ID) diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py index 65ef4bb160..128fc3e046 100644 --- a/tests/replication/tcp/streams/test_events.py +++ b/tests/replication/tcp/streams/test_events.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, List, Optional, Sequence +from typing import Any, List, Optional from twisted.test.proto_helpers import MemoryReactor @@ -139,7 +139,7 @@ class EventsStreamTestCase(BaseStreamTestCase): ) # this is the point in the DAG where we make a fork - fork_point: Sequence[str] = self.get_success( + fork_point = self.get_success( self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id) ) @@ -294,7 +294,7 @@ class EventsStreamTestCase(BaseStreamTestCase): ) # this is the point in the DAG where we make a fork - fork_point: Sequence[str] = self.get_success( + fork_point = self.get_success( self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id) ) @@ -316,14 +316,14 @@ class EventsStreamTestCase(BaseStreamTestCase): self.test_handler.received_rdata_rows.clear() # now roll back all that state by de-modding the users - prev_events = fork_point + prev_events = list(fork_point) pl_events = [] for u in user_ids: pls["users"][u] = 0 e = self.get_success( inject_event( self.hs, - prev_event_ids=list(prev_events), + prev_event_ids=prev_events, type=EventTypes.PowerLevels, state_key="", sender=self.user_id, diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py index 9b28cd474f..59f4fdc70b 100644 --- a/tests/replication/test_federation_sender_shard.py +++ b/tests/replication/test_federation_sender_shard.py @@ -261,7 +261,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): builder = factory.for_room_version(room_version, event_dict) join_event = self.get_success( - builder.build(prev_event_ids=prev_event_ids, auth_event_ids=None) + builder.build(prev_event_ids=list(prev_event_ids), auth_event_ids=None) ) self.get_success(federation.on_send_membership_event(remote_server, join_event)) diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index 7de109966d..ceb9597dd3 100644 --- a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -120,7 +120,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual(latest_event_ids, [event_id_4]) + self.assertEqual(latest_event_ids, {event_id_4}) def test_basic_cleanup(self) -> None: """Test that extremities are correctly calculated in the presence of @@ -147,7 +147,7 @@ class 
CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual(set(latest_event_ids), {event_id_a, event_id_b}) + self.assertEqual(latest_event_ids, {event_id_a, event_id_b}) # Run the background update and check it did the right thing self.run_background_update() @@ -155,7 +155,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual(latest_event_ids, [event_id_b]) + self.assertEqual(latest_event_ids, {event_id_b}) def test_chain_of_fail_cleanup(self) -> None: """Test that extremities are correctly calculated in the presence of @@ -185,7 +185,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual(set(latest_event_ids), {event_id_a, event_id_b}) + self.assertEqual(latest_event_ids, {event_id_a, event_id_b}) # Run the background update and check it did the right thing self.run_background_update() @@ -193,7 +193,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual(latest_event_ids, [event_id_b]) + self.assertEqual(latest_event_ids, {event_id_b}) def test_forked_graph_cleanup(self) -> None: r"""Test that extremities are correctly calculated in the presence of @@ -240,7 +240,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual(set(latest_event_ids), {event_id_a, event_id_b, event_id_c}) + self.assertEqual(latest_event_ids, {event_id_a, event_id_b, event_id_c}) # Run the background update and check it did the right thing self.run_background_update() @@ -248,7 +248,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual(set(latest_event_ids), {event_id_b, event_id_c}) + self.assertEqual(latest_event_ids, {event_id_b, event_id_c}) class CleanupExtremDummyEventsTestCase(HomeserverTestCase): diff --git a/tests/test_federation.py b/tests/test_federation.py index f8ade6da38..1b0504709e 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -51,9 +51,15 @@ class MessageAcceptTests(unittest.HomeserverTestCase): self.store = self.hs.get_datastores().main # Figure out what the most recent event is - most_recent = self.get_success( - self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id) - )[0] + most_recent = next( + iter( + self.get_success( + self.hs.get_datastores().main.get_latest_event_ids_in_room( + self.room_id + ) + ) + ) + ) join_event = make_event_from_dict( { @@ -100,8 +106,8 @@ class MessageAcceptTests(unittest.HomeserverTestCase): # Make sure we actually joined the room self.assertEqual( - self.get_success(self.store.get_latest_event_ids_in_room(self.room_id))[0], - "$join:test.serv", + self.get_success(self.store.get_latest_event_ids_in_room(self.room_id)), + {"$join:test.serv"}, ) def test_cant_hide_direct_ancestors(self) -> None: @@ -127,9 +133,11 @@ class MessageAcceptTests(unittest.HomeserverTestCase): self.http_client.post_json = post_json # Figure out what the most recent event is - 
most_recent = self.get_success( - self.store.get_latest_event_ids_in_room(self.room_id) - )[0] + most_recent = next( + iter( + self.get_success(self.store.get_latest_event_ids_in_room(self.room_id)) + ) + ) # Now lie about an event lying_event = make_event_from_dict( @@ -165,7 +173,7 @@ class MessageAcceptTests(unittest.HomeserverTestCase): # Make sure the invalid event isn't there extrem = self.get_success(self.store.get_latest_event_ids_in_room(self.room_id)) - self.assertEqual(extrem[0], "$join:test.serv") + self.assertEqual(extrem, {"$join:test.serv"}) def test_retry_device_list_resync(self) -> None: """Tests that device lists are marked as stale if they couldn't be synced, and From d8aed6fba7c4b919c5e76352a84686f85b642efc Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Mon, 18 Sep 2023 15:29:46 +0200 Subject: [PATCH 493/562] 1.92.3 --- CHANGES.md | 17 +++++++++++++++++ changelog.d/16347.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 24 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/16347.misc diff --git a/CHANGES.md b/CHANGES.md index f913c2069b..09c1ec10d0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,20 @@ +# Synapse 1.92.3 (2023-09-18) + +This is again an update targeted at mitigating [CVE-2023-4863](https://cve.org/CVERecord?id=CVE-2023-4863). +It turns out that libwebp is bundled statically in Pillow wheels so we need to update this dependency instead of +the libwebp package at the OS level. + +Unlike what was advertised in the 1.92.2 changelog, this release also impacts PyPI wheels and Debian packages. + + +### Internal Changes + +- Pillow 10.0.1 is now mandatory because of libwebp CVE-2023-4863, since Pillow provides libwebp in the wheels. ([\#16347](https://github.com/matrix-org/synapse/issues/16347)) + +### Updates to locked dependencies + +* Bump pillow from 10.0.0 to 10.0.1. ([\#16344](https://github.com/matrix-org/synapse/issues/16344)) + # Synapse 1.92.2 (2023-09-15) This is a Docker-only update to mitigate [CVE-2023-4863](https://cve.org/CVERecord?id=CVE-2023-4863), a critical vulnerability in `libwebp`. Server admins not using Docker should ensure that their `libwebp` is up to date (if installed). We encourage admins to upgrade as soon as possible. diff --git a/changelog.d/16347.misc b/changelog.d/16347.misc deleted file mode 100644 index f4f5bfb2de..0000000000 --- a/changelog.d/16347.misc +++ /dev/null @@ -1 +0,0 @@ -Pillow 10.0.1 is now mandatory because of libwebp CVE-2023-4863, since Pillow provides libwebp in the wheels. diff --git a/debian/changelog b/debian/changelog index 79e7fccfca..254ca26fd8 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.92.3) stable; urgency=medium + + * New Synapse release 1.92.3. + + -- Synapse Packaging team Mon, 18 Sep 2023 15:05:04 +0200 + matrix-synapse-py3 (1.92.2) stable; urgency=medium * New Synapse release 1.92.2.
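Because the fix ships in the libwebp that Pillow bundles, rather than in an OS package, a deployment is only protected once the Python package itself is new enough. A minimal pre-upgrade check, assuming the third-party `packaging` helper is available (the snippet is an illustration, not part of the patch):

```python
# Hypothetical check that the installed Pillow meets the 10.0.1 floor, which
# carries the patched libwebp for CVE-2023-4863.
from importlib.metadata import version

from packaging.version import Version


def pillow_is_patched() -> bool:
    return Version(version("Pillow")) >= Version("10.0.1")


if __name__ == "__main__":
    print("Pillow OK" if pillow_is_patched() else "Pillow needs upgrading")
```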
diff --git a/pyproject.toml b/pyproject.toml index d66089a67d..572e886725 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.92.2" +version = "1.92.3" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From e36990c00e201b35b62a91991be15c35edb20d8d Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Mon, 18 Sep 2023 15:35:57 +0200 Subject: [PATCH 494/562] Update changelog --- CHANGES.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 09c1ec10d0..79967f7e76 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,10 +1,12 @@ # Synapse 1.92.3 (2023-09-18) -This is again an update targeted at mitigating [CVE-2023-4863](https://cve.org/CVERecord?id=CVE-2023-4863). +This is again a security update targeted at mitigating [CVE-2023-4863](https://cve.org/CVERecord?id=CVE-2023-4863). It turns out that libwebp is bundled statically in Pillow wheels so we need to update this dependency instead of the libwebp package at the OS level. -Unlike what was advertised in the 1.92.2 changelog, this release also impacts PyPI wheels and Debian packages. +Unlike what was advertised in the 1.92.2 changelog, this release also impacts PyPI wheels and Debian packages from matrix.org. + +We encourage admins to upgrade as soon as possible. ### Internal Changes - Pillow 10.0.1 is now mandatory because of libwebp CVE-2023-4863, since Pillow provides libwebp in the wheels. ([\#16347](https://github.com/matrix-org/synapse/issues/16347)) From c1e244c8f70ff1a23e358e1608c555f9722dee1f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 18 Sep 2023 09:55:04 -0400 Subject: [PATCH 495/562] Make cached account data/tags/admin types immutable (#16325) --- changelog.d/16325.misc | 1 + synapse/app/admin_cmd.py | 14 +++++----- synapse/handlers/admin.py | 18 ++++++------- synapse/handlers/sync.py | 27 +++++++++++-------- synapse/rest/admin/users.py | 8 +++--- synapse/rest/client/account_data.py | 10 +++---- .../storage/databases/main/account_data.py | 14 +++++----- .../databases/main/experimental_features.py | 7 +++-- synapse/storage/databases/main/tags.py | 6 ++--- 9 files changed, 55 insertions(+), 50 deletions(-) create mode 100644 changelog.d/16325.misc diff --git a/changelog.d/16325.misc b/changelog.d/16325.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16325.misc @@ -0,0 +1 @@ +Improve type hints.
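This patch and #16326 above apply the same idea: values returned from Synapse's `@cached` store methods are shared between callers, so typing them as immutable (`FrozenSet`, `Mapping`, `JsonMapping`) keeps any caller from mutating the cached copy in place. A minimal sketch of the failure mode being guarded against, with a plain dict standing in for the real cache decorator:

```python
from typing import Dict, FrozenSet

_cache: Dict[str, FrozenSet[str]] = {}


def get_latest_event_ids_in_room(room_id: str) -> FrozenSet[str]:
    # Toy stand-in for the cached store method: the first call "queries the
    # database", subsequent calls return the shared cached value.
    if room_id not in _cache:
        _cache[room_id] = frozenset({"$event_a", "$event_b"})
    return _cache[room_id]


extremities = get_latest_event_ids_in_room("!room:example.org")
# extremities.add("$rogue") would raise AttributeError: a frozenset cannot be
# mutated, so no caller can corrupt the value held in the cache.
merged = extremities | {"$seen"}  # callers derive a copy instead of mutating
print(sorted(merged))
```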
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index f9aada269a..aa24f7da6c 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -17,7 +17,7 @@ import logging import os import sys import tempfile -from typing import List, Mapping, Optional +from typing import List, Mapping, Optional, Sequence from twisted.internet import defer, task @@ -57,7 +57,7 @@ from synapse.storage.databases.main.state import StateGroupWorkerStore from synapse.storage.databases.main.stream import StreamWorkerStore from synapse.storage.databases.main.tags import TagsWorkerStore from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore -from synapse.types import JsonDict, StateMap +from synapse.types import JsonMapping, StateMap from synapse.util import SYNAPSE_VERSION from synapse.util.logcontext import LoggingContext @@ -198,7 +198,7 @@ class FileExfiltrationWriter(ExfiltrationWriter): for event in state.values(): json.dump(event, fp=f) - def write_profile(self, profile: JsonDict) -> None: + def write_profile(self, profile: JsonMapping) -> None: user_directory = os.path.join(self.base_directory, "user_data") os.makedirs(user_directory, exist_ok=True) profile_file = os.path.join(user_directory, "profile") @@ -206,7 +206,7 @@ class FileExfiltrationWriter(ExfiltrationWriter): with open(profile_file, "a") as f: json.dump(profile, fp=f) - def write_devices(self, devices: List[JsonDict]) -> None: + def write_devices(self, devices: Sequence[JsonMapping]) -> None: user_directory = os.path.join(self.base_directory, "user_data") os.makedirs(user_directory, exist_ok=True) device_file = os.path.join(user_directory, "devices") @@ -215,7 +215,7 @@ class FileExfiltrationWriter(ExfiltrationWriter): with open(device_file, "a") as f: json.dump(device, fp=f) - def write_connections(self, connections: List[JsonDict]) -> None: + def write_connections(self, connections: Sequence[JsonMapping]) -> None: user_directory = os.path.join(self.base_directory, "user_data") os.makedirs(user_directory, exist_ok=True) connection_file = os.path.join(user_directory, "connections") @@ -225,7 +225,7 @@ class FileExfiltrationWriter(ExfiltrationWriter): json.dump(connection, fp=f) def write_account_data( - self, file_name: str, account_data: Mapping[str, JsonDict] + self, file_name: str, account_data: Mapping[str, JsonMapping] ) -> None: account_data_directory = os.path.join( self.base_directory, "user_data", "account_data" @@ -237,7 +237,7 @@ class FileExfiltrationWriter(ExfiltrationWriter): with open(account_data_file, "a") as f: json.dump(account_data, fp=f) - def write_media_id(self, media_id: str, media_metadata: JsonDict) -> None: + def write_media_id(self, media_id: str, media_metadata: JsonMapping) -> None: file_directory = os.path.join(self.base_directory, "media_ids") os.makedirs(file_directory, exist_ok=True) media_id_file = os.path.join(file_directory, media_id) diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 7092ff3449..ba9704a065 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -14,11 +14,11 @@ import abc import logging -from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Set +from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Sequence, Set from synapse.api.constants import Direction, Membership from synapse.events import EventBase -from synapse.types import JsonDict, RoomStreamToken, StateMap, UserID, UserInfo +from synapse.types import JsonMapping, RoomStreamToken, StateMap, UserID, UserInfo 
from synapse.visibility import filter_events_for_client if TYPE_CHECKING: @@ -35,7 +35,7 @@ class AdminHandler: self._state_storage_controller = self._storage_controllers.state self._msc3866_enabled = hs.config.experimental.msc3866.enabled - async def get_whois(self, user: UserID) -> JsonDict: + async def get_whois(self, user: UserID) -> JsonMapping: connections = [] sessions = await self._store.get_user_ip_and_agents(user) @@ -55,7 +55,7 @@ class AdminHandler: return ret - async def get_user(self, user: UserID) -> Optional[JsonDict]: + async def get_user(self, user: UserID) -> Optional[JsonMapping]: """Function to get user details""" user_info: Optional[UserInfo] = await self._store.get_user_by_id( user.to_string() @@ -344,7 +344,7 @@ class ExfiltrationWriter(metaclass=abc.ABCMeta): raise NotImplementedError() @abc.abstractmethod - def write_profile(self, profile: JsonDict) -> None: + def write_profile(self, profile: JsonMapping) -> None: """Write the profile of a user. Args: @@ -353,7 +353,7 @@ class ExfiltrationWriter(metaclass=abc.ABCMeta): raise NotImplementedError() @abc.abstractmethod - def write_devices(self, devices: List[JsonDict]) -> None: + def write_devices(self, devices: Sequence[JsonMapping]) -> None: """Write the devices of a user. Args: @@ -362,7 +362,7 @@ class ExfiltrationWriter(metaclass=abc.ABCMeta): raise NotImplementedError() @abc.abstractmethod - def write_connections(self, connections: List[JsonDict]) -> None: + def write_connections(self, connections: Sequence[JsonMapping]) -> None: """Write the connections of a user. Args: @@ -372,7 +372,7 @@ class ExfiltrationWriter(metaclass=abc.ABCMeta): @abc.abstractmethod def write_account_data( - self, file_name: str, account_data: Mapping[str, JsonDict] + self, file_name: str, account_data: Mapping[str, JsonMapping] ) -> None: """Write the account data of a user. @@ -383,7 +383,7 @@ class ExfiltrationWriter(metaclass=abc.ABCMeta): raise NotImplementedError() @abc.abstractmethod - def write_media_id(self, media_id: str, media_metadata: JsonDict) -> None: + def write_media_id(self, media_id: str, media_metadata: JsonMapping) -> None: """Write the media's metadata of a user. Exports only the metadata, as this can be fetched from the database via read only. 
In order to access the files, a connection to the correct diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index f1f19666d7..1a4d394eda 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -57,6 +57,7 @@ from synapse.storage.roommember import MemberSummary from synapse.types import ( DeviceListUpdates, JsonDict, + JsonMapping, MutableStateMap, Requester, RoomStreamToken, @@ -1793,19 +1794,23 @@ class SyncHandler: ) if push_rules_changed: - global_account_data = dict(global_account_data) - global_account_data[ - AccountDataTypes.PUSH_RULES - ] = await self._push_rules_handler.push_rules_for_user(sync_config.user) + global_account_data = { + AccountDataTypes.PUSH_RULES: await self._push_rules_handler.push_rules_for_user( + sync_config.user + ), + **global_account_data, + } else: all_global_account_data = await self.store.get_global_account_data_for_user( user_id ) - global_account_data = dict(all_global_account_data) - global_account_data[ - AccountDataTypes.PUSH_RULES - ] = await self._push_rules_handler.push_rules_for_user(sync_config.user) + global_account_data = { + AccountDataTypes.PUSH_RULES: await self._push_rules_handler.push_rules_for_user( + sync_config.user + ), + **all_global_account_data, + } account_data_for_user = ( await sync_config.filter_collection.filter_global_account_data( @@ -1909,7 +1914,7 @@ class SyncHandler: blocks_all_rooms or sync_result_builder.sync_config.filter_collection.blocks_all_room_account_data() ): - account_data_by_room: Mapping[str, Mapping[str, JsonDict]] = {} + account_data_by_room: Mapping[str, Mapping[str, JsonMapping]] = {} elif since_token and not sync_result_builder.full_state: account_data_by_room = ( await self.store.get_updated_room_account_data_for_user( @@ -2349,8 +2354,8 @@ class SyncHandler: sync_result_builder: "SyncResultBuilder", room_builder: "RoomSyncResultBuilder", ephemeral: List[JsonDict], - tags: Optional[Mapping[str, Mapping[str, Any]]], - account_data: Mapping[str, JsonDict], + tags: Optional[Mapping[str, JsonMapping]], + account_data: Mapping[str, JsonMapping], always_include: bool = False, ) -> None: """Populates the `joined` and `archived` section of `sync_result_builder` diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 91898a5c13..9aaa88e229 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -39,7 +39,7 @@ from synapse.rest.admin._base import ( from synapse.rest.client._base import client_patterns from synapse.storage.databases.main.registration import ExternalIDReuseException from synapse.storage.databases.main.stats import UserSortOrder -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, JsonMapping, UserID if TYPE_CHECKING: from synapse.server import HomeServer @@ -211,7 +211,7 @@ class UserRestServletV2(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> Tuple[int, JsonMapping]: await assert_requester_is_admin(self.auth, request) target_user = UserID.from_string(user_id) @@ -226,7 +226,7 @@ class UserRestServletV2(RestServlet): async def on_PUT( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> Tuple[int, JsonMapping]: requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester) @@ -658,7 +658,7 @@ class WhoisRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str - ) -> Tuple[int, JsonDict]: + ) -> Tuple[int, JsonMapping]: target_user = 
UserID.from_string(user_id) requester = await self.auth.get_user_by_req(request) diff --git a/synapse/rest/client/account_data.py b/synapse/rest/client/account_data.py index b1f9e9dc9b..ce0c4e7742 100644 --- a/synapse/rest/client/account_data.py +++ b/synapse/rest/client/account_data.py @@ -20,7 +20,7 @@ from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseRequest -from synapse.types import JsonDict, RoomID +from synapse.types import JsonDict, JsonMapping, RoomID from ._base import client_patterns @@ -95,7 +95,7 @@ class AccountDataServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str, account_data_type: str - ) -> Tuple[int, JsonDict]: + ) -> Tuple[int, JsonMapping]: requester = await self.auth.get_user_by_req(request) if user_id != requester.user.to_string(): raise AuthError(403, "Cannot get account data for other users.") @@ -106,7 +106,7 @@ class AccountDataServlet(RestServlet): and account_data_type == AccountDataTypes.PUSH_RULES ): account_data: Optional[ - JsonDict + JsonMapping ] = await self._push_rules_handler.push_rules_for_user(requester.user) else: account_data = await self.store.get_global_account_data_by_type_for_user( @@ -236,7 +236,7 @@ class RoomAccountDataServlet(RestServlet): user_id: str, room_id: str, account_data_type: str, - ) -> Tuple[int, JsonDict]: + ) -> Tuple[int, JsonMapping]: requester = await self.auth.get_user_by_req(request) if user_id != requester.user.to_string(): raise AuthError(403, "Cannot get account data for other users.") @@ -253,7 +253,7 @@ class RoomAccountDataServlet(RestServlet): self._hs.config.experimental.msc4010_push_rules_account_data and account_data_type == AccountDataTypes.PUSH_RULES ): - account_data: Optional[JsonDict] = {} + account_data: Optional[JsonMapping] = {} else: account_data = await self.store.get_account_data_for_room_and_type( user_id, room_id, account_data_type diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 8f7bdbc61a..80f146dd53 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -43,7 +43,7 @@ from synapse.storage.util.id_generators import ( MultiWriterIdGenerator, StreamIdGenerator, ) -from synapse.types import JsonDict +from synapse.types import JsonDict, JsonMapping from synapse.util import json_encoder from synapse.util.caches.descriptors import cached from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -119,7 +119,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) @cached() async def get_global_account_data_for_user( self, user_id: str - ) -> Mapping[str, JsonDict]: + ) -> Mapping[str, JsonMapping]: """ Get all the global client account_data for a user. @@ -164,7 +164,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) @cached() async def get_room_account_data_for_user( self, user_id: str - ) -> Mapping[str, Mapping[str, JsonDict]]: + ) -> Mapping[str, Mapping[str, JsonMapping]]: """ Get all of the per-room client account_data for a user. 
@@ -213,7 +213,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) @cached(num_args=2, max_entries=5000, tree=True) async def get_global_account_data_by_type_for_user( self, user_id: str, data_type: str - ) -> Optional[JsonDict]: + ) -> Optional[JsonMapping]: """ Returns: The account data. @@ -265,7 +265,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) @cached(num_args=2, tree=True) async def get_account_data_for_room( self, user_id: str, room_id: str - ) -> Mapping[str, JsonDict]: + ) -> Mapping[str, JsonMapping]: """Get all the client account_data for a user for a room. Args: @@ -296,7 +296,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) @cached(num_args=3, max_entries=5000, tree=True) async def get_account_data_for_room_and_type( self, user_id: str, room_id: str, account_data_type: str - ) -> Optional[JsonDict]: + ) -> Optional[JsonMapping]: """Get the client account_data of given type for a user for a room. Args: @@ -394,7 +394,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) async def get_updated_global_account_data_for_user( self, user_id: str, stream_id: int - ) -> Dict[str, JsonDict]: + ) -> Mapping[str, JsonMapping]: """Get all the global account_data that's changed for a user. Args: diff --git a/synapse/storage/databases/main/experimental_features.py b/synapse/storage/databases/main/experimental_features.py index cf3226ae5a..654f924019 100644 --- a/synapse/storage/databases/main/experimental_features.py +++ b/synapse/storage/databases/main/experimental_features.py @@ -12,11 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Dict +from typing import TYPE_CHECKING, Dict, FrozenSet from synapse.storage.database import DatabasePool, LoggingDatabaseConnection from synapse.storage.databases.main import CacheInvalidationWorkerStore -from synapse.types import StrCollection from synapse.util.caches.descriptors import cached if TYPE_CHECKING: @@ -34,7 +33,7 @@ class ExperimentalFeaturesStore(CacheInvalidationWorkerStore): super().__init__(database, db_conn, hs) @cached() - async def list_enabled_features(self, user_id: str) -> StrCollection: + async def list_enabled_features(self, user_id: str) -> FrozenSet[str]: """ Checks to see what features are enabled for a given user Args: @@ -49,7 +48,7 @@ class ExperimentalFeaturesStore(CacheInvalidationWorkerStore): ["feature"], ) - return [feature["feature"] for feature in enabled] + return frozenset(feature["feature"] for feature in enabled) async def set_features_for_user( self, diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py index c149a9eacb..61403a98cf 100644 --- a/synapse/storage/databases/main/tags.py +++ b/synapse/storage/databases/main/tags.py @@ -23,7 +23,7 @@ from synapse.storage._base import db_to_json from synapse.storage.database import LoggingTransaction from synapse.storage.databases.main.account_data import AccountDataWorkerStore from synapse.storage.util.id_generators import AbstractStreamIdGenerator -from synapse.types import JsonDict +from synapse.types import JsonDict, JsonMapping from synapse.util import json_encoder from synapse.util.caches.descriptors import cached @@ -34,7 +34,7 @@ class TagsWorkerStore(AccountDataWorkerStore): @cached() async def get_tags_for_user( self, user_id: str - ) -> Mapping[str, Mapping[str, JsonDict]]: + ) -> 
Mapping[str, Mapping[str, JsonMapping]]: """Get all the tags for a user. @@ -109,7 +109,7 @@ class TagsWorkerStore(AccountDataWorkerStore): async def get_updated_tags( self, user_id: str, stream_id: int - ) -> Mapping[str, Mapping[str, JsonDict]]: + ) -> Mapping[str, Mapping[str, JsonMapping]]: """Get all the tags for the rooms where the tags have changed since the given version From eef2b9e34418e902baab1e730eb805eb56034cc2 Mon Sep 17 00:00:00 2001 From: Hanadi Date: Mon, 18 Sep 2023 16:37:51 +0200 Subject: [PATCH 496/562] Filter locked users in the admin API (#16328) Co-authored-by: Hanadi Tamimi --- changelog.d/16328.feature | 1 + docs/admin_api/user_admin_api.md | 17 +++++++++----- synapse/rest/admin/users.py | 6 ++++- synapse/storage/databases/main/__init__.py | 7 +++++- synapse/storage/databases/main/stats.py | 1 + tests/rest/admin/test_user.py | 26 ++++++++++++++++++++++ 6 files changed, 51 insertions(+), 7 deletions(-) create mode 100644 changelog.d/16328.feature diff --git a/changelog.d/16328.feature b/changelog.d/16328.feature new file mode 100644 index 0000000000..9fadf766cc --- /dev/null +++ b/changelog.d/16328.feature @@ -0,0 +1 @@ +Report whether a user is `locked` in the [List Accounts admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#list-accounts), and exclude locked users by default. diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 975a7a0da4..f83facabe4 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -54,7 +54,8 @@ It returns a JSON body like the following: "external_id": "" } ], - "user_type": null + "user_type": null, + "locked": false } ``` @@ -103,7 +104,8 @@ with a body of: ], "admin": false, "deactivated": false, - "user_type": null + "user_type": null, + "locked": false } ``` @@ -184,7 +186,8 @@ A response body like the following is returned: "shadow_banned": 0, "displayname": "", "avatar_url": null, - "creation_ts": 1560432668000 + "creation_ts": 1560432668000, + "locked": false }, { "name": "", "is_guest": 0, @@ -195,7 +198,8 @@ A response body like the following is returned: "shadow_banned": 0, "displayname": "", "avatar_url": "", - "creation_ts": 1561550621000 + "creation_ts": 1561550621000, + "locked": false } ], "next_token": "100", @@ -249,6 +253,8 @@ The following parameters should be set in the URL: - `not_user_type` - Exclude certain user types, such as bot users, from the request. Can be provided multiple times. Possible values are `bot`, `support` or "empty string". "empty string" here means to exclude users without a type. +- `locked` - string representing a bool - Is optional and if `true` will **include** locked users. + Defaults to `false` to exclude locked users. Note: Introduced in v1.93. Caution. The database only has indexes on the columns `name` and `creation_ts`. This means that if a different sort order is used (`is_guest`, `admin`, @@ -274,10 +280,11 @@ The following fields are returned in the JSON response body: - `avatar_url` - string - The user's avatar URL if they have set one. - `creation_ts` - integer - The user's creation timestamp in ms. - `last_seen_ts` - integer - The user's last activity timestamp in ms. - + - `locked` - bool - Whether the user has been marked as locked. Note: Introduced in v1.93. - `next_token`: string representing a positive integer - Indication for pagination. See above. - `total` - integer - Total number of media. +*Added in Synapse 1.93:* the `locked` query parameter and response field. 
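(As a usage illustration for the parameters documented above, here is a hypothetical query against the List Accounts endpoint, `GET /_synapse/admin/v2/users`; the homeserver URL and admin token are placeholders, and the snippet assumes only the query parameter and response field described in these docs:

```python
import json
import urllib.request

BASE_URL = "https://homeserver.example"  # placeholder homeserver
ADMIN_TOKEN = "<admin_access_token>"     # placeholder admin access token

# locked=true includes locked users; omitting it (or passing locked=false)
# excludes them, per the default described above.
req = urllib.request.Request(
    f"{BASE_URL}/_synapse/admin/v2/users?locked=true&from=0&limit=10",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
)
with urllib.request.urlopen(req) as resp:
    body = json.load(resp)

for user in body["users"]:
    # Each entry now carries the `locked` flag shown in the example response.
    print(user["name"], user["locked"])
```)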
## Query current sessions for a user diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 9aaa88e229..5b743a1d03 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -66,6 +66,7 @@ class UsersRestServletV2(RestServlet): The parameter `deactivated` can be used to include deactivated users. The parameter `order_by` can be used to order the result. The parameter `not_user_type` can be used to exclude certain user types. Possible values are `bot`, `support` or "empty string". "empty string" here means to exclude users without a type. + The parameter `locked` can be used to include locked users. """ @@ -107,8 +108,9 @@ class UsersRestServletV2(RestServlet): "The guests parameter is not supported when MSC3861 is enabled.", errcode=Codes.INVALID_PARAM, ) - deactivated = parse_boolean(request, "deactivated", default=False) + deactivated = parse_boolean(request, "deactivated", default=False) + locked = parse_boolean(request, "locked", default=False) admins = parse_boolean(request, "admins") # If support for MSC3866 is not enabled, apply no filtering based on the @@ -133,6 +135,7 @@ class UsersRestServletV2(RestServlet): UserSortOrder.SHADOW_BANNED.value, UserSortOrder.CREATION_TS.value, UserSortOrder.LAST_SEEN_TS.value, + UserSortOrder.LOCKED.value, ), ) @@ -154,6 +157,7 @@ class UsersRestServletV2(RestServlet): direction, approved, not_user_types, + locked, ) # If support for MSC3866 is not enabled, don't show the approval flag. diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 0836e247ef..101403578c 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -175,6 +175,7 @@ class DataStore( direction: Direction = Direction.FORWARDS, approved: bool = True, not_user_types: Optional[List[str]] = None, + locked: bool = False, ) -> Tuple[List[JsonDict], int]: """Function to retrieve a paginated list of users from users list. This will return a json list of users and the @@ -194,6 +195,7 @@ class DataStore( direction: sort ascending or descending approved: whether to include approved users not_user_types: list of user types to exclude + locked: whether to include locked users Returns: A tuple of a list of mappings from user to information and a count of total users. """ @@ -226,6 +228,9 @@ class DataStore( if not deactivated: filters.append("deactivated = 0") + if not locked: + filters.append("locked IS FALSE") + if admins is not None: if admins: filters.append("admin = 1") @@ -290,7 +295,7 @@ class DataStore( sql = f""" SELECT name, user_type, is_guest, admin, deactivated, shadow_banned, displayname, avatar_url, creation_ts * 1000 as creation_ts, approved, - eu.user_id is not null as erased, last_seen_ts + eu.user_id is not null as erased, last_seen_ts, locked {sql_base} ORDER BY {order_by_column} {order}, u.name ASC LIMIT ? OFFSET ? 
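(The storage change above appends the new `locked` predicate to a dynamically built list of SQL filters. A simplified, self-contained sketch of that pattern, with stand-in table and column names rather than the real `DataStore` query:

```python
from typing import Any, List, Tuple

def build_user_list_query(
    deactivated: bool = False,
    locked: bool = False,
    limit: int = 100,
    start: int = 0,
) -> Tuple[str, List[Any]]:
    filters: List[str] = []
    if not deactivated:
        filters.append("deactivated = 0")
    if not locked:
        # Mirrors the patch: locked users are excluded unless requested.
        filters.append("locked IS FALSE")
    where_clause = ("WHERE " + " AND ".join(filters)) if filters else ""
    sql = (
        "SELECT name, deactivated, locked FROM users "
        f"{where_clause} ORDER BY name ASC LIMIT ? OFFSET ?"
    )
    return sql, [limit, start]

# The default call excludes both deactivated and locked users:
print(build_user_list_query())
```)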
diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 3a2966b9e4..9d403919e4 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -108,6 +108,7 @@ class UserSortOrder(Enum): SHADOW_BANNED = "shadow_banned" CREATION_TS = "creation_ts" LAST_SEEN_TS = "last_seen_ts" + LOCKED = "locked" class StatsStore(StateDeltasStore): diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 761871b933..b326ad2c90 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -1146,6 +1146,32 @@ class UsersListTestCase(unittest.HomeserverTestCase): users = {user["name"]: user for user in channel.json_body["users"]} self.assertIs(users[user_id]["erased"], True) + def test_filter_locked(self) -> None: + # Create a new user. + user_id = self.register_user("lockme", "lockme") + + # Lock them + self.get_success(self.store.set_user_locked_status(user_id, True)) + + # Locked user should appear in list users API + channel = self.make_request( + "GET", + self.url + "?locked=true", + access_token=self.admin_user_tok, + ) + users = {user["name"]: user for user in channel.json_body["users"]} + self.assertIn(user_id, users) + self.assertTrue(users[user_id]["locked"]) + + # Locked user should not appear in list users API + channel = self.make_request( + "GET", + self.url + "?locked=false", + access_token=self.admin_user_tok, + ) + users = {user["name"]: user for user in channel.json_body["users"]} + self.assertNotIn(user_id, users) + def _order_test( self, expected_user_list: List[str], From 47d4bb605740db04222d4cc9f083821b6d839a63 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 18 Sep 2023 10:48:02 -0400 Subject: [PATCH 497/562] Stop patching EventBase.__eq__ in tests. (#16349) It is clearer to directly test equality instead of doing indirect assertions via patching __eq__. --- changelog.d/16349.misc | 1 + tests/replication/storage/_base.py | 17 +++++--- tests/replication/storage/test_events.py | 49 +++++++++--------------- 3 files changed, 31 insertions(+), 36 deletions(-) create mode 100644 changelog.d/16349.misc diff --git a/changelog.d/16349.misc b/changelog.d/16349.misc new file mode 100644 index 0000000000..8ce27a1599 --- /dev/null +++ b/changelog.d/16349.misc @@ -0,0 +1 @@ +Avoid patching code in tests. diff --git a/tests/replication/storage/_base.py b/tests/replication/storage/_base.py index de26a62ae1..afcc80a8b3 100644 --- a/tests/replication/storage/_base.py +++ b/tests/replication/storage/_base.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Iterable, Optional +from typing import Any, Callable, Iterable, Optional from unittest.mock import Mock from twisted.test.proto_helpers import MemoryReactor @@ -47,24 +47,31 @@ class BaseWorkerStoreTestCase(BaseStreamTestCase): self.pump(0.1) def check( - self, method: str, args: Iterable[Any], expected_result: Optional[Any] = None + self, + method: str, + args: Iterable[Any], + expected_result: Optional[Any] = None, + asserter: Optional[Callable[[Any, Any, Optional[Any]], None]] = None, ) -> None: + if asserter is None: + asserter = self.assertEqual + master_result = self.get_success(getattr(self.master_store, method)(*args)) worker_result = self.get_success(getattr(self.worker_store, method)(*args)) if expected_result is not None: - self.assertEqual( + asserter( master_result, expected_result, "Expected master result to be %r but was %r" % (expected_result, master_result), ) - self.assertEqual( + asserter( worker_result, expected_result, "Expected worker result to be %r but was %r" % (expected_result, worker_result), ) - self.assertEqual( + asserter( master_result, worker_result, "Worker result %r does not match master result %r" diff --git a/tests/replication/storage/test_events.py b/tests/replication/storage/test_events.py index 33c277a38a..17716253f8 100644 --- a/tests/replication/storage/test_events.py +++ b/tests/replication/storage/test_events.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import Any, Callable, Iterable, List, Optional, Tuple +from typing import Any, Iterable, List, Optional, Tuple from canonicaljson import encode_canonical_json from parameterized import parameterized @@ -21,7 +21,7 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import ReceiptTypes from synapse.api.room_versions import RoomVersions -from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict +from synapse.events import EventBase, make_event_from_dict from synapse.events.snapshot import EventContext from synapse.handlers.room import RoomEventSource from synapse.server import HomeServer @@ -46,32 +46,9 @@ ROOM_ID = "!room:test" logger = logging.getLogger(__name__) -def dict_equals(self: EventBase, other: EventBase) -> bool: - me = encode_canonical_json(self.get_pdu_json()) - them = encode_canonical_json(other.get_pdu_json()) - return me == them - - -def patch__eq__(cls: object) -> Callable[[], None]: - eq = getattr(cls, "__eq__", None) - cls.__eq__ = dict_equals # type: ignore[assignment] - - def unpatch() -> None: - if eq is not None: - cls.__eq__ = eq # type: ignore[method-assign] - - return unpatch - - class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase): STORE_TYPE = EventsWorkerStore - def setUp(self) -> None: - # Patch up the equality operator for events so that we can check - # whether lists of events match using assertEqual - self.unpatches = [patch__eq__(_EventInternalMetadata), patch__eq__(EventBase)] - super().setUp() - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: super().prepare(reactor, clock, hs) @@ -84,8 +61,14 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase): ) ) - def tearDown(self) -> None: - [unpatch() for unpatch in self.unpatches] + def assertEventsEqual( + self, first: EventBase, second: EventBase, msg: Optional[Any] = None + ) -> None: + self.assertEqual( + encode_canonical_json(first.get_pdu_json()), + 
encode_canonical_json(second.get_pdu_json()), + msg, + ) def test_get_latest_event_ids_in_room(self) -> None: create = self.persist(type="m.room.create", key="", creator=USER_ID) @@ -107,7 +90,7 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase): msg = self.persist(type="m.room.message", msgtype="m.text", body="Hello") self.replicate() - self.check("get_event", [msg.event_id], msg) + self.check("get_event", [msg.event_id], msg, asserter=self.assertEventsEqual) redaction = self.persist(type="m.room.redaction", redacts=msg.event_id) self.replicate() @@ -119,7 +102,9 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase): redacted = make_event_from_dict( msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict() ) - self.check("get_event", [msg.event_id], redacted) + self.check( + "get_event", [msg.event_id], redacted, asserter=self.assertEventsEqual + ) def test_backfilled_redactions(self) -> None: self.persist(type="m.room.create", key="", creator=USER_ID) @@ -127,7 +112,7 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase): msg = self.persist(type="m.room.message", msgtype="m.text", body="Hello") self.replicate() - self.check("get_event", [msg.event_id], msg) + self.check("get_event", [msg.event_id], msg, asserter=self.assertEventsEqual) redaction = self.persist( type="m.room.redaction", redacts=msg.event_id, backfill=True @@ -141,7 +126,9 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase): redacted = make_event_from_dict( msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict() ) - self.check("get_event", [msg.event_id], redacted) + self.check( + "get_event", [msg.event_id], redacted, asserter=self.assertEventsEqual + ) def test_invites(self) -> None: self.persist(type="m.room.create", key="", creator=USER_ID) From eee2b6642ddb28a6e0f850b958baa39eb74dc891 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 11:30:43 -0400 Subject: [PATCH 498/562] Bump ruff from 0.0.286 to 0.0.290 (#16342) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Patrick Cloke --- poetry.lock | 38 +++++++++---------- pyproject.toml | 2 +- .../databases/main/event_push_actions.py | 5 +-- synapse/storage/databases/main/events.py | 10 +---- 4 files changed, 22 insertions(+), 33 deletions(-) diff --git a/poetry.lock b/poetry.lock index 17d0993a8b..8264e814b4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2332,28 +2332,28 @@ files = [ [[package]] name = "ruff" -version = "0.0.286" +version = "0.0.290" description = "An extremely fast Python linter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.286-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:8e22cb557e7395893490e7f9cfea1073d19a5b1dd337f44fd81359b2767da4e9"}, - {file = "ruff-0.0.286-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:68ed8c99c883ae79a9133cb1a86d7130feee0397fdf5ba385abf2d53e178d3fa"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8301f0bb4ec1a5b29cfaf15b83565136c47abefb771603241af9d6038f8981e8"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:acc4598f810bbc465ce0ed84417ac687e392c993a84c7eaf3abf97638701c1ec"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88c8e358b445eb66d47164fa38541cfcc267847d1e7a92dd186dddb1a0a9a17f"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:0433683d0c5dbcf6162a4beb2356e820a593243f1fa714072fec15e2e4f4c939"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddb61a0c4454cbe4623f4a07fef03c5ae921fe04fede8d15c6e36703c0a73b07"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:47549c7c0be24c8ae9f2bce6f1c49fbafea83bca80142d118306f08ec7414041"}, - {file = "ruff-0.0.286-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:559aa793149ac23dc4310f94f2c83209eedb16908a0343663be19bec42233d25"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d73cfb1c3352e7aa0ce6fb2321f36fa1d4a2c48d2ceac694cb03611ddf0e4db6"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:3dad93b1f973c6d1db4b6a5da8690c5625a3fa32bdf38e543a6936e634b83dc3"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_i686.whl", hash = "sha256:26afc0851f4fc3738afcf30f5f8b8612a31ac3455cb76e611deea80f5c0bf3ce"}, - {file = "ruff-0.0.286-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:9b6b116d1c4000de1b9bf027131dbc3b8a70507788f794c6b09509d28952c512"}, - {file = "ruff-0.0.286-py3-none-win32.whl", hash = "sha256:556e965ac07c1e8c1c2d759ac512e526ecff62c00fde1a046acb088d3cbc1a6c"}, - {file = "ruff-0.0.286-py3-none-win_amd64.whl", hash = "sha256:5d295c758961376c84aaa92d16e643d110be32add7465e197bfdaec5a431a107"}, - {file = "ruff-0.0.286-py3-none-win_arm64.whl", hash = "sha256:1d6142d53ab7f164204b3133d053c4958d4d11ec3a39abf23a40b13b0784e3f0"}, - {file = "ruff-0.0.286.tar.gz", hash = "sha256:f1e9d169cce81a384a26ee5bb8c919fe9ae88255f39a1a69fd1ebab233a85ed2"}, + {file = "ruff-0.0.290-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:0e2b09ac4213b11a3520221083866a5816616f3ae9da123037b8ab275066fbac"}, + {file = "ruff-0.0.290-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:4ca6285aa77b3d966be32c9a3cd531655b3d4a0171e1f9bf26d66d0372186767"}, + {file = "ruff-0.0.290-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35e3550d1d9f2157b0fcc77670f7bb59154f223bff281766e61bdd1dd854e0c5"}, + {file = "ruff-0.0.290-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d748c8bd97874f5751aed73e8dde379ce32d16338123d07c18b25c9a2796574a"}, + {file = "ruff-0.0.290-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:982af5ec67cecd099e2ef5e238650407fb40d56304910102d054c109f390bf3c"}, + {file = "ruff-0.0.290-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = 
"sha256:bbd37352cea4ee007c48a44c9bc45a21f7ba70a57edfe46842e346651e2b995a"}, + {file = "ruff-0.0.290-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d9be6351b7889462912e0b8185a260c0219c35dfd920fb490c7f256f1d8313e"}, + {file = "ruff-0.0.290-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75cdc7fe32dcf33b7cec306707552dda54632ac29402775b9e212a3c16aad5e6"}, + {file = "ruff-0.0.290-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb07f37f7aecdbbc91d759c0c09870ce0fb3eed4025eebedf9c4b98c69abd527"}, + {file = "ruff-0.0.290-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:2ab41bc0ba359d3f715fc7b705bdeef19c0461351306b70a4e247f836b9350ed"}, + {file = "ruff-0.0.290-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:150bf8050214cea5b990945b66433bf9a5e0cef395c9bc0f50569e7de7540c86"}, + {file = "ruff-0.0.290-py3-none-musllinux_1_2_i686.whl", hash = "sha256:75386ebc15fe5467248c039f5bf6a0cfe7bfc619ffbb8cd62406cd8811815fca"}, + {file = "ruff-0.0.290-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ac93eadf07bc4ab4c48d8bb4e427bf0f58f3a9c578862eb85d99d704669f5da0"}, + {file = "ruff-0.0.290-py3-none-win32.whl", hash = "sha256:461fbd1fb9ca806d4e3d5c745a30e185f7cf3ca77293cdc17abb2f2a990ad3f7"}, + {file = "ruff-0.0.290-py3-none-win_amd64.whl", hash = "sha256:f1f49f5ec967fd5778813780b12a5650ab0ebcb9ddcca28d642c689b36920796"}, + {file = "ruff-0.0.290-py3-none-win_arm64.whl", hash = "sha256:ae5a92dfbdf1f0c689433c223f8dac0782c2b2584bd502dfdbc76475669f1ba1"}, + {file = "ruff-0.0.290.tar.gz", hash = "sha256:949fecbc5467bb11b8db810a7fa53c7e02633856ee6bd1302b2f43adcd71b88d"}, ] [[package]] @@ -3347,4 +3347,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "4a3a82becd89b91e76e2bc2f8ba72123f665c517d9b841d9a34cd01b83a1adc3" +content-hash = "104f108b3c966be05e17cf9975b4061942b354fe9a57cbf7372371fd56b1bf24" diff --git a/pyproject.toml b/pyproject.toml index 7f1e773159..de4dd61ea5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -320,7 +320,7 @@ all = [ # This helps prevents merge conflicts when running a batch of dependabot updates. 
isort = ">=5.10.1" black = ">=22.7.0" -ruff = "0.0.286" +ruff = "0.0.290" # Typechecking lxml-stubs = ">=0.4.0" diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index b958a39aeb..ba99e63d26 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -1599,10 +1599,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas txn, table="event_push_summary", key_names=("user_id", "room_id", "thread_id"), - key_values=[ - (user_id, room_id, thread_id) - for user_id, room_id, thread_id in summaries - ], + key_values=list(summaries), value_names=("notif_count", "unread_count", "stream_ordering"), value_values=[ ( diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index bc8474a589..790d058c43 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -827,15 +827,7 @@ class PersistEventsStore: "target_chain_id", "target_sequence_number", ), - values=[ - (source_id, source_seq, target_id, target_seq) - for ( - source_id, - source_seq, - target_id, - target_seq, - ) in chain_links.get_additions() - ], + values=list(chain_links.get_additions()), ) @staticmethod From 118036eeabf72c268e53f2b7521698643d486387 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 18 Sep 2023 13:21:00 -0400 Subject: [PATCH 499/562] Test against PostgreSQL 16. (#16351) --- .ci/scripts/calculate_jobs.py | 2 +- changelog.d/16351.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16351.misc diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index 08af332b6d..7575683ab4 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -64,7 +64,7 @@ if not IS_PR: { "python-version": "3.11", "database": "postgres", - "postgres-version": "15", + "postgres-version": "16", "extras": "all", } ) diff --git a/changelog.d/16351.misc b/changelog.d/16351.misc new file mode 100644 index 0000000000..b955b3da08 --- /dev/null +++ b/changelog.d/16351.misc @@ -0,0 +1 @@ +Test against PostgreSQL 16. From 83f73d5d6720f25d222ae79ef338e539a0385100 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 13:26:19 -0400 Subject: [PATCH 500/562] Bump furo from 2023.8.19 to 2023.9.10 (#16340) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 8264e814b4..e1b3733c45 100644 --- a/poetry.lock +++ b/poetry.lock @@ -555,13 +555,13 @@ dev = ["Sphinx", "coverage", "flake8", "lxml", "lxml-stubs", "memory-profiler", [[package]] name = "furo" -version = "2023.8.19" +version = "2023.9.10" description = "A clean customisable Sphinx documentation theme." 
optional = false python-versions = ">=3.8" files = [ - {file = "furo-2023.8.19-py3-none-any.whl", hash = "sha256:12f99f87a1873b6746228cfde18f77244e6c1ffb85d7fed95e638aae70d80590"}, - {file = "furo-2023.8.19.tar.gz", hash = "sha256:e671ee638ab3f1b472f4033b0167f502ab407830e0db0f843b1c1028119c9cd1"}, + {file = "furo-2023.9.10-py3-none-any.whl", hash = "sha256:513092538537dc5c596691da06e3c370714ec99bc438680edc1debffb73e5bfc"}, + {file = "furo-2023.9.10.tar.gz", hash = "sha256:5707530a476d2a63b8cad83b4f961f3739a69f4b058bcf38a03a39fa537195b2"}, ] [package.dependencies] From 1f477d65f5602abfa02e19b295b5d8144c10577c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 13:26:35 -0400 Subject: [PATCH 501/562] Bump serde_json from 1.0.106 to 1.0.107 (#16345) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4e233b1683..ea9aa18a5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -352,9 +352,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.106" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cc66a619ed80bf7a0f6b17dd063a84b88f6dea1813737cf469aef1d081142c2" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ "itoa", "ryu", From 3d60b07cdeb2f91a3d41b6f1190967c1c84bd325 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Christian=20Gr=C3=BCnhage?= Date: Tue, 19 Sep 2023 13:24:47 +0200 Subject: [PATCH 502/562] Use string for federation_client_minimum_tls_version documentation examples (#16353) --- changelog.d/16353.doc | 1 + docs/usage/configuration/config_documentation.md | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/16353.doc diff --git a/changelog.d/16353.doc b/changelog.d/16353.doc new file mode 100644 index 0000000000..80af22ed53 --- /dev/null +++ b/changelog.d/16353.doc @@ -0,0 +1 @@ +Use string for federation_client_minimum_tls_version documentation examples. Contributed by @jcgruenhage. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 885a7bf0a3..54315a417e 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1133,14 +1133,14 @@ federation_verify_certificates: false The minimum TLS version that will be used for outbound federation requests. -Defaults to `1`. Configurable to `1`, `1.1`, `1.2`, or `1.3`. Note -that setting this value higher than `1.2` will prevent federation to most -of the public Matrix network: only configure it to `1.3` if you have an +Defaults to `"1"`. Configurable to `"1"`, `"1.1"`, `"1.2"`, or `"1.3"`. Note +that setting this value higher than `"1.2"` will prevent federation to most +of the public Matrix network: only configure it to `"1.3"` if you have an entirely private federation setup and you can ensure TLS 1.3 support. 
Example configuration: ```yaml -federation_client_minimum_tls_version: 1.2 +federation_client_minimum_tls_version: "1.2" ``` --- ### `federation_certificate_verification_whitelist` From 9caeb9be10202d15bfd4d3f2cff60793b1d553e2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 19 Sep 2023 07:56:49 -0400 Subject: [PATCH 503/562] 1.93.0rc1 --- CHANGES.md | 80 +++++++++++++++++++++++++++++++++++++++ changelog.d/15488.feature | 1 - changelog.d/15997.misc | 1 - changelog.d/16066.bugfix | 1 - changelog.d/16090.misc | 1 - changelog.d/16137.feature | 1 - changelog.d/16170.bugfix | 1 - changelog.d/16171.bugfix | 1 - changelog.d/16172.bugfix | 1 - changelog.d/16174.bugfix | 1 - changelog.d/16219.feature | 1 - changelog.d/16227.feature | 1 - changelog.d/16235.misc | 1 - changelog.d/16240.misc | 1 - changelog.d/16248.misc | 1 - changelog.d/16251.bugfix | 1 - changelog.d/16252.bugfix | 1 - changelog.d/16257.bugfix | 1 - changelog.d/16260.misc | 1 - changelog.d/16261.misc | 1 - changelog.d/16262.feature | 1 - changelog.d/16263.misc | 1 - changelog.d/16264.misc | 1 - changelog.d/16265.feature | 1 - changelog.d/16272.bugfix | 1 - changelog.d/16273.misc | 1 - changelog.d/16274.feature | 1 - changelog.d/16276.misc | 1 - changelog.d/16277.misc | 1 - changelog.d/16278.misc | 1 - changelog.d/16280.misc | 1 - changelog.d/16281.misc | 1 - changelog.d/16282.doc | 1 - changelog.d/16283.misc | 1 - changelog.d/16288.bugfix | 1 - changelog.d/16298.misc | 1 - changelog.d/16299.misc | 1 - changelog.d/16300.misc | 1 - changelog.d/16301.misc | 1 - changelog.d/16304.doc | 1 - changelog.d/16309.misc | 1 - changelog.d/16311.misc | 1 - changelog.d/16312.misc | 1 - changelog.d/16313.misc | 1 - changelog.d/16314.misc | 1 - changelog.d/16315.misc | 1 - changelog.d/16316.misc | 1 - changelog.d/16318.misc | 1 - changelog.d/16325.misc | 1 - changelog.d/16326.misc | 1 - changelog.d/16327.bugfix | 1 - changelog.d/16328.feature | 1 - changelog.d/16329.bugfix | 1 - changelog.d/16347.misc | 1 - changelog.d/16349.misc | 1 - changelog.d/16351.misc | 1 - changelog.d/16353.doc | 1 - debian/changelog | 6 +++ pyproject.toml | 2 +- 59 files changed, 87 insertions(+), 57 deletions(-) delete mode 100644 changelog.d/15488.feature delete mode 100644 changelog.d/15997.misc delete mode 100644 changelog.d/16066.bugfix delete mode 100644 changelog.d/16090.misc delete mode 100644 changelog.d/16137.feature delete mode 100644 changelog.d/16170.bugfix delete mode 100644 changelog.d/16171.bugfix delete mode 100644 changelog.d/16172.bugfix delete mode 100644 changelog.d/16174.bugfix delete mode 100644 changelog.d/16219.feature delete mode 100644 changelog.d/16227.feature delete mode 100644 changelog.d/16235.misc delete mode 100644 changelog.d/16240.misc delete mode 100644 changelog.d/16248.misc delete mode 100644 changelog.d/16251.bugfix delete mode 100644 changelog.d/16252.bugfix delete mode 100644 changelog.d/16257.bugfix delete mode 100644 changelog.d/16260.misc delete mode 100644 changelog.d/16261.misc delete mode 100644 changelog.d/16262.feature delete mode 100644 changelog.d/16263.misc delete mode 100644 changelog.d/16264.misc delete mode 100644 changelog.d/16265.feature delete mode 100644 changelog.d/16272.bugfix delete mode 100644 changelog.d/16273.misc delete mode 100644 changelog.d/16274.feature delete mode 100644 changelog.d/16276.misc delete mode 100644 changelog.d/16277.misc delete mode 100644 changelog.d/16278.misc delete mode 100644 changelog.d/16280.misc delete mode 100644 changelog.d/16281.misc delete mode 100644 changelog.d/16282.doc 
delete mode 100644 changelog.d/16283.misc delete mode 100644 changelog.d/16288.bugfix delete mode 100644 changelog.d/16298.misc delete mode 100644 changelog.d/16299.misc delete mode 100644 changelog.d/16300.misc delete mode 100644 changelog.d/16301.misc delete mode 100644 changelog.d/16304.doc delete mode 100644 changelog.d/16309.misc delete mode 100644 changelog.d/16311.misc delete mode 100644 changelog.d/16312.misc delete mode 100644 changelog.d/16313.misc delete mode 100644 changelog.d/16314.misc delete mode 100644 changelog.d/16315.misc delete mode 100644 changelog.d/16316.misc delete mode 100644 changelog.d/16318.misc delete mode 100644 changelog.d/16325.misc delete mode 100644 changelog.d/16326.misc delete mode 100644 changelog.d/16327.bugfix delete mode 100644 changelog.d/16328.feature delete mode 100644 changelog.d/16329.bugfix delete mode 100644 changelog.d/16347.misc delete mode 100644 changelog.d/16349.misc delete mode 100644 changelog.d/16351.misc delete mode 100644 changelog.d/16353.doc diff --git a/CHANGES.md b/CHANGES.md index b59503e083..f95a894c4f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,83 @@ +# Synapse 1.93.0rc1 (2023-09-19) + +### Features + +- Add automatic purge after all users forgotten a room. Also add restore of purge/shutdown rooms after a synapse restart. ([\#15488](https://github.com/matrix-org/synapse/issues/15488)) +- Support resolving homeservers using `matrix-fed` DNS SRV records from [MSC4040](https://github.com/matrix-org/matrix-spec-proposals/pull/4040). ([\#16137](https://github.com/matrix-org/synapse/issues/16137)) +- Add the ability to use `G` (GiB) and `T` (TiB) suffixes in configuration options that refer to numbers of bytes. ([\#16219](https://github.com/matrix-org/synapse/issues/16219)) +- Add span information to requests sent to appservices. Contributed by MTRNord. ([\#16227](https://github.com/matrix-org/synapse/issues/16227)) +- Add the ability to enable/disable registrations when in the CAS flow. Contributed by Aurélien Grimpard. ([\#16262](https://github.com/matrix-org/synapse/issues/16262)) +- Allow `/notifications` endpoint to be routed to workers. ([\#16265](https://github.com/matrix-org/synapse/issues/16265)) +- Enable users to easily unsubscribe to notifications emails via the `List-Unsubscribe` header. ([\#16274](https://github.com/matrix-org/synapse/issues/16274)) +- Report whether a user is `locked` in the [List Accounts admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#list-accounts), and exclude locked users by default. ([\#16328](https://github.com/matrix-org/synapse/issues/16328)) + +### Bugfixes + +- Fix a long-standing bug where multi-device accounts could cause high load due to presence. ([\#16066](https://github.com/matrix-org/synapse/issues/16066), [\#16170](https://github.com/matrix-org/synapse/issues/16170), [\#16171](https://github.com/matrix-org/synapse/issues/16171), [\#16172](https://github.com/matrix-org/synapse/issues/16172), [\#16174](https://github.com/matrix-org/synapse/issues/16174)) +- Fix a long-standing bug where appservices using MSC2409 to receive to_device messages, would only get messages for one user. ([\#16251](https://github.com/matrix-org/synapse/issues/16251)) +- Fix bug when using workers where Synapse could end up re-requesting the same remote device repeatedly. 
([\#16252](https://github.com/matrix-org/synapse/issues/16252)) +- Fix long-standing bug where we kept re-requesting a remote server's key repeatedly, potentially causing delays in receiving events over federation. ([\#16257](https://github.com/matrix-org/synapse/issues/16257)) +- Avoid temporary storage of sensitive information. ([\#16272](https://github.com/matrix-org/synapse/issues/16272)) +- Fix bug introduced in Synapse 1.49.0 when using dehydrated devices ([MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697)) and refresh tokens. Contributed by Hanadi. ([\#16288](https://github.com/matrix-org/synapse/issues/16288)) +- Fix a long-standing bug where invalid receipts would be accepted. ([\#16327](https://github.com/matrix-org/synapse/issues/16327)) +- Use standard name for UTF-8 charset in emails. ([\#16329](https://github.com/matrix-org/synapse/issues/16329)) + +### Improved Documentation + +- Fix typos in the documentation. ([\#16282](https://github.com/matrix-org/synapse/issues/16282)) +- Link to the Alpine Linux community package for Synapse. ([\#16304](https://github.com/matrix-org/synapse/issues/16304)) +- Use string for federation_client_minimum_tls_version documentation examples. Contributed by @jcgruenhage. ([\#16353](https://github.com/matrix-org/synapse/issues/16353)) + +### Internal Changes + +- Allow modules to delete rooms. ([\#15997](https://github.com/matrix-org/synapse/issues/15997)) +- Add GCC and GNU Make to the Nix flake development environment so that `ruff` can be compiled. ([\#16090](https://github.com/matrix-org/synapse/issues/16090), [\#16263](https://github.com/matrix-org/synapse/issues/16263)) +- Fix type checking when using the new version of Twisted. ([\#16235](https://github.com/matrix-org/synapse/issues/16235)) +- Delete device messages asynchronously and in staged batches using the task scheduler. ([\#16240](https://github.com/matrix-org/synapse/issues/16240), [\#16311](https://github.com/matrix-org/synapse/issues/16311), [\#16312](https://github.com/matrix-org/synapse/issues/16312), [\#16313](https://github.com/matrix-org/synapse/issues/16313)) +- Bump minimum supported Rust version to 1.61.0. ([\#16248](https://github.com/matrix-org/synapse/issues/16248)) +- Update rust to version 1.71.1 in the nix development environment. ([\#16260](https://github.com/matrix-org/synapse/issues/16260)) +- Simplify server key storage. ([\#16261](https://github.com/matrix-org/synapse/issues/16261)) +- Reduce CPU overhead of change password endpoint. ([\#16264](https://github.com/matrix-org/synapse/issues/16264)) +- Stop purging from tables slated for removal. ([\#16273](https://github.com/matrix-org/synapse/issues/16273)) +- Improve type hints. ([\#16276](https://github.com/matrix-org/synapse/issues/16276), [\#16301](https://github.com/matrix-org/synapse/issues/16301), [\#16325](https://github.com/matrix-org/synapse/issues/16325), [\#16326](https://github.com/matrix-org/synapse/issues/16326)) +- Raise setuptools_rust version cap to 1.7.0. ([\#16277](https://github.com/matrix-org/synapse/issues/16277)) +- Fix using the new task scheduler causing lots of CPU to be used. ([\#16278](https://github.com/matrix-org/synapse/issues/16278)) +- Upgrade CI run of Python 3.12 from rc1 to rc2. ([\#16280](https://github.com/matrix-org/synapse/issues/16280)) +- Include values in SQL debug when using `execute_values` with Postgres. ([\#16281](https://github.com/matrix-org/synapse/issues/16281)) +- Enable additional linting checks. 
([\#16283](https://github.com/matrix-org/synapse/issues/16283)) +- Don't try refetching device lists for users on remote hosts that are marked as "down". ([\#16298](https://github.com/matrix-org/synapse/issues/16298)) +- Refactor `receipts_graph` Postgres transactions to stop error messages. ([\#16299](https://github.com/matrix-org/synapse/issues/16299)) +- Bump mypy from 1.4.1 to 1.5.1. ([\#16300](https://github.com/matrix-org/synapse/issues/16300)) +- Small improvements to logging in replication code. ([\#16309](https://github.com/matrix-org/synapse/issues/16309)) +- Remove a reference cycle for in background processes. ([\#16314](https://github.com/matrix-org/synapse/issues/16314)) +- Only use literal strings for background process names. ([\#16315](https://github.com/matrix-org/synapse/issues/16315)) +- Refactor `get_user_by_id`. ([\#16316](https://github.com/matrix-org/synapse/issues/16316)) +- Speed up task to delete to-device messages. ([\#16318](https://github.com/matrix-org/synapse/issues/16318)) +- Pillow 10.0.1 is now mandatory because of libwebp CVE-2023-4863, since Pillow provides libwebp in the wheels. ([\#16347](https://github.com/matrix-org/synapse/issues/16347)) +- Avoid patching code in tests. ([\#16349](https://github.com/matrix-org/synapse/issues/16349)) +- Test against PostgreSQL 16. ([\#16351](https://github.com/matrix-org/synapse/issues/16351)) + +### Updates to locked dependencies + +* Bump black from 23.7.0 to 23.9.1. ([\#16295](https://github.com/matrix-org/synapse/issues/16295)) +* Bump docker/build-push-action from 4 to 5. ([\#16336](https://github.com/matrix-org/synapse/issues/16336)) +* Bump docker/login-action from 2 to 3. ([\#16339](https://github.com/matrix-org/synapse/issues/16339)) +* Bump docker/metadata-action from 4 to 5. ([\#16337](https://github.com/matrix-org/synapse/issues/16337)) +* Bump docker/setup-qemu-action from 2 to 3. ([\#16338](https://github.com/matrix-org/synapse/issues/16338)) +* Bump furo from 2023.8.19 to 2023.9.10. ([\#16340](https://github.com/matrix-org/synapse/issues/16340)) +* Bump gitpython from 3.1.32 to 3.1.34. ([\#16267](https://github.com/matrix-org/synapse/issues/16267)) +* Bump gitpython from 3.1.34 to 3.1.35. ([\#16279](https://github.com/matrix-org/synapse/issues/16279)) +* Bump mypy-zope from 1.0.0 to 1.0.1. ([\#16291](https://github.com/matrix-org/synapse/issues/16291)) +* Bump pillow from 10.0.0 to 10.0.1. ([\#16344](https://github.com/matrix-org/synapse/issues/16344)) +* Bump regex from 1.9.4 to 1.9.5. ([\#16233](https://github.com/matrix-org/synapse/issues/16233)) +* Bump ruff from 0.0.286 to 0.0.290. ([\#16342](https://github.com/matrix-org/synapse/issues/16342)) +* Bump serde_json from 1.0.105 to 1.0.106. ([\#16296](https://github.com/matrix-org/synapse/issues/16296)) +* Bump serde_json from 1.0.106 to 1.0.107. ([\#16345](https://github.com/matrix-org/synapse/issues/16345)) +* Bump twisted from 22.10.0 to 23.8.0. ([\#16235](https://github.com/matrix-org/synapse/issues/16235)) +* Bump types-pillow from 10.0.0.2 to 10.0.0.3. ([\#16293](https://github.com/matrix-org/synapse/issues/16293)) +* Bump types-setuptools from 68.0.0.3 to 68.2.0.0. ([\#16292](https://github.com/matrix-org/synapse/issues/16292)) +* Bump typing-extensions from 4.7.1 to 4.8.0. ([\#16341](https://github.com/matrix-org/synapse/issues/16341)) + # Synapse 1.92.3 (2023-09-18) This is again a security update targeted at mitigating [CVE-2023-4863](https://cve.org/CVERecord?id=CVE-2023-4863). 
diff --git a/changelog.d/15488.feature b/changelog.d/15488.feature deleted file mode 100644 index 8684d84192..0000000000 --- a/changelog.d/15488.feature +++ /dev/null @@ -1 +0,0 @@ -Add automatic purge after all users forgotten a room. Also add restore of purge/shutdown rooms after a synapse restart. diff --git a/changelog.d/15997.misc b/changelog.d/15997.misc deleted file mode 100644 index 94768c3cb8..0000000000 --- a/changelog.d/15997.misc +++ /dev/null @@ -1 +0,0 @@ -Allow modules to delete rooms. \ No newline at end of file diff --git a/changelog.d/16066.bugfix b/changelog.d/16066.bugfix deleted file mode 100644 index 83649cf42a..0000000000 --- a/changelog.d/16066.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16090.misc b/changelog.d/16090.misc deleted file mode 100644 index d54ef936c7..0000000000 --- a/changelog.d/16090.misc +++ /dev/null @@ -1 +0,0 @@ -Add GCC and GNU Make to the Nix flake development environment so that `ruff` can be compiled. \ No newline at end of file diff --git a/changelog.d/16137.feature b/changelog.d/16137.feature deleted file mode 100644 index bba6f161cd..0000000000 --- a/changelog.d/16137.feature +++ /dev/null @@ -1 +0,0 @@ -Support resolving homeservers using `matrix-fed` DNS SRV records from [MSC4040](https://github.com/matrix-org/matrix-spec-proposals/pull/4040). diff --git a/changelog.d/16170.bugfix b/changelog.d/16170.bugfix deleted file mode 100644 index 83649cf42a..0000000000 --- a/changelog.d/16170.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16171.bugfix b/changelog.d/16171.bugfix deleted file mode 100644 index 83649cf42a..0000000000 --- a/changelog.d/16171.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16172.bugfix b/changelog.d/16172.bugfix deleted file mode 100644 index 83649cf42a..0000000000 --- a/changelog.d/16172.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16174.bugfix b/changelog.d/16174.bugfix deleted file mode 100644 index 83649cf42a..0000000000 --- a/changelog.d/16174.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where multi-device accounts could cause high load due to presence. diff --git a/changelog.d/16219.feature b/changelog.d/16219.feature deleted file mode 100644 index c789f2abb7..0000000000 --- a/changelog.d/16219.feature +++ /dev/null @@ -1 +0,0 @@ -Add the ability to use `G` (GiB) and `T` (TiB) suffixes in configuration options that refer to numbers of bytes. \ No newline at end of file diff --git a/changelog.d/16227.feature b/changelog.d/16227.feature deleted file mode 100644 index 510062b622..0000000000 --- a/changelog.d/16227.feature +++ /dev/null @@ -1 +0,0 @@ -Add span information to requests sent to appservices. Contributed by MTRNord. \ No newline at end of file diff --git a/changelog.d/16235.misc b/changelog.d/16235.misc deleted file mode 100644 index b1533f93b6..0000000000 --- a/changelog.d/16235.misc +++ /dev/null @@ -1 +0,0 @@ -Fix type checking when using the new version of Twisted. 
diff --git a/changelog.d/16240.misc b/changelog.d/16240.misc deleted file mode 100644 index 4f266c1fb0..0000000000 --- a/changelog.d/16240.misc +++ /dev/null @@ -1 +0,0 @@ -Delete device messages asynchronously and in staged batches using the task scheduler. diff --git a/changelog.d/16248.misc b/changelog.d/16248.misc deleted file mode 100644 index 0a5ed6dccb..0000000000 --- a/changelog.d/16248.misc +++ /dev/null @@ -1 +0,0 @@ -Bump minimum supported Rust version to 1.61.0. diff --git a/changelog.d/16251.bugfix b/changelog.d/16251.bugfix deleted file mode 100644 index 6d3157c7aa..0000000000 --- a/changelog.d/16251.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where appservices using MSC2409 to receive to_device messages, would only get messages for one user. \ No newline at end of file diff --git a/changelog.d/16252.bugfix b/changelog.d/16252.bugfix deleted file mode 100644 index 881bc00e61..0000000000 --- a/changelog.d/16252.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug when using workers where Synapse could end up re-requesting the same remote device repeatedly. diff --git a/changelog.d/16257.bugfix b/changelog.d/16257.bugfix deleted file mode 100644 index 28a5319749..0000000000 --- a/changelog.d/16257.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix long-standing bug where we kept re-requesting a remote server's key repeatedly, potentially causing delays in receiving events over federation. diff --git a/changelog.d/16260.misc b/changelog.d/16260.misc deleted file mode 100644 index 9f3289d7d4..0000000000 --- a/changelog.d/16260.misc +++ /dev/null @@ -1 +0,0 @@ -Update rust to version 1.71.1 in the nix development environment. \ No newline at end of file diff --git a/changelog.d/16261.misc b/changelog.d/16261.misc deleted file mode 100644 index d3ad59ca4a..0000000000 --- a/changelog.d/16261.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify server key storage. diff --git a/changelog.d/16262.feature b/changelog.d/16262.feature deleted file mode 100644 index 7c8e7e349b..0000000000 --- a/changelog.d/16262.feature +++ /dev/null @@ -1 +0,0 @@ -Add the ability to enable/disable registrations when in the CAS flow. Contributed by Aurélien Grimpard. diff --git a/changelog.d/16263.misc b/changelog.d/16263.misc deleted file mode 100644 index d54ef936c7..0000000000 --- a/changelog.d/16263.misc +++ /dev/null @@ -1 +0,0 @@ -Add GCC and GNU Make to the Nix flake development environment so that `ruff` can be compiled. \ No newline at end of file diff --git a/changelog.d/16264.misc b/changelog.d/16264.misc deleted file mode 100644 index a744434bef..0000000000 --- a/changelog.d/16264.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce CPU overhead of change password endpoint. diff --git a/changelog.d/16265.feature b/changelog.d/16265.feature deleted file mode 100644 index 3ffa16dbcb..0000000000 --- a/changelog.d/16265.feature +++ /dev/null @@ -1 +0,0 @@ -Allow `/notifications` endpoint to be routed to workers. diff --git a/changelog.d/16272.bugfix b/changelog.d/16272.bugfix deleted file mode 100644 index afb22a999f..0000000000 --- a/changelog.d/16272.bugfix +++ /dev/null @@ -1 +0,0 @@ -Avoid temporary storage of sensitive information. diff --git a/changelog.d/16273.misc b/changelog.d/16273.misc deleted file mode 100644 index 19882f6754..0000000000 --- a/changelog.d/16273.misc +++ /dev/null @@ -1 +0,0 @@ -Stop purging from tables slated for removal. 
diff --git a/changelog.d/16274.feature b/changelog.d/16274.feature deleted file mode 100644 index 0d9da2bbef..0000000000 --- a/changelog.d/16274.feature +++ /dev/null @@ -1 +0,0 @@ -Enable users to easily unsubscribe to notifications emails via the `List-Unsubscribe` header. diff --git a/changelog.d/16276.misc b/changelog.d/16276.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16276.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/16277.misc b/changelog.d/16277.misc deleted file mode 100644 index c131a46ec3..0000000000 --- a/changelog.d/16277.misc +++ /dev/null @@ -1 +0,0 @@ -Raise setuptools_rust version cap to 1.7.0. diff --git a/changelog.d/16278.misc b/changelog.d/16278.misc deleted file mode 100644 index e82a470c45..0000000000 --- a/changelog.d/16278.misc +++ /dev/null @@ -1 +0,0 @@ -Fix using the new task scheduler causing lots of CPU to be used. diff --git a/changelog.d/16280.misc b/changelog.d/16280.misc deleted file mode 100644 index 2d8b414a3b..0000000000 --- a/changelog.d/16280.misc +++ /dev/null @@ -1 +0,0 @@ -Upgrade CI run of Python 3.12 from rc1 to rc2. diff --git a/changelog.d/16281.misc b/changelog.d/16281.misc deleted file mode 100644 index de48396aff..0000000000 --- a/changelog.d/16281.misc +++ /dev/null @@ -1 +0,0 @@ -Include values in SQL debug when using `execute_values` with Postgres. diff --git a/changelog.d/16282.doc b/changelog.d/16282.doc deleted file mode 100644 index b249ea4f9f..0000000000 --- a/changelog.d/16282.doc +++ /dev/null @@ -1 +0,0 @@ -Fix typos in the documentation. diff --git a/changelog.d/16283.misc b/changelog.d/16283.misc deleted file mode 100644 index 4b9d6f76ae..0000000000 --- a/changelog.d/16283.misc +++ /dev/null @@ -1 +0,0 @@ -Enable additional linting checks. diff --git a/changelog.d/16288.bugfix b/changelog.d/16288.bugfix deleted file mode 100644 index f08d10d1f3..0000000000 --- a/changelog.d/16288.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug introduced in Synapse 1.49.0 when using dehydrated devices ([MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697)) and refresh tokens. Contributed by Hanadi. diff --git a/changelog.d/16298.misc b/changelog.d/16298.misc deleted file mode 100644 index 75b546d424..0000000000 --- a/changelog.d/16298.misc +++ /dev/null @@ -1 +0,0 @@ -Don't try refetching device lists for users on remote hosts that are marked as "down". diff --git a/changelog.d/16299.misc b/changelog.d/16299.misc deleted file mode 100644 index d454669151..0000000000 --- a/changelog.d/16299.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor `receipts_graph` Postgres transactions to stop error messages. diff --git a/changelog.d/16300.misc b/changelog.d/16300.misc deleted file mode 100644 index 8cc2e52369..0000000000 --- a/changelog.d/16300.misc +++ /dev/null @@ -1 +0,0 @@ -Bump mypy from 1.4.1 to 1.5.1. diff --git a/changelog.d/16301.misc b/changelog.d/16301.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16301.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/16304.doc b/changelog.d/16304.doc deleted file mode 100644 index 53660ec9a4..0000000000 --- a/changelog.d/16304.doc +++ /dev/null @@ -1 +0,0 @@ -Link to the Alpine Linux community package for Synapse. diff --git a/changelog.d/16309.misc b/changelog.d/16309.misc deleted file mode 100644 index bef5563ee9..0000000000 --- a/changelog.d/16309.misc +++ /dev/null @@ -1 +0,0 @@ -Small improvements to logging in replication code. 
diff --git a/changelog.d/16311.misc b/changelog.d/16311.misc deleted file mode 100644 index 4f266c1fb0..0000000000 --- a/changelog.d/16311.misc +++ /dev/null @@ -1 +0,0 @@ -Delete device messages asynchronously and in staged batches using the task scheduler. diff --git a/changelog.d/16312.misc b/changelog.d/16312.misc deleted file mode 100644 index 4f266c1fb0..0000000000 --- a/changelog.d/16312.misc +++ /dev/null @@ -1 +0,0 @@ -Delete device messages asynchronously and in staged batches using the task scheduler. diff --git a/changelog.d/16313.misc b/changelog.d/16313.misc deleted file mode 100644 index 4f266c1fb0..0000000000 --- a/changelog.d/16313.misc +++ /dev/null @@ -1 +0,0 @@ -Delete device messages asynchronously and in staged batches using the task scheduler. diff --git a/changelog.d/16314.misc b/changelog.d/16314.misc deleted file mode 100644 index a32b07112a..0000000000 --- a/changelog.d/16314.misc +++ /dev/null @@ -1 +0,0 @@ -Remove a reference cycle for in background processes. diff --git a/changelog.d/16315.misc b/changelog.d/16315.misc deleted file mode 100644 index d88782c979..0000000000 --- a/changelog.d/16315.misc +++ /dev/null @@ -1 +0,0 @@ -Only use literal strings for background process names. diff --git a/changelog.d/16316.misc b/changelog.d/16316.misc deleted file mode 100644 index aa0644f278..0000000000 --- a/changelog.d/16316.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor `get_user_by_id`. diff --git a/changelog.d/16318.misc b/changelog.d/16318.misc deleted file mode 100644 index 1433a2f246..0000000000 --- a/changelog.d/16318.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up task to delete to-device messages. diff --git a/changelog.d/16325.misc b/changelog.d/16325.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16325.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/16326.misc b/changelog.d/16326.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16326.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/16327.bugfix b/changelog.d/16327.bugfix deleted file mode 100644 index be3d1b4f21..0000000000 --- a/changelog.d/16327.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where invalid receipts would be accepted. diff --git a/changelog.d/16328.feature b/changelog.d/16328.feature deleted file mode 100644 index 9fadf766cc..0000000000 --- a/changelog.d/16328.feature +++ /dev/null @@ -1 +0,0 @@ -Report whether a user is `locked` in the [List Accounts admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#list-accounts), and exclude locked users by default. diff --git a/changelog.d/16329.bugfix b/changelog.d/16329.bugfix deleted file mode 100644 index 2f1f7e8ffe..0000000000 --- a/changelog.d/16329.bugfix +++ /dev/null @@ -1 +0,0 @@ -Use standard name for UTF-8 charset in emails. diff --git a/changelog.d/16347.misc b/changelog.d/16347.misc deleted file mode 100644 index f4f5bfb2de..0000000000 --- a/changelog.d/16347.misc +++ /dev/null @@ -1 +0,0 @@ -Pillow 10.0.1 is now mandatory because of libwebp CVE-2023-4863, since Pillow provides libwebp in the wheels. diff --git a/changelog.d/16349.misc b/changelog.d/16349.misc deleted file mode 100644 index 8ce27a1599..0000000000 --- a/changelog.d/16349.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid patching code in tests. 
diff --git a/changelog.d/16351.misc b/changelog.d/16351.misc
deleted file mode 100644
index b955b3da08..0000000000
--- a/changelog.d/16351.misc
+++ /dev/null
@@ -1 +0,0 @@
-Test against PostgreSQL 16.
diff --git a/changelog.d/16353.doc b/changelog.d/16353.doc
deleted file mode 100644
index 80af22ed53..0000000000
--- a/changelog.d/16353.doc
+++ /dev/null
@@ -1 +0,0 @@
-Use string for federation_client_minimum_tls_version documentation examples. Contributed by @jcgruenhage.
diff --git a/debian/changelog b/debian/changelog
index 254ca26fd8..192eedd45c 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.93.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.93.0rc1.
+
+ -- Synapse Packaging team  Tue, 19 Sep 2023 11:55:00 +0000
+
 matrix-synapse-py3 (1.92.3) stable; urgency=medium
 
   * New Synapse release 1.92.3.
diff --git a/pyproject.toml b/pyproject.toml
index de4dd61ea5..f69336a73f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -95,7 +95,7 @@ manifest-path = "rust/Cargo.toml"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.92.3"
+version = "1.93.0rc1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors "]
 license = "Apache-2.0"

From ac293357d05467703c505a50355db79cd8f1911a Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 19 Sep 2023 08:03:38 -0400
Subject: [PATCH 504/562] Tweak changelog.

---
 CHANGES.md | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/CHANGES.md b/CHANGES.md
index f95a894c4f..7b150f3749 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -2,11 +2,11 @@
 
 ### Features
 
-- Add automatic purge after all users forgotten a room. Also add restore of purge/shutdown rooms after a synapse restart. ([\#15488](https://github.com/matrix-org/synapse/issues/15488))
+- Add automatic purge after all users have forgotten a room. Also restores purge/shutdown rooms after a Synapse restart. ([\#15488](https://github.com/matrix-org/synapse/issues/15488))
 - Support resolving homeservers using `matrix-fed` DNS SRV records from [MSC4040](https://github.com/matrix-org/matrix-spec-proposals/pull/4040). ([\#16137](https://github.com/matrix-org/synapse/issues/16137))
 - Add the ability to use `G` (GiB) and `T` (TiB) suffixes in configuration options that refer to numbers of bytes. ([\#16219](https://github.com/matrix-org/synapse/issues/16219))
 - Add span information to requests sent to appservices. Contributed by MTRNord. ([\#16227](https://github.com/matrix-org/synapse/issues/16227))
-- Add the ability to enable/disable registrations when in the CAS flow. Contributed by Aurélien Grimpard. ([\#16262](https://github.com/matrix-org/synapse/issues/16262))
+- Add the ability to enable/disable registrations when using CAS. Contributed by Aurélien Grimpard. ([\#16262](https://github.com/matrix-org/synapse/issues/16262))
 - Allow `/notifications` endpoint to be routed to workers. ([\#16265](https://github.com/matrix-org/synapse/issues/16265))
 - Enable users to easily unsubscribe from notification emails via the `List-Unsubscribe` header. ([\#16274](https://github.com/matrix-org/synapse/issues/16274))
 - Report whether a user is `locked` in the [List Accounts admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#list-accounts), and exclude locked users by default. ([\#16328](https://github.com/matrix-org/synapse/issues/16328))
@@ -14,7 +14,7 @@
 ### Bugfixes
 
 - Fix a long-standing bug where multi-device accounts could cause high load due to presence. ([\#16066](https://github.com/matrix-org/synapse/issues/16066), [\#16170](https://github.com/matrix-org/synapse/issues/16170), [\#16171](https://github.com/matrix-org/synapse/issues/16171), [\#16172](https://github.com/matrix-org/synapse/issues/16172), [\#16174](https://github.com/matrix-org/synapse/issues/16174))
-- Fix a long-standing bug where appservices using MSC2409 to receive to_device messages, would only get messages for one user. ([\#16251](https://github.com/matrix-org/synapse/issues/16251))
+- Fix a long-standing bug where appservices using [MSC2409](https://github.com/matrix-org/matrix-spec-proposals/pull/2409) to receive `to_device` messages would only get messages for one user. ([\#16251](https://github.com/matrix-org/synapse/issues/16251))
 - Fix a bug when using workers where Synapse could end up re-requesting the same remote device repeatedly. ([\#16252](https://github.com/matrix-org/synapse/issues/16252))
 - Fix a long-standing bug where we kept re-requesting a remote server's key repeatedly, potentially causing delays in receiving events over federation. ([\#16257](https://github.com/matrix-org/synapse/issues/16257))
 - Avoid temporary storage of sensitive information. ([\#16272](https://github.com/matrix-org/synapse/issues/16272))
@@ -26,7 +26,7 @@
 
 - Fix typos in the documentation. ([\#16282](https://github.com/matrix-org/synapse/issues/16282))
 - Link to the Alpine Linux community package for Synapse. ([\#16304](https://github.com/matrix-org/synapse/issues/16304))
-- Use string for federation_client_minimum_tls_version documentation examples. Contributed by @jcgruenhage. ([\#16353](https://github.com/matrix-org/synapse/issues/16353))
+- Use string for `federation_client_minimum_tls_version` documentation examples. Contributed by @jcgruenhage. ([\#16353](https://github.com/matrix-org/synapse/issues/16353))
 
 ### Internal Changes
 
@@ -40,39 +40,36 @@
 - Reduce CPU overhead of change password endpoint. ([\#16264](https://github.com/matrix-org/synapse/issues/16264))
 - Stop purging from tables slated for removal. ([\#16273](https://github.com/matrix-org/synapse/issues/16273))
 - Improve type hints. ([\#16276](https://github.com/matrix-org/synapse/issues/16276), [\#16301](https://github.com/matrix-org/synapse/issues/16301), [\#16325](https://github.com/matrix-org/synapse/issues/16325), [\#16326](https://github.com/matrix-org/synapse/issues/16326))
-- Raise setuptools_rust version cap to 1.7.0. ([\#16277](https://github.com/matrix-org/synapse/issues/16277))
+- Raise `setuptools_rust` version cap to 1.7.0. ([\#16277](https://github.com/matrix-org/synapse/issues/16277))
 - Fix using the new task scheduler causing lots of CPU to be used. ([\#16278](https://github.com/matrix-org/synapse/issues/16278))
 - Upgrade CI run of Python 3.12 from rc1 to rc2. ([\#16280](https://github.com/matrix-org/synapse/issues/16280))
 - Include values in SQL debug when using `execute_values` with Postgres. ([\#16281](https://github.com/matrix-org/synapse/issues/16281))
 - Enable additional linting checks. ([\#16283](https://github.com/matrix-org/synapse/issues/16283))
 - Don't try refetching device lists for users on remote hosts that are marked as "down". ([\#16298](https://github.com/matrix-org/synapse/issues/16298))
 - Refactor `receipts_graph` Postgres transactions to stop error messages. ([\#16299](https://github.com/matrix-org/synapse/issues/16299))
-- Bump mypy from 1.4.1 to 1.5.1. ([\#16300](https://github.com/matrix-org/synapse/issues/16300))
 - Small improvements to logging in replication code. ([\#16309](https://github.com/matrix-org/synapse/issues/16309))
 - Remove a reference cycle for in background processes. ([\#16314](https://github.com/matrix-org/synapse/issues/16314))
 - Only use literal strings for background process names. ([\#16315](https://github.com/matrix-org/synapse/issues/16315))
 - Refactor `get_user_by_id`. ([\#16316](https://github.com/matrix-org/synapse/issues/16316))
 - Speed up task to delete to-device messages. ([\#16318](https://github.com/matrix-org/synapse/issues/16318))
-- Pillow 10.0.1 is now mandatory because of libwebp CVE-2023-4863, since Pillow provides libwebp in the wheels. ([\#16347](https://github.com/matrix-org/synapse/issues/16347))
 - Avoid patching code in tests. ([\#16349](https://github.com/matrix-org/synapse/issues/16349))
 - Test against PostgreSQL 16. ([\#16351](https://github.com/matrix-org/synapse/issues/16351))
 
 ### Updates to locked dependencies
 
+* Bump mypy from 1.4.1 to 1.5.1. ([\#16300](https://github.com/matrix-org/synapse/issues/16300))
 * Bump black from 23.7.0 to 23.9.1. ([\#16295](https://github.com/matrix-org/synapse/issues/16295))
 * Bump docker/build-push-action from 4 to 5. ([\#16336](https://github.com/matrix-org/synapse/issues/16336))
 * Bump docker/login-action from 2 to 3. ([\#16339](https://github.com/matrix-org/synapse/issues/16339))
 * Bump docker/metadata-action from 4 to 5. ([\#16337](https://github.com/matrix-org/synapse/issues/16337))
 * Bump docker/setup-qemu-action from 2 to 3. ([\#16338](https://github.com/matrix-org/synapse/issues/16338))
 * Bump furo from 2023.8.19 to 2023.9.10. ([\#16340](https://github.com/matrix-org/synapse/issues/16340))
-* Bump gitpython from 3.1.32 to 3.1.34. ([\#16267](https://github.com/matrix-org/synapse/issues/16267))
-* Bump gitpython from 3.1.34 to 3.1.35. ([\#16279](https://github.com/matrix-org/synapse/issues/16279))
+* Bump gitpython from 3.1.32 to 3.1.35. ([\#16267](https://github.com/matrix-org/synapse/issues/16267), [\#16279](https://github.com/matrix-org/synapse/issues/16279))
 * Bump mypy-zope from 1.0.0 to 1.0.1. ([\#16291](https://github.com/matrix-org/synapse/issues/16291))
 * Bump pillow from 10.0.0 to 10.0.1. ([\#16344](https://github.com/matrix-org/synapse/issues/16344))
 * Bump regex from 1.9.4 to 1.9.5. ([\#16233](https://github.com/matrix-org/synapse/issues/16233))
 * Bump ruff from 0.0.286 to 0.0.290. ([\#16342](https://github.com/matrix-org/synapse/issues/16342))
-* Bump serde_json from 1.0.105 to 1.0.106. ([\#16296](https://github.com/matrix-org/synapse/issues/16296))
-* Bump serde_json from 1.0.106 to 1.0.107. ([\#16345](https://github.com/matrix-org/synapse/issues/16345))
+* Bump serde_json from 1.0.105 to 1.0.107. ([\#16296](https://github.com/matrix-org/synapse/issues/16296), [\#16345](https://github.com/matrix-org/synapse/issues/16345))
 * Bump twisted from 22.10.0 to 23.8.0. ([\#16235](https://github.com/matrix-org/synapse/issues/16235))
 * Bump types-pillow from 10.0.0.2 to 10.0.0.3. ([\#16293](https://github.com/matrix-org/synapse/issues/16293))
 * Bump types-setuptools from 68.0.0.3 to 68.2.0.0. ([\#16292](https://github.com/matrix-org/synapse/issues/16292))

From 9fabde6eadea526c92a9ca7cd960b879e8995db6 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 19 Sep 2023 08:05:33 -0400
Subject: [PATCH 505/562] Fix-up deactivated notes in docs. (#16355)

---
 changelog.d/16355.doc            | 1 +
 docs/admin_api/user_admin_api.md | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/16355.doc

diff --git a/changelog.d/16355.doc b/changelog.d/16355.doc
new file mode 100644
index 0000000000..73d29c7889
--- /dev/null
+++ b/changelog.d/16355.doc
@@ -0,0 +1 @@
+Fix rendering of user admin API documentation around deactivation. This was broken in Synapse 1.91.0.
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index f83facabe4..b91848dd27 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -148,7 +148,6 @@ Body parameters:
 - `admin` - **bool**, optional, defaults to `false`. Whether the user is a homeserver administrator,
   granting them access to the Admin API, among other things.
 - `deactivated` - **bool**, optional. If unspecified, deactivation state will be left unchanged.
-- `locked` - **bool**, optional. If unspecified, locked state will be left unchanged.
 
 Note: the `password` field must also be set if both of the following are true:
 - `deactivated` is set to `false` and the user was previously deactivated (you are reactivating this user)
@@ -158,6 +157,7 @@ Body parameters:
 Note: a user cannot be erased with this API. For more details on deactivating and erasing
 users see [Deactivate Account](#deactivate-account).
 
+- `locked` - **bool**, optional. If unspecified, locked state will be left unchanged.
 - `user_type` - **string** or null, optional. If not provided, the user type will not be changed.
   If `null` is given, the user type will be cleared.
   Other allowed options are: `bot` and `support`.

From 4345ca066dc82b2d00da78e60af0184dd2833d9a Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 19 Sep 2023 08:35:44 -0400
Subject: [PATCH 506/562] Update changelog.

---
 CHANGES.md | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/CHANGES.md b/CHANGES.md
index 7b150f3749..eb537f9f6a 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -2,12 +2,13 @@
 
 ### Features
 
-- Add automatic purge after all users have forgotten a room. Also restores purge/shutdown rooms after a Synapse restart. ([\#15488](https://github.com/matrix-org/synapse/issues/15488))
+- Add automatic purge after all users have forgotten a room. ([\#15488](https://github.com/matrix-org/synapse/issues/15488))
+- Restore room purge/shutdown after a Synapse restart. ([\#15488](https://github.com/matrix-org/synapse/issues/15488))
 - Support resolving homeservers using `matrix-fed` DNS SRV records from [MSC4040](https://github.com/matrix-org/matrix-spec-proposals/pull/4040). ([\#16137](https://github.com/matrix-org/synapse/issues/16137))
 - Add the ability to use `G` (GiB) and `T` (TiB) suffixes in configuration options that refer to numbers of bytes. ([\#16219](https://github.com/matrix-org/synapse/issues/16219))
 - Add span information to requests sent to appservices. Contributed by MTRNord. ([\#16227](https://github.com/matrix-org/synapse/issues/16227))
 - Add the ability to enable/disable registrations when using CAS. Contributed by Aurélien Grimpard. ([\#16262](https://github.com/matrix-org/synapse/issues/16262))
-- Allow `/notifications` endpoint to be routed to workers. ([\#16265](https://github.com/matrix-org/synapse/issues/16265))
+- Allow the `/notifications` endpoint to be routed to workers. ([\#16265](https://github.com/matrix-org/synapse/issues/16265))
 - Enable users to easily unsubscribe from notification emails via the `List-Unsubscribe` header. ([\#16274](https://github.com/matrix-org/synapse/issues/16274))
 - Report whether a user is `locked` in the [List Accounts admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#list-accounts), and exclude locked users by default. ([\#16328](https://github.com/matrix-org/synapse/issues/16328))
 
@@ -21,6 +22,7 @@
 - Fix bug introduced in Synapse 1.49.0 when using dehydrated devices ([MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697)) and refresh tokens. Contributed by Hanadi. ([\#16288](https://github.com/matrix-org/synapse/issues/16288))
 - Fix a long-standing bug where invalid receipts would be accepted. ([\#16327](https://github.com/matrix-org/synapse/issues/16327))
 - Use standard name for UTF-8 charset in emails. ([\#16329](https://github.com/matrix-org/synapse/issues/16329))
+- Don't try refetching device lists for users on remote hosts that are marked as "down". ([\#16298](https://github.com/matrix-org/synapse/issues/16298))
 
 ### Improved Documentation
 
@@ -45,10 +47,9 @@
 - Upgrade CI run of Python 3.12 from rc1 to rc2. ([\#16280](https://github.com/matrix-org/synapse/issues/16280))
 - Include values in SQL debug when using `execute_values` with Postgres. ([\#16281](https://github.com/matrix-org/synapse/issues/16281))
 - Enable additional linting checks. ([\#16283](https://github.com/matrix-org/synapse/issues/16283))
-- Don't try refetching device lists for users on remote hosts that are marked as "down". ([\#16298](https://github.com/matrix-org/synapse/issues/16298))
 - Refactor `receipts_graph` Postgres transactions to stop error messages. ([\#16299](https://github.com/matrix-org/synapse/issues/16299))
 - Small improvements to logging in replication code. ([\#16309](https://github.com/matrix-org/synapse/issues/16309))
-- Remove a reference cycle for in background processes. ([\#16314](https://github.com/matrix-org/synapse/issues/16314))
+- Remove a reference cycle in background processes. ([\#16314](https://github.com/matrix-org/synapse/issues/16314))
 - Only use literal strings for background process names. ([\#16315](https://github.com/matrix-org/synapse/issues/16315))
 - Refactor `get_user_by_id`. ([\#16316](https://github.com/matrix-org/synapse/issues/16316))
 - Speed up task to delete to-device messages. ([\#16318](https://github.com/matrix-org/synapse/issues/16318))

From d7c89c5908f714aa6a142a89da08fafc597ffe0e Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 19 Sep 2023 15:26:44 -0400
Subject: [PATCH 507/562] Return immutable objects for cachedList decorators (#16350)

---
 changelog.d/16350.misc                        |  1 +
 synapse/appservice/__init__.py                |  6 ++---
 synapse/appservice/api.py                     |  6 ++---
 synapse/appservice/scheduler.py               | 18 ++++++-------
 synapse/handlers/appservice.py                |  9 ++++---
 synapse/handlers/e2e_keys.py                  | 24 +++++++-----------
 synapse/handlers/initial_sync.py              |  3 ++-
 synapse/handlers/receipts.py                  | 13 +++++-----
 synapse/handlers/sync.py                      |  4 +--
 synapse/handlers/typing.py                    | 17 +++++++++----
 synapse/push/bulk_push_rule_evaluator.py      |  2 +-
 synapse/storage/databases/main/appservice.py  |  6 ++---
 synapse/storage/databases/main/devices.py     | 23 +++++++++++------
 .../storage/databases/main/end_to_end_keys.py | 25 +++++++++----------
 .../storage/databases/main/events_worker.py   |  5 ++--
 synapse/storage/databases/main/keys.py        |  6 ++---
 synapse/storage/databases/main/presence.py    | 14 +++++++++--
 synapse/storage/databases/main/push_rule.py   |  2 +-
 synapse/storage/databases/main/receipts.py    | 14 +++++------
 synapse/storage/databases/main/relations.py   |  6 ++---
 synapse/storage/databases/main/roommember.py  |  8 +++---
 synapse/storage/databases/main/state.py       | 14 +++++++++--
 .../storage/databases/main/transactions.py    |  4 +--
 .../databases/main/user_erasure_store.py      |  4 +--
 24 files changed, 134 insertions(+), 100 deletions(-)
 create mode 100644 changelog.d/16350.misc

diff --git a/changelog.d/16350.misc b/changelog.d/16350.misc
new file mode 100644
index 0000000000..93ceaeafc9
--- /dev/null
+++ b/changelog.d/16350.misc
@@ -0,0 +1 @@
+Improve type hints.
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index 2260a8f589..6f4aa53c93 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -23,7 +23,7 @@ from netaddr import IPSet
 
 from synapse.api.constants import EventTypes
 from synapse.events import EventBase
-from synapse.types import DeviceListUpdates, JsonDict, UserID
+from synapse.types import DeviceListUpdates, JsonDict, JsonMapping, UserID
 from synapse.util.caches.descriptors import _CacheContext, cached
 
 if TYPE_CHECKING:
@@ -379,8 +379,8 @@ class AppServiceTransaction:
         service: ApplicationService,
         id: int,
         events: Sequence[EventBase],
-        ephemeral: List[JsonDict],
-        to_device_messages: List[JsonDict],
+
ephemeral: List[JsonMapping], + to_device_messages: List[JsonMapping], one_time_keys_count: TransactionOneTimeKeysCount, unused_fallback_keys: TransactionUnusedFallbackKeys, device_list_summary: DeviceListUpdates, diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 79f95f7653..18a30bc376 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -73,7 +73,7 @@ from synapse.events import EventBase from synapse.logging.context import run_in_background from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.databases.main import DataStore -from synapse.types import DeviceListUpdates, JsonDict +from synapse.types import DeviceListUpdates, JsonMapping from synapse.util import Clock if TYPE_CHECKING: @@ -121,8 +121,8 @@ class ApplicationServiceScheduler: self, appservice: ApplicationService, events: Optional[Collection[EventBase]] = None, - ephemeral: Optional[Collection[JsonDict]] = None, - to_device_messages: Optional[Collection[JsonDict]] = None, + ephemeral: Optional[Collection[JsonMapping]] = None, + to_device_messages: Optional[Collection[JsonMapping]] = None, device_list_summary: Optional[DeviceListUpdates] = None, ) -> None: """ @@ -180,9 +180,9 @@ class _ServiceQueuer: # dict of {service_id: [events]} self.queued_events: Dict[str, List[EventBase]] = {} # dict of {service_id: [events]} - self.queued_ephemeral: Dict[str, List[JsonDict]] = {} + self.queued_ephemeral: Dict[str, List[JsonMapping]] = {} # dict of {service_id: [to_device_message_json]} - self.queued_to_device_messages: Dict[str, List[JsonDict]] = {} + self.queued_to_device_messages: Dict[str, List[JsonMapping]] = {} # dict of {service_id: [device_list_summary]} self.queued_device_list_summaries: Dict[str, List[DeviceListUpdates]] = {} @@ -293,8 +293,8 @@ class _ServiceQueuer: self, service: ApplicationService, events: Iterable[EventBase], - ephemerals: Iterable[JsonDict], - to_device_messages: Iterable[JsonDict], + ephemerals: Iterable[JsonMapping], + to_device_messages: Iterable[JsonMapping], ) -> Tuple[TransactionOneTimeKeysCount, TransactionUnusedFallbackKeys]: """ Given a list of the events, ephemeral messages and to-device messages, @@ -364,8 +364,8 @@ class _TransactionController: self, service: ApplicationService, events: Sequence[EventBase], - ephemeral: Optional[List[JsonDict]] = None, - to_device_messages: Optional[List[JsonDict]] = None, + ephemeral: Optional[List[JsonMapping]] = None, + to_device_messages: Optional[List[JsonMapping]] = None, one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None, unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None, device_list_summary: Optional[DeviceListUpdates] = None, diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 6429545c98..7de7bd3289 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -46,6 +46,7 @@ from synapse.storage.databases.main.directory import RoomAliasMapping from synapse.types import ( DeviceListUpdates, JsonDict, + JsonMapping, RoomAlias, RoomStreamToken, StreamKeyType, @@ -397,7 +398,7 @@ class ApplicationServicesHandler: async def _handle_typing( self, service: ApplicationService, new_token: int - ) -> List[JsonDict]: + ) -> List[JsonMapping]: """ Return the typing events since the given stream token that the given application service should receive. 
@@ -432,7 +433,7 @@ class ApplicationServicesHandler: async def _handle_receipts( self, service: ApplicationService, new_token: int - ) -> List[JsonDict]: + ) -> List[JsonMapping]: """ Return the latest read receipts that the given application service should receive. @@ -471,7 +472,7 @@ class ApplicationServicesHandler: service: ApplicationService, users: Collection[Union[str, UserID]], new_token: Optional[int], - ) -> List[JsonDict]: + ) -> List[JsonMapping]: """ Return the latest presence updates that the given application service should receive. @@ -491,7 +492,7 @@ class ApplicationServicesHandler: A list of json dictionaries containing data derived from the presence events that should be sent to the given application service. """ - events: List[JsonDict] = [] + events: List[JsonMapping] = [] presence_source = self.event_sources.sources.presence from_key = await self.store.get_type_stream_id_for_appservice( service, "presence" diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index ad075497c8..8c6432035d 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple +from typing import TYPE_CHECKING, Dict, Iterable, List, Mapping, Optional, Tuple import attr from canonicaljson import encode_canonical_json @@ -31,6 +31,7 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace from synapse.types import ( JsonDict, + JsonMapping, UserID, get_domain_from_id, get_verify_key_from_cross_signing_key, @@ -272,11 +273,7 @@ class E2eKeysHandler: delay_cancellation=True, ) - ret = {"device_keys": results, "failures": failures} - - ret.update(cross_signing_keys) - - return ret + return {"device_keys": results, "failures": failures, **cross_signing_keys} @trace async def _query_devices_for_destination( @@ -408,7 +405,7 @@ class E2eKeysHandler: @cancellable async def get_cross_signing_keys_from_cache( self, query: Iterable[str], from_user_id: Optional[str] - ) -> Dict[str, Dict[str, dict]]: + ) -> Dict[str, Dict[str, JsonMapping]]: """Get cross-signing keys for users from the database Args: @@ -551,16 +548,13 @@ class E2eKeysHandler: self.config.federation.allow_device_name_lookup_over_federation ), ) - ret = {"device_keys": res} # add in the cross-signing keys cross_signing_keys = await self.get_cross_signing_keys_from_cache( device_keys_query, None ) - ret.update(cross_signing_keys) - - return ret + return {"device_keys": res, **cross_signing_keys} async def claim_local_one_time_keys( self, @@ -1127,7 +1121,7 @@ class E2eKeysHandler: user_id: str, master_key_id: str, signed_master_key: JsonDict, - stored_master_key: JsonDict, + stored_master_key: JsonMapping, devices: Dict[str, Dict[str, JsonDict]], ) -> List["SignatureListItem"]: """Check signatures of a user's master key made by their devices. @@ -1278,7 +1272,7 @@ class E2eKeysHandler: async def _get_e2e_cross_signing_verify_key( self, user_id: str, key_type: str, from_user_id: Optional[str] = None - ) -> Tuple[JsonDict, str, VerifyKey]: + ) -> Tuple[JsonMapping, str, VerifyKey]: """Fetch locally or remotely query for a cross-signing public key. First, attempt to fetch the cross-signing public key from storage. 
@@ -1333,7 +1327,7 @@ class E2eKeysHandler: self, user: UserID, desired_key_type: str, - ) -> Optional[Tuple[Dict[str, Any], str, VerifyKey]]: + ) -> Optional[Tuple[JsonMapping, str, VerifyKey]]: """Queries cross-signing keys for a remote user and saves them to the database Only the key specified by `key_type` will be returned, while all retrieved keys @@ -1474,7 +1468,7 @@ def _check_device_signature( user_id: str, verify_key: VerifyKey, signed_device: JsonDict, - stored_device: JsonDict, + stored_device: JsonMapping, ) -> None: """Check that a signature on a device or cross-signing key is correct and matches the copy of the device/key that we have stored. Throws an diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 5dc76ef588..5737f8014d 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -32,6 +32,7 @@ from synapse.storage.roommember import RoomsForUser from synapse.streams.config import PaginationConfig from synapse.types import ( JsonDict, + JsonMapping, Requester, RoomStreamToken, StreamKeyType, @@ -454,7 +455,7 @@ class InitialSyncHandler: for s in states ] - async def get_receipts() -> List[JsonDict]: + async def get_receipts() -> List[JsonMapping]: receipts = await self.store.get_linearized_receipts_for_room( room_id, to_key=now_token.receipt_key ) diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index c7edada353..a7a29b758b 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -19,6 +19,7 @@ from synapse.appservice import ApplicationService from synapse.streams import EventSource from synapse.types import ( JsonDict, + JsonMapping, ReadReceipt, StreamKeyType, UserID, @@ -204,15 +205,15 @@ class ReceiptsHandler: await self.federation_sender.send_read_receipt(receipt) -class ReceiptEventSource(EventSource[int, JsonDict]): +class ReceiptEventSource(EventSource[int, JsonMapping]): def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main self.config = hs.config @staticmethod def filter_out_private_receipts( - rooms: Sequence[JsonDict], user_id: str - ) -> List[JsonDict]: + rooms: Sequence[JsonMapping], user_id: str + ) -> List[JsonMapping]: """ Filters a list of serialized receipts (as returned by /sync and /initialSync) and removes private read receipts of other users. @@ -229,7 +230,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]): The same as rooms, but filtered. """ - result = [] + result: List[JsonMapping] = [] # Iterate through each room's receipt content. for room in rooms: @@ -282,7 +283,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]): room_ids: Iterable[str], is_guest: bool, explicit_room_id: Optional[str] = None, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[List[JsonMapping], int]: from_key = int(from_key) to_key = self.get_current_key() @@ -301,7 +302,7 @@ class ReceiptEventSource(EventSource[int, JsonDict]): async def get_new_events_as( self, from_key: int, to_key: int, service: ApplicationService - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[List[JsonMapping], int]: """Returns a set of new read receipt events that an appservice may be interested in. 
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 1a4d394eda..7bd42f635f 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -235,7 +235,7 @@ class SyncResult: archived: List[ArchivedSyncResult] to_device: List[JsonDict] device_lists: DeviceListUpdates - device_one_time_keys_count: JsonDict + device_one_time_keys_count: JsonMapping device_unused_fallback_key_types: List[str] def __bool__(self) -> bool: @@ -1558,7 +1558,7 @@ class SyncHandler: logger.debug("Fetching OTK data") device_id = sync_config.device_id - one_time_keys_count: JsonDict = {} + one_time_keys_count: JsonMapping = {} unused_fallback_key_types: List[str] = [] if device_id: # TODO: We should have a way to let clients differentiate between the states of: diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 4b4227003d..bdefa7f26f 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -26,7 +26,14 @@ from synapse.metrics.background_process_metrics import ( ) from synapse.replication.tcp.streams import TypingStream from synapse.streams import EventSource -from synapse.types import JsonDict, Requester, StrCollection, StreamKeyType, UserID +from synapse.types import ( + JsonDict, + JsonMapping, + Requester, + StrCollection, + StreamKeyType, + UserID, +) from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.metrics import Measure from synapse.util.retryutils import filter_destinations_by_retry_limiter @@ -487,7 +494,7 @@ class TypingWriterHandler(FollowerTypingHandler): raise Exception("Typing writer instance got typing info over replication") -class TypingNotificationEventSource(EventSource[int, JsonDict]): +class TypingNotificationEventSource(EventSource[int, JsonMapping]): def __init__(self, hs: "HomeServer"): self._main_store = hs.get_datastores().main self.clock = hs.get_clock() @@ -497,7 +504,7 @@ class TypingNotificationEventSource(EventSource[int, JsonDict]): # self.get_typing_handler = hs.get_typing_handler - def _make_event_for(self, room_id: str) -> JsonDict: + def _make_event_for(self, room_id: str) -> JsonMapping: typing = self.get_typing_handler()._room_typing[room_id] return { "type": EduTypes.TYPING, @@ -507,7 +514,7 @@ class TypingNotificationEventSource(EventSource[int, JsonDict]): async def get_new_events_as( self, from_key: int, service: ApplicationService - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[List[JsonMapping], int]: """Returns a set of new typing events that an appservice may be interested in. @@ -551,7 +558,7 @@ class TypingNotificationEventSource(EventSource[int, JsonDict]): room_ids: Iterable[str], is_guest: bool, explicit_room_id: Optional[str] = None, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[List[JsonMapping], int]: with Measure(self.clock, "typing.get_new_events"): from_key = int(from_key) handler = self.get_typing_handler() diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 554634579e..14784312dc 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -131,7 +131,7 @@ class BulkPushRuleEvaluator: async def _get_rules_for_event( self, event: EventBase, - ) -> Dict[str, FilteredPushRules]: + ) -> Mapping[str, FilteredPushRules]: """Get the push rules for all users who may need to be notified about the event. 
diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 484db175d0..0553a0621a 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -45,7 +45,7 @@ from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.databases.main.roommember import RoomMemberWorkerStore from synapse.storage.types import Cursor from synapse.storage.util.sequence import build_sequence_generator -from synapse.types import DeviceListUpdates, JsonDict +from synapse.types import DeviceListUpdates, JsonMapping from synapse.util import json_encoder from synapse.util.caches.descriptors import _CacheContext, cached @@ -268,8 +268,8 @@ class ApplicationServiceTransactionWorkerStore( self, service: ApplicationService, events: Sequence[EventBase], - ephemeral: List[JsonDict], - to_device_messages: List[JsonDict], + ephemeral: List[JsonMapping], + to_device_messages: List[JsonMapping], one_time_keys_count: TransactionOneTimeKeysCount, unused_fallback_keys: TransactionUnusedFallbackKeys, device_list_summary: DeviceListUpdates, diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 70faf4b1ec..df596f35f9 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -55,7 +55,12 @@ from synapse.storage.util.id_generators import ( AbstractStreamIdGenerator, StreamIdGenerator, ) -from synapse.types import JsonDict, StrCollection, get_verify_key_from_cross_signing_key +from synapse.types import ( + JsonDict, + JsonMapping, + StrCollection, + get_verify_key_from_cross_signing_key, +) from synapse.util import json_decoder, json_encoder from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.lrucache import LruCache @@ -746,7 +751,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): @cancellable async def get_user_devices_from_cache( self, user_ids: Set[str], user_and_device_ids: List[Tuple[str, str]] - ) -> Tuple[Set[str], Dict[str, Mapping[str, JsonDict]]]: + ) -> Tuple[Set[str], Dict[str, Mapping[str, JsonMapping]]]: """Get the devices (and keys if any) for remote users from the cache. Args: @@ -766,13 +771,13 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): user_ids_not_in_cache = unique_user_ids - user_ids_in_cache # First fetch all the users which all devices are to be returned. - results: Dict[str, Mapping[str, JsonDict]] = {} + results: Dict[str, Mapping[str, JsonMapping]] = {} for user_id in user_ids: if user_id in user_ids_in_cache: results[user_id] = await self.get_cached_devices_for_user(user_id) # Then fetch all device-specific requests, but skip users we've already # fetched all devices for. 
- device_specific_results: Dict[str, Dict[str, JsonDict]] = {} + device_specific_results: Dict[str, Dict[str, JsonMapping]] = {} for user_id, device_id in user_and_device_ids: if user_id in user_ids_in_cache and user_id not in user_ids: device = await self._get_cached_user_device(user_id, device_id) @@ -801,7 +806,9 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): return user_ids_in_cache @cached(num_args=2, tree=True) - async def _get_cached_user_device(self, user_id: str, device_id: str) -> JsonDict: + async def _get_cached_user_device( + self, user_id: str, device_id: str + ) -> JsonMapping: content = await self.db_pool.simple_select_one_onecol( table="device_lists_remote_cache", keyvalues={"user_id": user_id, "device_id": device_id}, @@ -811,7 +818,9 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): return db_to_json(content) @cached() - async def get_cached_devices_for_user(self, user_id: str) -> Mapping[str, JsonDict]: + async def get_cached_devices_for_user( + self, user_id: str + ) -> Mapping[str, JsonMapping]: devices = await self.db_pool.simple_select_list( table="device_lists_remote_cache", keyvalues={"user_id": user_id}, @@ -1042,7 +1051,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ) async def get_device_list_last_stream_id_for_remotes( self, user_ids: Iterable[str] - ) -> Dict[str, Optional[str]]: + ) -> Mapping[str, Optional[str]]: rows = await self.db_pool.simple_select_many_batch( table="device_lists_remote_extremeties", column="user_id", diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index b49dea577c..89fac23f93 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -52,7 +52,7 @@ from synapse.storage.database import ( from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import StreamIdGenerator -from synapse.types import JsonDict +from synapse.types import JsonDict, JsonMapping from synapse.util import json_decoder, json_encoder from synapse.util.caches.descriptors import cached, cachedList from synapse.util.cancellation import cancellable @@ -125,7 +125,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker async def get_e2e_device_keys_for_federation_query( self, user_id: str - ) -> Tuple[int, List[JsonDict]]: + ) -> Tuple[int, Sequence[JsonMapping]]: """Get all devices (with any device keys) for a user Returns: @@ -174,7 +174,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker @cached(iterable=True) async def _get_e2e_device_keys_for_federation_query_inner( self, user_id: str - ) -> List[JsonDict]: + ) -> Sequence[JsonMapping]: """Get all devices (with any device keys) for a user""" devices = await self.get_e2e_device_keys_and_signatures([(user_id, None)]) @@ -578,7 +578,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker @cached(max_entries=10000) async def count_e2e_one_time_keys( self, user_id: str, device_id: str - ) -> Dict[str, int]: + ) -> Mapping[str, int]: """Count the number of one time keys the server has for a device Returns: A mapping from algorithm to number of keys for that algorithm. 
@@ -812,7 +812,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker async def get_e2e_cross_signing_key( self, user_id: str, key_type: str, from_user_id: Optional[str] = None - ) -> Optional[JsonDict]: + ) -> Optional[JsonMapping]: """Returns a user's cross-signing key. Args: @@ -833,7 +833,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker return user_keys.get(key_type) @cached(num_args=1) - def _get_bare_e2e_cross_signing_keys(self, user_id: str) -> Mapping[str, JsonDict]: + def _get_bare_e2e_cross_signing_keys( + self, user_id: str + ) -> Mapping[str, JsonMapping]: """Dummy function. Only used to make a cache for _get_bare_e2e_cross_signing_keys_bulk. """ @@ -846,7 +848,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker ) async def _get_bare_e2e_cross_signing_keys_bulk( self, user_ids: Iterable[str] - ) -> Dict[str, Optional[Mapping[str, JsonDict]]]: + ) -> Mapping[str, Optional[Mapping[str, JsonMapping]]]: """Returns the cross-signing keys for a set of users. The output of this function should be passed to _get_e2e_cross_signing_signatures_txn if the signatures for the calling user need to be fetched. @@ -860,15 +862,12 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker their user ID will map to None. """ - result = await self.db_pool.runInteraction( + return await self.db_pool.runInteraction( "get_bare_e2e_cross_signing_keys_bulk", self._get_bare_e2e_cross_signing_keys_bulk_txn, user_ids, ) - # The `Optional` comes from the `@cachedList` decorator. - return cast(Dict[str, Optional[Mapping[str, JsonDict]]], result) - def _get_bare_e2e_cross_signing_keys_bulk_txn( self, txn: LoggingTransaction, @@ -1026,7 +1025,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker @cancellable async def get_e2e_cross_signing_keys_bulk( self, user_ids: List[str], from_user_id: Optional[str] = None - ) -> Dict[str, Optional[Mapping[str, JsonDict]]]: + ) -> Mapping[str, Optional[Mapping[str, JsonMapping]]]: """Returns the cross-signing keys for a set of users. 
Args: @@ -1043,7 +1042,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker if from_user_id: result = cast( - Dict[str, Optional[Mapping[str, JsonDict]]], + Dict[str, Optional[Mapping[str, JsonMapping]]], await self.db_pool.runInteraction( "get_e2e_cross_signing_signatures", self._get_e2e_cross_signing_signatures_txn, diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 1eb313040e..b788d70fc5 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -24,6 +24,7 @@ from typing import ( Dict, Iterable, List, + Mapping, MutableMapping, Optional, Set, @@ -1633,7 +1634,7 @@ class EventsWorkerStore(SQLBaseStore): self, room_id: str, event_ids: Collection[str], - ) -> Dict[str, bool]: + ) -> Mapping[str, bool]: """Helper for have_seen_events Returns: @@ -2325,7 +2326,7 @@ class EventsWorkerStore(SQLBaseStore): @cachedList(cached_method_name="is_partial_state_event", list_name="event_ids") async def get_partial_state_events( self, event_ids: Collection[str] - ) -> Dict[str, bool]: + ) -> Mapping[str, bool]: """Checks which of the given events have partial state Args: diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index 41563371dc..889c578b9c 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -16,7 +16,7 @@ import itertools import json import logging -from typing import Dict, Iterable, Optional, Tuple +from typing import Dict, Iterable, Mapping, Optional, Tuple from canonicaljson import encode_canonical_json from signedjson.key import decode_verify_key_bytes @@ -130,7 +130,7 @@ class KeyStore(CacheInvalidationWorkerStore): ) async def get_server_keys_json( self, server_name_and_key_ids: Iterable[Tuple[str, str]] - ) -> Dict[Tuple[str, str], FetchKeyResult]: + ) -> Mapping[Tuple[str, str], FetchKeyResult]: """ Args: server_name_and_key_ids: @@ -200,7 +200,7 @@ class KeyStore(CacheInvalidationWorkerStore): ) async def get_server_keys_json_for_remote( self, server_name: str, key_ids: Iterable[str] - ) -> Dict[str, Optional[FetchKeyResultForRemote]]: + ) -> Mapping[str, Optional[FetchKeyResultForRemote]]: """Fetch the cached keys for the given server/key IDs. If we have multiple entries for a given key ID, returns the most recent. diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index b51d20ac26..194b4e031f 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -11,7 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, cast +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterable, + List, + Mapping, + Optional, + Tuple, + cast, +) from synapse.api.presence import PresenceState, UserPresenceState from synapse.replication.tcp.streams import PresenceStream @@ -249,7 +259,7 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) ) async def get_presence_for_users( self, user_ids: Iterable[str] - ) -> Dict[str, UserPresenceState]: + ) -> Mapping[str, UserPresenceState]: rows = await self.db_pool.simple_select_many_batch( table="presence_stream", column="user_id", diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index bec0dc2afe..af69944008 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -216,7 +216,7 @@ class PushRulesWorkerStore( @cachedList(cached_method_name="get_push_rules_for_user", list_name="user_ids") async def bulk_get_push_rules( self, user_ids: Collection[str] - ) -> Dict[str, FilteredPushRules]: + ) -> Mapping[str, FilteredPushRules]: if not user_ids: return {} diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index a074c43989..0231f9407b 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -43,7 +43,7 @@ from synapse.storage.util.id_generators import ( MultiWriterIdGenerator, StreamIdGenerator, ) -from synapse.types import JsonDict +from synapse.types import JsonDict, JsonMapping from synapse.util import json_encoder from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -218,7 +218,7 @@ class ReceiptsWorkerStore(SQLBaseStore): @cached() async def _get_receipts_for_user_with_orderings( self, user_id: str, receipt_type: str - ) -> JsonDict: + ) -> JsonMapping: """ Fetch receipts for all rooms that the given user is joined to. @@ -258,7 +258,7 @@ class ReceiptsWorkerStore(SQLBaseStore): async def get_linearized_receipts_for_rooms( self, room_ids: Iterable[str], to_key: int, from_key: Optional[int] = None - ) -> List[dict]: + ) -> List[JsonMapping]: """Get receipts for multiple rooms for sending to clients. Args: @@ -287,7 +287,7 @@ class ReceiptsWorkerStore(SQLBaseStore): async def get_linearized_receipts_for_room( self, room_id: str, to_key: int, from_key: Optional[int] = None - ) -> Sequence[JsonDict]: + ) -> Sequence[JsonMapping]: """Get receipts for a single room for sending to clients. 
Args: @@ -310,7 +310,7 @@ class ReceiptsWorkerStore(SQLBaseStore): @cached(tree=True) async def _get_linearized_receipts_for_room( self, room_id: str, to_key: int, from_key: Optional[int] = None - ) -> Sequence[JsonDict]: + ) -> Sequence[JsonMapping]: """See get_linearized_receipts_for_room""" def f(txn: LoggingTransaction) -> List[Dict[str, Any]]: @@ -353,7 +353,7 @@ class ReceiptsWorkerStore(SQLBaseStore): ) async def _get_linearized_receipts_for_rooms( self, room_ids: Collection[str], to_key: int, from_key: Optional[int] = None - ) -> Dict[str, Sequence[JsonDict]]: + ) -> Mapping[str, Sequence[JsonMapping]]: if not room_ids: return {} @@ -415,7 +415,7 @@ class ReceiptsWorkerStore(SQLBaseStore): ) async def get_linearized_receipts_for_all_rooms( self, to_key: int, from_key: Optional[int] = None - ) -> Mapping[str, JsonDict]: + ) -> Mapping[str, JsonMapping]: """Get receipts for all rooms between two stream_ids, up to a limit of the latest 100 read receipts. diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 96908f14ba..6ba9c9651f 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -519,7 +519,7 @@ class RelationsWorkerStore(SQLBaseStore): @cachedList(cached_method_name="get_applicable_edit", list_name="event_ids") async def get_applicable_edits( self, event_ids: Collection[str] - ) -> Dict[str, Optional[EventBase]]: + ) -> Mapping[str, Optional[EventBase]]: """Get the most recent edit (if any) that has happened for the given events. @@ -605,7 +605,7 @@ class RelationsWorkerStore(SQLBaseStore): @cachedList(cached_method_name="get_thread_summary", list_name="event_ids") async def get_thread_summaries( self, event_ids: Collection[str] - ) -> Dict[str, Optional[Tuple[int, EventBase]]]: + ) -> Mapping[str, Optional[Tuple[int, EventBase]]]: """Get the number of threaded replies and the latest reply (if any) for the given events. Args: @@ -779,7 +779,7 @@ class RelationsWorkerStore(SQLBaseStore): @cachedList(cached_method_name="get_thread_participated", list_name="event_ids") async def get_threads_participated( self, event_ids: Collection[str], user_id: str - ) -> Dict[str, bool]: + ) -> Mapping[str, bool]: """Get whether the requesting user participated in the given threads. This is separate from get_thread_summaries since that can be cached across diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index fff259f74c..7b503dd697 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -191,7 +191,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ) async def get_subset_users_in_room_with_profiles( self, room_id: str, user_ids: Collection[str] - ) -> Dict[str, ProfileInfo]: + ) -> Mapping[str, ProfileInfo]: """Get a mapping from user ID to profile information for a list of users in a given room. @@ -676,7 +676,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ) async def _get_rooms_for_users( self, user_ids: Collection[str] - ) -> Dict[str, FrozenSet[str]]: + ) -> Mapping[str, FrozenSet[str]]: """A batched version of `get_rooms_for_user`. 
Returns: @@ -881,7 +881,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ) async def _get_user_ids_from_membership_event_ids( self, event_ids: Iterable[str] - ) -> Dict[str, Optional[str]]: + ) -> Mapping[str, Optional[str]]: """For given set of member event_ids check if they point to a join event. @@ -1191,7 +1191,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ) async def get_membership_from_event_ids( self, member_event_ids: Iterable[str] - ) -> Dict[str, Optional[EventIdMembership]]: + ) -> Mapping[str, Optional[EventIdMembership]]: """Get user_id and membership of a set of event IDs. Returns: diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index ebb2ae964f..5eaaff5b68 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -14,7 +14,17 @@ # limitations under the License. import collections.abc import logging -from typing import TYPE_CHECKING, Any, Collection, Dict, Iterable, Optional, Set, Tuple +from typing import ( + TYPE_CHECKING, + Any, + Collection, + Dict, + Iterable, + Mapping, + Optional, + Set, + Tuple, +) import attr @@ -372,7 +382,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) async def _get_state_group_for_events( self, event_ids: Collection[str] - ) -> Dict[str, int]: + ) -> Mapping[str, int]: """Returns mapping event_id -> state_group. Raises: diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index efd21b5bfc..8f70eff809 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -14,7 +14,7 @@ import logging from enum import Enum -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, cast +from typing import TYPE_CHECKING, Iterable, List, Mapping, Optional, Tuple, cast import attr from canonicaljson import encode_canonical_json @@ -210,7 +210,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): ) async def get_destination_retry_timings_batch( self, destinations: StrCollection - ) -> Dict[str, Optional[DestinationRetryTimings]]: + ) -> Mapping[str, Optional[DestinationRetryTimings]]: rows = await self.db_pool.simple_select_many_batch( table="destinations", iterable=destinations, diff --git a/synapse/storage/databases/main/user_erasure_store.py b/synapse/storage/databases/main/user_erasure_store.py index f79006533f..06fcbe5e54 100644 --- a/synapse/storage/databases/main/user_erasure_store.py +++ b/synapse/storage/databases/main/user_erasure_store.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Dict, Iterable +from typing import Iterable, Mapping from synapse.storage.database import LoggingTransaction from synapse.storage.databases.main import CacheInvalidationWorkerStore @@ -40,7 +40,7 @@ class UserErasureWorkerStore(CacheInvalidationWorkerStore): return bool(result) @cachedList(cached_method_name="is_user_erased", list_name="user_ids") - async def are_users_erased(self, user_ids: Iterable[str]) -> Dict[str, bool]: + async def are_users_erased(self, user_ids: Iterable[str]) -> Mapping[str, bool]: """ Checks which users in a list have requested erasure From 7ec0a141b4bdda0fa67cb1f2af7f321b9963f0b8 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 20 Sep 2023 07:48:55 -0400 Subject: [PATCH 508/562] Convert more cached return values to immutable types (#16356) --- changelog.d/16356.misc | 1 + synapse/api/filtering.py | 8 ++--- synapse/federation/federation_client.py | 4 +-- synapse/handlers/federation_event.py | 2 +- synapse/handlers/relations.py | 14 ++++++-- synapse/rest/client/filter.py | 4 +-- synapse/storage/controllers/state.py | 2 +- synapse/storage/databases/main/filtering.py | 4 +-- synapse/storage/databases/main/relations.py | 4 +-- synapse/storage/databases/main/roommember.py | 10 +++--- tests/util/caches/test_descriptors.py | 35 +++++++++++--------- 11 files changed, 52 insertions(+), 36 deletions(-) create mode 100644 changelog.d/16356.misc diff --git a/changelog.d/16356.misc b/changelog.d/16356.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16356.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 0995ecbe83..74ee8e9f3f 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -37,7 +37,7 @@ from synapse.api.constants import EduTypes, EventContentFields from synapse.api.errors import SynapseError from synapse.api.presence import UserPresenceState from synapse.events import EventBase, relation_from_event -from synapse.types import JsonDict, RoomID, UserID +from synapse.types import JsonDict, JsonMapping, RoomID, UserID if TYPE_CHECKING: from synapse.server import HomeServer @@ -191,7 +191,7 @@ FilterEvent = TypeVar("FilterEvent", EventBase, UserPresenceState, JsonDict) class FilterCollection: - def __init__(self, hs: "HomeServer", filter_json: JsonDict): + def __init__(self, hs: "HomeServer", filter_json: JsonMapping): self._filter_json = filter_json room_filter_json = self._filter_json.get("room", {}) @@ -219,7 +219,7 @@ class FilterCollection: def __repr__(self) -> str: return "" % (json.dumps(self._filter_json),) - def get_filter_json(self) -> JsonDict: + def get_filter_json(self) -> JsonMapping: return self._filter_json def timeline_limit(self) -> int: @@ -313,7 +313,7 @@ class FilterCollection: class Filter: - def __init__(self, hs: "HomeServer", filter_json: JsonDict): + def __init__(self, hs: "HomeServer", filter_json: JsonMapping): self._hs = hs self._store = hs.get_datastores().main self.filter_json = filter_json diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 607013f121..c8bc46415d 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -64,7 +64,7 @@ from synapse.federation.transport.client import SendJoinResponse from synapse.http.client import is_unknown_endpoint from synapse.http.types import QueryParams from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, tag_args, trace -from synapse.types import 
JsonDict, UserID, get_domain_from_id +from synapse.types import JsonDict, StrCollection, UserID, get_domain_from_id from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.retryutils import NotRetryingDestination @@ -1704,7 +1704,7 @@ class FederationClient(FederationBase): async def timestamp_to_event( self, *, - destinations: List[str], + destinations: StrCollection, room_id: str, timestamp: int, direction: Direction, diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index eedde97ab0..7c62cdfaef 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -1538,7 +1538,7 @@ class FederationEventHandler: logger.exception("Failed to resync device for %s", sender) async def backfill_event_id( - self, destinations: List[str], room_id: str, event_id: str + self, destinations: StrCollection, room_id: str, event_id: str ) -> PulledPduInfo: """Backfill a single event and persist it as a non-outlier which means we also pull in all of the state and auth events necessary for it. diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index db97f7aede..9b13448cdd 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -13,7 +13,17 @@ # limitations under the License. import enum import logging -from typing import TYPE_CHECKING, Collection, Dict, FrozenSet, Iterable, List, Optional +from typing import ( + TYPE_CHECKING, + Collection, + Dict, + FrozenSet, + Iterable, + List, + Mapping, + Optional, + Sequence, +) import attr @@ -245,7 +255,7 @@ class RelationsHandler: async def get_references_for_events( self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset() - ) -> Dict[str, List[_RelatedEvent]]: + ) -> Mapping[str, Sequence[_RelatedEvent]]: """Get a list of references to the given events. Args: diff --git a/synapse/rest/client/filter.py b/synapse/rest/client/filter.py index 5da1e511a2..b5879496db 100644 --- a/synapse/rest/client/filter.py +++ b/synapse/rest/client/filter.py @@ -19,7 +19,7 @@ from synapse.api.errors import AuthError, NotFoundError, StoreError, SynapseErro from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseRequest -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, JsonMapping, UserID from ._base import client_patterns, set_timeline_upper_limit @@ -41,7 +41,7 @@ class GetFilterRestServlet(RestServlet): async def on_GET( self, request: SynapseRequest, user_id: str, filter_id: str - ) -> Tuple[int, JsonDict]: + ) -> Tuple[int, JsonMapping]: target_user = UserID.from_string(user_id) requester = await self.auth.get_user_by_req(request) diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 278c7832ba..10d219c045 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -582,7 +582,7 @@ class StateStorageController: @trace @tag_args - async def get_current_hosts_in_room_ordered(self, room_id: str) -> List[str]: + async def get_current_hosts_in_room_ordered(self, room_id: str) -> Tuple[str, ...]: """Get current hosts in room based on current state. Blocks until we have full state for the given room. 
This only happens for rooms diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index 047de6283a..7d94685caf 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -25,7 +25,7 @@ from synapse.storage.database import ( LoggingTransaction, ) from synapse.storage.engines import PostgresEngine -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, JsonMapping, UserID from synapse.util.caches.descriptors import cached if TYPE_CHECKING: @@ -145,7 +145,7 @@ class FilteringWorkerStore(SQLBaseStore): @cached(num_args=2) async def get_user_filter( self, user_id: UserID, filter_id: Union[int, str] - ) -> JsonDict: + ) -> JsonMapping: # filter_id is BIGINT UNSIGNED, so if it isn't a number, fail # with a coherent error message rather than 500 M_UNKNOWN. try: diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 6ba9c9651f..b67f780c10 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -465,7 +465,7 @@ class RelationsWorkerStore(SQLBaseStore): @cachedList(cached_method_name="get_references_for_event", list_name="event_ids") async def get_references_for_events( self, event_ids: Collection[str] - ) -> Mapping[str, Optional[List[_RelatedEvent]]]: + ) -> Mapping[str, Optional[Sequence[_RelatedEvent]]]: """Get a list of references to the given events. Args: @@ -931,7 +931,7 @@ class RelationsWorkerStore(SQLBaseStore): room_id: str, limit: int = 5, from_token: Optional[ThreadsNextBatch] = None, - ) -> Tuple[List[str], Optional[ThreadsNextBatch]]: + ) -> Tuple[Sequence[str], Optional[ThreadsNextBatch]]: """Get a list of thread IDs, ordered by topological ordering of their latest reply. diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 7b503dd697..3755773faa 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -984,7 +984,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ) @cached(iterable=True, max_entries=10000) - async def get_current_hosts_in_room_ordered(self, room_id: str) -> List[str]: + async def get_current_hosts_in_room_ordered(self, room_id: str) -> Tuple[str, ...]: """ Get current hosts in room based on current state. @@ -1013,12 +1013,14 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): # `get_users_in_room` rather than funky SQL. domains = await self.get_current_hosts_in_room(room_id) - return list(domains) + return tuple(domains) # For PostgreSQL we can use a regex to pull out the domains from the # joined users in `current_state_events` via regex. - def get_current_hosts_in_room_ordered_txn(txn: LoggingTransaction) -> List[str]: + def get_current_hosts_in_room_ordered_txn( + txn: LoggingTransaction, + ) -> Tuple[str, ...]: # Returns a list of servers currently joined in the room sorted by # longest in the room first (aka. with the lowest depth). The # heuristic of sorting by servers who have been in the room the @@ -1043,7 +1045,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): """ txn.execute(sql, (room_id,)) # `server_domain` will be `NULL` for malformed MXIDs with no colons. 
- return [d for d, in txn if d is not None] + return tuple(d for d, in txn if d is not None) return await self.db_pool.runInteraction( "get_current_hosts_in_room_ordered", get_current_hosts_in_room_ordered_txn diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 168419f440..7e8725e610 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -15,10 +15,10 @@ import logging from typing import ( Any, - Dict, Generator, Iterable, List, + Mapping, NoReturn, Optional, Set, @@ -96,7 +96,7 @@ class DescriptorTestCase(unittest.TestCase): self.mock = mock.Mock() @descriptors.cached(num_args=1) - def fn(self, arg1: int, arg2: int) -> mock.Mock: + def fn(self, arg1: int, arg2: int) -> str: return self.mock(arg1, arg2) obj = Cls() @@ -228,8 +228,9 @@ class DescriptorTestCase(unittest.TestCase): call_count = 0 @cached() - def fn(self, arg1: int) -> Optional[Deferred]: + def fn(self, arg1: int) -> Deferred: self.call_count += 1 + assert self.result is not None return self.result obj = Cls() @@ -401,21 +402,21 @@ class DescriptorTestCase(unittest.TestCase): self.mock = mock.Mock() @descriptors.cached(iterable=True) - def fn(self, arg1: int, arg2: int) -> List[str]: + def fn(self, arg1: int, arg2: int) -> Tuple[str, ...]: return self.mock(arg1, arg2) obj = Cls() - obj.mock.return_value = ["spam", "eggs"] + obj.mock.return_value = ("spam", "eggs") r = obj.fn(1, 2) - self.assertEqual(r.result, ["spam", "eggs"]) + self.assertEqual(r.result, ("spam", "eggs")) obj.mock.assert_called_once_with(1, 2) obj.mock.reset_mock() # a call with different params should call the mock again - obj.mock.return_value = ["chips"] + obj.mock.return_value = ("chips",) r = obj.fn(1, 3) - self.assertEqual(r.result, ["chips"]) + self.assertEqual(r.result, ("chips",)) obj.mock.assert_called_once_with(1, 3) obj.mock.reset_mock() @@ -423,9 +424,9 @@ class DescriptorTestCase(unittest.TestCase): self.assertEqual(len(obj.fn.cache.cache), 3) r = obj.fn(1, 2) - self.assertEqual(r.result, ["spam", "eggs"]) + self.assertEqual(r.result, ("spam", "eggs")) r = obj.fn(1, 3) - self.assertEqual(r.result, ["chips"]) + self.assertEqual(r.result, ("chips",)) obj.mock.assert_not_called() def test_cache_iterable_with_sync_exception(self) -> None: @@ -784,7 +785,9 @@ class CachedListDescriptorTestCase(unittest.TestCase): pass @descriptors.cachedList(cached_method_name="fn", list_name="args1") - async def list_fn(self, args1: Iterable[int], arg2: int) -> Dict[int, str]: + async def list_fn( + self, args1: Iterable[int], arg2: int + ) -> Mapping[int, str]: context = current_context() assert isinstance(context, LoggingContext) assert context.name == "c1" @@ -847,11 +850,11 @@ class CachedListDescriptorTestCase(unittest.TestCase): pass @descriptors.cachedList(cached_method_name="fn", list_name="args1") - def list_fn(self, args1: List[int]) -> "Deferred[dict]": + def list_fn(self, args1: List[int]) -> "Deferred[Mapping[int, str]]": return self.mock(args1) obj = Cls() - deferred_result: "Deferred[dict]" = Deferred() + deferred_result: "Deferred[Mapping[int, str]]" = Deferred() obj.mock.return_value = deferred_result # start off several concurrent lookups of the same key @@ -890,7 +893,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): pass @descriptors.cachedList(cached_method_name="fn", list_name="args1") - async def list_fn(self, args1: List[int], arg2: int) -> Dict[int, str]: + async def list_fn(self, args1: List[int], arg2: int) -> Mapping[int, str]: # we want this 
to behave like an asynchronous function await run_on_reactor() return self.mock(args1, arg2) @@ -929,7 +932,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): pass @cachedList(cached_method_name="fn", list_name="args") - async def list_fn(self, args: List[int]) -> Dict[int, str]: + async def list_fn(self, args: List[int]) -> Mapping[int, str]: await complete_lookup return {arg: str(arg) for arg in args} @@ -964,7 +967,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): pass @cachedList(cached_method_name="fn", list_name="args") - async def list_fn(self, args: List[int]) -> Dict[int, str]: + async def list_fn(self, args: List[int]) -> Mapping[int, str]: await make_deferred_yieldable(complete_lookup) self.inner_context_was_finished = current_context().finished return {arg: str(arg) for arg in args} From c9a0e1673ac6e5553216775d2bcd28a4630cd026 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 21 Sep 2023 09:47:29 -0400 Subject: [PATCH 509/562] Add missing license header. (#16359) --- changelog.d/16359.misc | 1 + synapse/rest/models.py | 13 +++++++++++++ 2 files changed, 14 insertions(+) create mode 100644 changelog.d/16359.misc diff --git a/changelog.d/16359.misc b/changelog.d/16359.misc new file mode 100644 index 0000000000..8752085fc6 --- /dev/null +++ b/changelog.d/16359.misc @@ -0,0 +1 @@ +Add missing licence header. diff --git a/synapse/rest/models.py b/synapse/rest/models.py index ac39cda8e5..d47de5c19e 100644 --- a/synapse/rest/models.py +++ b/synapse/rest/models.py @@ -1,3 +1,16 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
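The common thread in the typing patch above (before the license-header patch) is that values returned by `@cached` and `@cachedList` methods are shared between callers, so the annotations move to read-only views: `Tuple[str, ...]` instead of `List[str]`, and `Mapping`/`Sequence` instead of `Dict`/`List`. A toy, self-contained illustration of the failure mode this guards against, with a plain dict standing in for Synapse's cache descriptors:

    from typing import Dict, Tuple

    _cache: Dict[str, Tuple[str, ...]] = {}

    def get_current_hosts_in_room_ordered(room_id: str) -> Tuple[str, ...]:
        # Stand-in for the expensive lookup; the cached object is handed out
        # unchanged to every subsequent caller.
        if room_id not in _cache:
            _cache[room_id] = ("matrix.org", "example.com")
        return _cache[room_id]

    hosts = get_current_hosts_in_room_ordered("!abc:example.com")
    # hosts.append("evil.example")  # with a List return this would silently
    #                               # poison the cache for all later callers;
    #                               # on a tuple it raises AttributeError.

The SQLite fallback returning `tuple(domains)` and the PostgreSQL transaction returning `tuple(d for d, in txn if d is not None)` in the roommember hunk apply the same idea at the point the cached value is built.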
from pydantic import BaseModel, Extra From 3d9661baaafc62c963b740ac393a624820404f3d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Sep 2023 11:33:42 +0100 Subject: [PATCH 510/562] Bump dawidd6/action-download-artifact from 2.27.0 to 2.28.0 (#16374) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docs-pr-netlify.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml index 928bcae8cf..b443cd87d1 100644 --- a/.github/workflows/docs-pr-netlify.yaml +++ b/.github/workflows/docs-pr-netlify.yaml @@ -14,7 +14,7 @@ jobs: # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess: - name: 📥 Download artifact - uses: dawidd6/action-download-artifact@246dbf436b23d7c49e21a7ab8204ca9ecd1fe615 # v2.27.0 + uses: dawidd6/action-download-artifact@268677152d06ba59fcec7a7f0b5d961b6ccd7e1e # v2.28.0 with: workflow: docs-pr.yaml run_id: ${{ github.event.workflow_run.id }} From a40f7724ff64ebf2a8d8094a15d56d1f7469449c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Sep 2023 11:34:16 +0100 Subject: [PATCH 511/562] Bump docker/setup-buildx-action from 2 to 3 (#16375) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 2 +- .github/workflows/release-artifacts.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index a1793b9010..12440f10a8 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -24,7 +24,7 @@ jobs: - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Inspect builder run: docker buildx inspect diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index f031127cce..b1ee3be7ec 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -61,7 +61,7 @@ jobs: - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: install: true From 9479998c7620dcc7dd4fb0051a9e635c391b78f8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Sep 2023 11:35:24 +0100 Subject: [PATCH 512/562] Bump gitpython from 3.1.35 to 3.1.37 (#16376) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index e1b3733c45..2d64cb33c2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -586,18 +586,21 @@ smmap = ">=3.0.1,<6" [[package]] name = "gitpython" -version = "3.1.35" +version = "3.1.37" description = "GitPython is a Python library used to interact with Git repositories" optional = false python-versions = ">=3.7" files = [ - {file = "GitPython-3.1.35-py3-none-any.whl", hash = "sha256:c19b4292d7a1d3c0f653858db273ff8a6614100d1eb1528b014ec97286193c09"}, - {file = "GitPython-3.1.35.tar.gz", hash = "sha256:9cbefbd1789a5fe9bcf621bb34d3f441f3a90c8461d377f84eda73e721d9b06b"}, + {file = "GitPython-3.1.37-py3-none-any.whl", hash = 
"sha256:5f4c4187de49616d710a77e98ddf17b4782060a1788df441846bddefbb89ab33"}, + {file = "GitPython-3.1.37.tar.gz", hash = "sha256:f9b9ddc0761c125d5780eab2d64be4873fc6817c2899cbcb34b02344bdc7bc54"}, ] [package.dependencies] gitdb = ">=4.0.1,<5" +[package.extras] +test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-sugar"] + [[package]] name = "hiredis" version = "2.2.3" @@ -3347,4 +3350,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "104f108b3c966be05e17cf9975b4061942b354fe9a57cbf7372371fd56b1bf24" +content-hash = "1481c785665220fbf79613030a581b7289d49b7f336cc136dd87d39048f5446c" From d61d6a3e688a5a80490d34f7aa24d249f024dc4d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Sep 2023 11:42:46 +0100 Subject: [PATCH 513/562] Bump msgpack from 1.0.5 to 1.0.6 (#16377) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 123 +++++++++++++++++++++++++--------------------------- 1 file changed, 58 insertions(+), 65 deletions(-) diff --git a/poetry.lock b/poetry.lock index 2d64cb33c2..e43eee384e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1376,74 +1376,67 @@ files = [ [[package]] name = "msgpack" -version = "1.0.5" +version = "1.0.6" description = "MessagePack serializer" optional = false -python-versions = "*" +python-versions = ">=3.8" files = [ - {file = "msgpack-1.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:525228efd79bb831cf6830a732e2e80bc1b05436b086d4264814b4b2955b2fa9"}, - {file = "msgpack-1.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4f8d8b3bf1ff2672567d6b5c725a1b347fe838b912772aa8ae2bf70338d5a198"}, - {file = "msgpack-1.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cdc793c50be3f01106245a61b739328f7dccc2c648b501e237f0699fe1395b81"}, - {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cb47c21a8a65b165ce29f2bec852790cbc04936f502966768e4aae9fa763cb7"}, - {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e42b9594cc3bf4d838d67d6ed62b9e59e201862a25e9a157019e171fbe672dd3"}, - {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:55b56a24893105dc52c1253649b60f475f36b3aa0fc66115bffafb624d7cb30b"}, - {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:1967f6129fc50a43bfe0951c35acbb729be89a55d849fab7686004da85103f1c"}, - {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20a97bf595a232c3ee6d57ddaadd5453d174a52594bf9c21d10407e2a2d9b3bd"}, - {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d25dd59bbbbb996eacf7be6b4ad082ed7eacc4e8f3d2df1ba43822da9bfa122a"}, - {file = "msgpack-1.0.5-cp310-cp310-win32.whl", hash = "sha256:382b2c77589331f2cb80b67cc058c00f225e19827dbc818d700f61513ab47bea"}, - {file = "msgpack-1.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:4867aa2df9e2a5fa5f76d7d5565d25ec76e84c106b55509e78c1ede0f152659a"}, - {file = "msgpack-1.0.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9f5ae84c5c8a857ec44dc180a8b0cc08238e021f57abdf51a8182e915e6299f0"}, - {file = "msgpack-1.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9e6ca5d5699bcd89ae605c150aee83b5321f2115695e741b99618f4856c50898"}, - {file = "msgpack-1.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:5494ea30d517a3576749cad32fa27f7585c65f5f38309c88c6d137877fa28a5a"}, - {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ab2f3331cb1b54165976a9d976cb251a83183631c88076613c6c780f0d6e45a"}, - {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28592e20bbb1620848256ebc105fc420436af59515793ed27d5c77a217477705"}, - {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe5c63197c55bce6385d9aee16c4d0641684628f63ace85f73571e65ad1c1e8d"}, - {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ed40e926fa2f297e8a653c954b732f125ef97bdd4c889f243182299de27e2aa9"}, - {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b2de4c1c0538dcb7010902a2b97f4e00fc4ddf2c8cda9749af0e594d3b7fa3d7"}, - {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bf22a83f973b50f9d38e55c6aade04c41ddda19b00c4ebc558930d78eecc64ed"}, - {file = "msgpack-1.0.5-cp311-cp311-win32.whl", hash = "sha256:c396e2cc213d12ce017b686e0f53497f94f8ba2b24799c25d913d46c08ec422c"}, - {file = "msgpack-1.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c4c68d87497f66f96d50142a2b73b97972130d93677ce930718f68828b382e2"}, - {file = "msgpack-1.0.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a2b031c2e9b9af485d5e3c4520f4220d74f4d222a5b8dc8c1a3ab9448ca79c57"}, - {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f837b93669ce4336e24d08286c38761132bc7ab29782727f8557e1eb21b2080"}, - {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1d46dfe3832660f53b13b925d4e0fa1432b00f5f7210eb3ad3bb9a13c6204a6"}, - {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:366c9a7b9057e1547f4ad51d8facad8b406bab69c7d72c0eb6f529cf76d4b85f"}, - {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4c075728a1095efd0634a7dccb06204919a2f67d1893b6aa8e00497258bf926c"}, - {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:f933bbda5a3ee63b8834179096923b094b76f0c7a73c1cfe8f07ad608c58844b"}, - {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:36961b0568c36027c76e2ae3ca1132e35123dcec0706c4b7992683cc26c1320c"}, - {file = "msgpack-1.0.5-cp36-cp36m-win32.whl", hash = "sha256:b5ef2f015b95f912c2fcab19c36814963b5463f1fb9049846994b007962743e9"}, - {file = "msgpack-1.0.5-cp36-cp36m-win_amd64.whl", hash = "sha256:288e32b47e67f7b171f86b030e527e302c91bd3f40fd9033483f2cacc37f327a"}, - {file = "msgpack-1.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:137850656634abddfb88236008339fdaba3178f4751b28f270d2ebe77a563b6c"}, - {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c05a4a96585525916b109bb85f8cb6511db1c6f5b9d9cbcbc940dc6b4be944b"}, - {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56a62ec00b636583e5cb6ad313bbed36bb7ead5fa3a3e38938503142c72cba4f"}, - {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef8108f8dedf204bb7b42994abf93882da1159728a2d4c5e82012edd92c9da9f"}, - {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1835c84d65f46900920b3708f5ba829fb19b1096c1800ad60bae8418652a951d"}, - {file = 
"msgpack-1.0.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e57916ef1bd0fee4f21c4600e9d1da352d8816b52a599c46460e93a6e9f17086"}, - {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:17358523b85973e5f242ad74aa4712b7ee560715562554aa2134d96e7aa4cbbf"}, - {file = "msgpack-1.0.5-cp37-cp37m-win32.whl", hash = "sha256:cb5aaa8c17760909ec6cb15e744c3ebc2ca8918e727216e79607b7bbce9c8f77"}, - {file = "msgpack-1.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:ab31e908d8424d55601ad7075e471b7d0140d4d3dd3272daf39c5c19d936bd82"}, - {file = "msgpack-1.0.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b72d0698f86e8d9ddf9442bdedec15b71df3598199ba33322d9711a19f08145c"}, - {file = "msgpack-1.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:379026812e49258016dd84ad79ac8446922234d498058ae1d415f04b522d5b2d"}, - {file = "msgpack-1.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:332360ff25469c346a1c5e47cbe2a725517919892eda5cfaffe6046656f0b7bb"}, - {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:476a8fe8fae289fdf273d6d2a6cb6e35b5a58541693e8f9f019bfe990a51e4ba"}, - {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9985b214f33311df47e274eb788a5893a761d025e2b92c723ba4c63936b69b1"}, - {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48296af57cdb1d885843afd73c4656be5c76c0c6328db3440c9601a98f303d87"}, - {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:addab7e2e1fcc04bd08e4eb631c2a90960c340e40dfc4a5e24d2ff0d5a3b3edb"}, - {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:916723458c25dfb77ff07f4c66aed34e47503b2eb3188b3adbec8d8aa6e00f48"}, - {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:821c7e677cc6acf0fd3f7ac664c98803827ae6de594a9f99563e48c5a2f27eb0"}, - {file = "msgpack-1.0.5-cp38-cp38-win32.whl", hash = "sha256:1c0f7c47f0087ffda62961d425e4407961a7ffd2aa004c81b9c07d9269512f6e"}, - {file = "msgpack-1.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:bae7de2026cbfe3782c8b78b0db9cbfc5455e079f1937cb0ab8d133496ac55e1"}, - {file = "msgpack-1.0.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:20c784e66b613c7f16f632e7b5e8a1651aa5702463d61394671ba07b2fc9e025"}, - {file = "msgpack-1.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:266fa4202c0eb94d26822d9bfd7af25d1e2c088927fe8de9033d929dd5ba24c5"}, - {file = "msgpack-1.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18334484eafc2b1aa47a6d42427da7fa8f2ab3d60b674120bce7a895a0a85bdd"}, - {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57e1f3528bd95cc44684beda696f74d3aaa8a5e58c816214b9046512240ef437"}, - {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:586d0d636f9a628ddc6a17bfd45aa5b5efaf1606d2b60fa5d87b8986326e933f"}, - {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a740fa0e4087a734455f0fc3abf5e746004c9da72fbd541e9b113013c8dc3282"}, - {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3055b0455e45810820db1f29d900bf39466df96ddca11dfa6d074fa47054376d"}, - {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a61215eac016f391129a013c9e46f3ab308db5f5ec9f25811e811f96962599a8"}, - {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:362d9655cd369b08fda06b6657a303eb7172d5279997abe094512e919cf74b11"}, - {file = "msgpack-1.0.5-cp39-cp39-win32.whl", hash = "sha256:ac9dd47af78cae935901a9a500104e2dea2e253207c924cc95de149606dc43cc"}, - {file = "msgpack-1.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:06f5174b5f8ed0ed919da0e62cbd4ffde676a374aba4020034da05fab67b9164"}, - {file = "msgpack-1.0.5.tar.gz", hash = "sha256:c075544284eadc5cddc70f4757331d99dcbc16b2bbd4849d15f8aae4cf36d31c"}, + {file = "msgpack-1.0.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f4321692e7f299277e55f322329b2c972d93bb612d85f3fda8741bec5c6285ce"}, + {file = "msgpack-1.0.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1f0e36a5fa7a182cde391a128a64f437657d2b9371dfa42eda3436245adccbf5"}, + {file = "msgpack-1.0.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b5c8dd9a386a66e50bd7fa22b7a49fb8ead2b3574d6bd69eb1caced6caea0803"}, + {file = "msgpack-1.0.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f85200ea102276afdd3749ca94747f057bbb868d1c52921ee2446730b508d0f"}, + {file = "msgpack-1.0.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a006c300e82402c0c8f1ded11352a3ba2a61b87e7abb3054c845af2ca8d553c"}, + {file = "msgpack-1.0.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:33bbf47ea5a6ff20c23426106e81863cdbb5402de1825493026ce615039cc99d"}, + {file = "msgpack-1.0.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:04450e4b5e1e662e7c86b6aafb7c230af9334fd0becf5e6b80459a507884241c"}, + {file = "msgpack-1.0.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b06a5095a79384760625b5de3f83f40b3053a385fb893be8a106fbbd84c14980"}, + {file = "msgpack-1.0.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3910211b0ab20be3a38e0bb944ed45bd4265d8d9f11a3d1674b95b298e08dd5c"}, + {file = "msgpack-1.0.6-cp310-cp310-win32.whl", hash = "sha256:1dc67b40fe81217b308ab12651adba05e7300b3a2ccf84d6b35a878e308dd8d4"}, + {file = "msgpack-1.0.6-cp310-cp310-win_amd64.whl", hash = "sha256:885de1ed5ea01c1bfe0a34c901152a264c3c1f8f1d382042b92ea354bd14bb0e"}, + {file = "msgpack-1.0.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:099c3d8a027367e1a6fc55d15336f04ff65c60c4f737b5739f7db4525c65fe9e"}, + {file = "msgpack-1.0.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9b88dc97ba86c96b964c3745a445d9a65f76fe21955a953064fe04adb63e9367"}, + {file = "msgpack-1.0.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:00ce5f827d4f26fc094043e6f08b6069c1b148efa2631c47615ae14fb6cafc89"}, + {file = "msgpack-1.0.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd6af61388be65a8701f5787362cb54adae20007e0cc67ca9221a4b95115583b"}, + {file = "msgpack-1.0.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:652e4b7497825b0af6259e2c54700e6dc33d2fc4ed92b8839435090d4c9cc911"}, + {file = "msgpack-1.0.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b08676a17e3f791daad34d5fcb18479e9c85e7200d5a17cbe8de798643a7e37"}, + {file = "msgpack-1.0.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:229ccb6713c8b941eaa5cf13dc7478eba117f21513b5893c35e44483e2f0c9c8"}, + {file = "msgpack-1.0.6-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:95ade0bd4cf69e04e8b8f8ec2d197d9c9c4a9b6902e048dc7456bf6d82e12a80"}, + {file = "msgpack-1.0.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:5b16344032a27b2ccfd341f89dadf3e4ef6407d91e4b93563c14644a8abb3ad7"}, + {file = "msgpack-1.0.6-cp311-cp311-win32.whl", hash = "sha256:55bb4a1bf94e39447bc08238a2fb8a767460388a8192f67c103442eb36920887"}, + {file = "msgpack-1.0.6-cp311-cp311-win_amd64.whl", hash = "sha256:ae97504958d0bc58c1152045c170815d5c4f8af906561ce044b6358b43d0c97e"}, + {file = "msgpack-1.0.6-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7ecf431786019a7bfedc28281531d706627f603e3691d64eccdbce3ecd353823"}, + {file = "msgpack-1.0.6-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a635aecf1047255576dbb0927cbf9a7aa4a68e9d54110cc3c926652d18f144e0"}, + {file = "msgpack-1.0.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:102cfb54eaefa73e8ca1e784b9352c623524185c98e057e519545131a56fb0af"}, + {file = "msgpack-1.0.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c5e05e4f5756758c58a8088aa10dc70d851c89f842b611fdccfc0581c1846bc"}, + {file = "msgpack-1.0.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68569509dd015fcdd1e6b2b3ccc8c51fd27d9a97f461ccc909270e220ee09685"}, + {file = "msgpack-1.0.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bf652839d16de91fe1cfb253e0a88db9a548796939533894e07f45d4bdf90a5f"}, + {file = "msgpack-1.0.6-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14db7e1b7a7ed362b2f94897bf2486c899c8bb50f6e34b2db92fe534cdab306f"}, + {file = "msgpack-1.0.6-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:159cfec18a6e125dd4723e2b1de6f202b34b87c850fb9d509acfd054c01135e9"}, + {file = "msgpack-1.0.6-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:6a01a072b2219b65a6ff74df208f20b2cac9401c60adb676ee34e53b4c651077"}, + {file = "msgpack-1.0.6-cp312-cp312-win32.whl", hash = "sha256:e36560d001d4ba469d469b02037f2dd404421fd72277d9474efe9f03f83fced5"}, + {file = "msgpack-1.0.6-cp312-cp312-win_amd64.whl", hash = "sha256:5e7fae9ca93258a956551708cf60dc6c8145574e32ce8c8c4d894e63bcb04341"}, + {file = "msgpack-1.0.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:40b801b768f5a765e33c68f30665d3c6ee1c8623a2d2bb78e6e59f2db4e4ceb7"}, + {file = "msgpack-1.0.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:da057d3652e698b00746e47f06dbb513314f847421e857e32e1dc61c46f6c052"}, + {file = "msgpack-1.0.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f75114c05ec56566da6b55122791cf5bb53d5aada96a98c016d6231e03132f76"}, + {file = "msgpack-1.0.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:61213482b5a387ead9e250e9e3cb290292feca39dc83b41c3b1b7b8ffc8d8ecb"}, + {file = "msgpack-1.0.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bae6c561f11b444b258b1b4be2bdd1e1cf93cd1d80766b7e869a79db4543a8a8"}, + {file = "msgpack-1.0.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:619a63753ba9e792fe3c6c0fc2b9ee2cfbd92153dd91bee029a89a71eb2942cd"}, + {file = "msgpack-1.0.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:70843788c85ca385846a2d2f836efebe7bb2687ca0734648bf5c9dc6c55602d2"}, + {file = "msgpack-1.0.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:fb4571efe86545b772a4630fee578c213c91cbcfd20347806e47fd4e782a18fe"}, + {file = "msgpack-1.0.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bbb4448a05d261fae423d5c0b0974ad899f60825bc77eabad5a0c518e78448c2"}, + {file = "msgpack-1.0.6-cp38-cp38-win32.whl", hash = 
"sha256:5cd67674db3c73026e0a2c729b909780e88bd9cbc8184256f9567640a5d299a8"}, + {file = "msgpack-1.0.6-cp38-cp38-win_amd64.whl", hash = "sha256:a1cf98afa7ad5e7012454ca3fde254499a13f9d92fd50cb46118118a249a1355"}, + {file = "msgpack-1.0.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d6d25b8a5c70e2334ed61a8da4c11cd9b97c6fbd980c406033f06e4463fda006"}, + {file = "msgpack-1.0.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:88cdb1da7fdb121dbb3116910722f5acab4d6e8bfcacab8fafe27e2e7744dc6a"}, + {file = "msgpack-1.0.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3b5658b1f9e486a2eec4c0c688f213a90085b9cf2fec76ef08f98fdf6c62f4b9"}, + {file = "msgpack-1.0.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76820f2ece3b0a7c948bbb6a599020e29574626d23a649476def023cbb026787"}, + {file = "msgpack-1.0.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c780d992f5d734432726b92a0c87bf1857c3d85082a8dea29cbf56e44a132b3"}, + {file = "msgpack-1.0.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0ed35d6d6122d0baa9a1b59ebca4ee302139f4cfb57dab85e4c73ab793ae7ed"}, + {file = "msgpack-1.0.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:32c0aff31f33033f4961abc01f78497e5e07bac02a508632aef394b384d27428"}, + {file = "msgpack-1.0.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:35ad5aed9b52217d4cea739d0ea3a492a18dd86fecb4b132668a69f27fb0363b"}, + {file = "msgpack-1.0.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47275ff73005a3e5e146e50baa2378e1730cba6e292f0222bc496a8e4c4adfc8"}, + {file = "msgpack-1.0.6-cp39-cp39-win32.whl", hash = "sha256:7baf16fd8908a025c4a8d7b699103e72d41f967e2aee5a2065432bcdbd9fd06e"}, + {file = "msgpack-1.0.6-cp39-cp39-win_amd64.whl", hash = "sha256:fc97aa4b4fb928ff4d3b74da7c30b360d0cb3ede49a5a6e1fd9705f49aea1deb"}, + {file = "msgpack-1.0.6.tar.gz", hash = "sha256:25d3746da40f3c8c59c3b1d001e49fd2aa17904438f980d9a391370366df001e"}, ] [[package]] From 9ee54c13703de8c95223247209826df11b55cd01 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Sep 2023 11:51:04 +0100 Subject: [PATCH 514/562] Bump sentry-sdk from 1.30.0 to 1.31.0 (#16378) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index e43eee384e..6f451106ec 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2384,13 +2384,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "1.30.0" +version = "1.31.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = "*" files = [ - {file = "sentry-sdk-1.30.0.tar.gz", hash = "sha256:7dc873b87e1faf4d00614afd1058bfa1522942f33daef8a59f90de8ed75cd10c"}, - {file = "sentry_sdk-1.30.0-py2.py3-none-any.whl", hash = "sha256:2e53ad63f96bb9da6570ba2e755c267e529edcf58580a2c0d2a11ef26e1e678b"}, + {file = "sentry-sdk-1.31.0.tar.gz", hash = "sha256:6de2e88304873484207fed836388e422aeff000609b104c802749fd89d56ba5b"}, + {file = "sentry_sdk-1.31.0-py2.py3-none-any.whl", hash = "sha256:64a7141005fb775b9db298a30de93e3b83e0ddd1232dc6f36eb38aebc1553291"}, ] [package.dependencies] @@ -2400,10 +2400,12 @@ urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""} [package.extras] aiohttp = ["aiohttp (>=3.5)"] arq = ["arq (>=0.23)"] +asyncpg = ["asyncpg (>=0.23)"] beam = ["apache-beam (>=2.12)"] bottle = 
["bottle (>=0.12.13)"] celery = ["celery (>=3)"] chalice = ["chalice (>=1.16.0)"] +clickhouse-driver = ["clickhouse-driver (>=0.2.0)"] django = ["django (>=1.8)"] falcon = ["falcon (>=1.4)"] fastapi = ["fastapi (>=0.79.0)"] From fad4c63d2528c6b9f1711718a7cc5cdefde665be Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 25 Sep 2023 15:45:17 +0100 Subject: [PATCH 515/562] Get CI to check PRs have been signed-off (#16348) --- .github/workflows/tests.yml | 8 +++++++- changelog.d/16348.misc | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16348.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index fb117380d0..b5fffb3653 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -12,6 +12,10 @@ concurrency: cancel-in-progress: true jobs: + check-signoff: + if: "github.event_name == 'pull_request'" + uses: "matrix-org/backend-meta/.github/workflows/sign-off.yml@v2" + # Job to detect what has changed so we don't run e.g. Rust checks on PRs that # don't modify Rust code. changes: @@ -225,6 +229,7 @@ jobs: - check-lockfile - lint-clippy - lint-rustfmt + - check-signoff runs-on: ubuntu-latest steps: - run: "true" @@ -627,9 +632,10 @@ jobs: with: needs: ${{ toJSON(needs) }} - # The newsfile lint may be skipped on non PR builds + # The newsfile and signoff lints may be skipped on non PR builds # Cargo test is skipped if there is no changes on Rust code skippable: | + check-signoff lint-newsfile cargo-test cargo-bench diff --git a/changelog.d/16348.misc b/changelog.d/16348.misc new file mode 100644 index 0000000000..846bb048c8 --- /dev/null +++ b/changelog.d/16348.misc @@ -0,0 +1 @@ +Get CI to check PRs have been signed-off. From 6d7095913acd43984e728049d42e66b451bcd71c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Sep 2023 11:14:05 -0400 Subject: [PATCH 516/562] Bump cryptography from 41.0.3 to 41.0.4 (#16362) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/poetry.lock b/poetry.lock index 6f451106ec..5d560151fb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -457,34 +457,34 @@ files = [ [[package]] name = "cryptography" -version = "41.0.3" +version = "41.0.4" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-41.0.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507"}, - {file = "cryptography-41.0.3-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922"}, - {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81"}, - {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd"}, - {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47"}, - {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116"}, - {file = "cryptography-41.0.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c"}, - {file = "cryptography-41.0.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae"}, - {file = "cryptography-41.0.3-cp37-abi3-win32.whl", hash = "sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306"}, - {file = "cryptography-41.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574"}, - {file = "cryptography-41.0.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087"}, - {file = "cryptography-41.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858"}, - {file = "cryptography-41.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906"}, - {file = "cryptography-41.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e"}, - {file = "cryptography-41.0.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd"}, - {file = "cryptography-41.0.3-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207"}, - {file = "cryptography-41.0.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84"}, - {file = "cryptography-41.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7"}, - {file = "cryptography-41.0.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d"}, - {file = "cryptography-41.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de"}, - {file = "cryptography-41.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1"}, - {file = "cryptography-41.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4"}, - {file = "cryptography-41.0.3.tar.gz", hash = 
"sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34"}, + {file = "cryptography-41.0.4-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839"}, + {file = "cryptography-41.0.4-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f"}, + {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714"}, + {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb"}, + {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13"}, + {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143"}, + {file = "cryptography-41.0.4-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397"}, + {file = "cryptography-41.0.4-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860"}, + {file = "cryptography-41.0.4-cp37-abi3-win32.whl", hash = "sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd"}, + {file = "cryptography-41.0.4-cp37-abi3-win_amd64.whl", hash = "sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d"}, + {file = "cryptography-41.0.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67"}, + {file = "cryptography-41.0.4-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e"}, + {file = "cryptography-41.0.4-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829"}, + {file = "cryptography-41.0.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca"}, + {file = "cryptography-41.0.4-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d"}, + {file = "cryptography-41.0.4-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac"}, + {file = "cryptography-41.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9"}, + {file = "cryptography-41.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f"}, + {file = "cryptography-41.0.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91"}, + {file = "cryptography-41.0.4-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8"}, + {file = "cryptography-41.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6"}, + {file = "cryptography-41.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311"}, + {file = "cryptography-41.0.4.tar.gz", hash = 
"sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a"}, ] [package.dependencies] From 12611bfcddfe87e3bad90ef96a648acc2f1cebf3 Mon Sep 17 00:00:00 2001 From: Maxwell G Date: Mon, 25 Sep 2023 10:19:08 -0500 Subject: [PATCH 517/562] Add support for pydantic v2 via pydantic.v1 compat module (#16332) While maintaining support with pydantic v1. --- changelog.d/16332.misc | 1 + poetry.lock | 193 +++++++++++++++------ pyproject.toml | 12 +- scripts-dev/check_pydantic_models.py | 98 +++++++++-- synapse/_pydantic_compat.py | 26 +++ synapse/config/_util.py | 10 +- synapse/config/workers.py | 10 +- synapse/events/validator.py | 10 +- synapse/http/servlet.py | 11 +- synapse/rest/client/account.py | 7 +- synapse/rest/client/devices.py | 7 +- synapse/rest/client/directory.py | 8 +- synapse/rest/client/models.py | 7 +- synapse/rest/key/v2/remote_key_resource.py | 8 +- synapse/rest/models.py | 9 +- synapse/storage/background_updates.py | 7 +- tests/rest/client/test_models.py | 8 +- 17 files changed, 343 insertions(+), 89 deletions(-) create mode 100644 changelog.d/16332.misc create mode 100644 synapse/_pydantic_compat.py diff --git a/changelog.d/16332.misc b/changelog.d/16332.misc new file mode 100644 index 0000000000..862d547d60 --- /dev/null +++ b/changelog.d/16332.misc @@ -0,0 +1 @@ +Added support for pydantic v2 in addition to pydantic v1. Contributed by Maxwell G (@gotmax23). diff --git a/poetry.lock b/poetry.lock index 5d560151fb..9a8be27b6f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -11,6 +11,20 @@ files = [ {file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"}, ] +[[package]] +name = "annotated-types" +version = "0.5.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.7" +files = [ + {file = "annotated_types-0.5.0-py3-none-any.whl", hash = "sha256:58da39888f92c276ad970249761ebea80ba544b77acddaa1a4d6cf78287d45fd"}, + {file = "annotated_types-0.5.0.tar.gz", hash = "sha256:47cdc3490d9ac1506ce92c7aaa76c579dc3509ff11e098fc867e5130ab7be802"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + [[package]] name = "astroid" version = "2.15.0" @@ -1818,55 +1832,140 @@ files = [ [[package]] name = "pydantic" -version = "1.10.11" -description = "Data validation and settings management using python type hints" +version = "2.3.0" +description = "Data validation using Python type hints" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ff44c5e89315b15ff1f7fdaf9853770b810936d6b01a7bcecaa227d2f8fe444f"}, - {file = "pydantic-1.10.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a6c098d4ab5e2d5b3984d3cb2527e2d6099d3de85630c8934efcfdc348a9760e"}, - {file = "pydantic-1.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16928fdc9cb273c6af00d9d5045434c39afba5f42325fb990add2c241402d151"}, - {file = "pydantic-1.10.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0588788a9a85f3e5e9ebca14211a496409cb3deca5b6971ff37c556d581854e7"}, - {file = "pydantic-1.10.11-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e9baf78b31da2dc3d3f346ef18e58ec5f12f5aaa17ac517e2ffd026a92a87588"}, - {file = "pydantic-1.10.11-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:373c0840f5c2b5b1ccadd9286782852b901055998136287828731868027a724f"}, - {file = 
"pydantic-1.10.11-cp310-cp310-win_amd64.whl", hash = "sha256:c3339a46bbe6013ef7bdd2844679bfe500347ac5742cd4019a88312aa58a9847"}, - {file = "pydantic-1.10.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:08a6c32e1c3809fbc49debb96bf833164f3438b3696abf0fbeceb417d123e6eb"}, - {file = "pydantic-1.10.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a451ccab49971af043ec4e0d207cbc8cbe53dbf148ef9f19599024076fe9c25b"}, - {file = "pydantic-1.10.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b02d24f7b2b365fed586ed73582c20f353a4c50e4be9ba2c57ab96f8091ddae"}, - {file = "pydantic-1.10.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f34739a89260dfa420aa3cbd069fbcc794b25bbe5c0a214f8fb29e363484b66"}, - {file = "pydantic-1.10.11-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e297897eb4bebde985f72a46a7552a7556a3dd11e7f76acda0c1093e3dbcf216"}, - {file = "pydantic-1.10.11-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d185819a7a059550ecb85d5134e7d40f2565f3dd94cfd870132c5f91a89cf58c"}, - {file = "pydantic-1.10.11-cp311-cp311-win_amd64.whl", hash = "sha256:4400015f15c9b464c9db2d5d951b6a780102cfa5870f2c036d37c23b56f7fc1b"}, - {file = "pydantic-1.10.11-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2417de68290434461a266271fc57274a138510dca19982336639484c73a07af6"}, - {file = "pydantic-1.10.11-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:331c031ba1554b974c98679bd0780d89670d6fd6f53f5d70b10bdc9addee1713"}, - {file = "pydantic-1.10.11-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8268a735a14c308923e8958363e3a3404f6834bb98c11f5ab43251a4e410170c"}, - {file = "pydantic-1.10.11-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:44e51ba599c3ef227e168424e220cd3e544288c57829520dc90ea9cb190c3248"}, - {file = "pydantic-1.10.11-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d7781f1d13b19700b7949c5a639c764a077cbbdd4322ed505b449d3ca8edcb36"}, - {file = "pydantic-1.10.11-cp37-cp37m-win_amd64.whl", hash = "sha256:7522a7666157aa22b812ce14c827574ddccc94f361237ca6ea8bb0d5c38f1629"}, - {file = "pydantic-1.10.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc64eab9b19cd794a380179ac0e6752335e9555d214cfcb755820333c0784cb3"}, - {file = "pydantic-1.10.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8dc77064471780262b6a68fe67e013298d130414d5aaf9b562c33987dbd2cf4f"}, - {file = "pydantic-1.10.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe429898f2c9dd209bd0632a606bddc06f8bce081bbd03d1c775a45886e2c1cb"}, - {file = "pydantic-1.10.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:192c608ad002a748e4a0bed2ddbcd98f9b56df50a7c24d9a931a8c5dd053bd3d"}, - {file = "pydantic-1.10.11-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ef55392ec4bb5721f4ded1096241e4b7151ba6d50a50a80a2526c854f42e6a2f"}, - {file = "pydantic-1.10.11-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:41e0bb6efe86281623abbeeb0be64eab740c865388ee934cd3e6a358784aca6e"}, - {file = "pydantic-1.10.11-cp38-cp38-win_amd64.whl", hash = "sha256:265a60da42f9f27e0b1014eab8acd3e53bd0bad5c5b4884e98a55f8f596b2c19"}, - {file = "pydantic-1.10.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:469adf96c8e2c2bbfa655fc7735a2a82f4c543d9fee97bd113a7fb509bf5e622"}, - {file = "pydantic-1.10.11-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:e6cbfbd010b14c8a905a7b10f9fe090068d1744d46f9e0c021db28daeb8b6de1"}, - {file = "pydantic-1.10.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abade85268cc92dff86d6effcd917893130f0ff516f3d637f50dadc22ae93999"}, - {file = "pydantic-1.10.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9738b0f2e6c70f44ee0de53f2089d6002b10c33264abee07bdb5c7f03038303"}, - {file = "pydantic-1.10.11-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:787cf23e5a0cde753f2eabac1b2e73ae3844eb873fd1f5bdbff3048d8dbb7604"}, - {file = "pydantic-1.10.11-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:174899023337b9fc685ac8adaa7b047050616136ccd30e9070627c1aaab53a13"}, - {file = "pydantic-1.10.11-cp39-cp39-win_amd64.whl", hash = "sha256:1954f8778489a04b245a1e7b8b22a9d3ea8ef49337285693cf6959e4b757535e"}, - {file = "pydantic-1.10.11-py3-none-any.whl", hash = "sha256:008c5e266c8aada206d0627a011504e14268a62091450210eda7c07fabe6963e"}, - {file = "pydantic-1.10.11.tar.gz", hash = "sha256:f66d479cf7eb331372c470614be6511eae96f1f120344c25f3f9bb59fb1b5528"}, + {file = "pydantic-2.3.0-py3-none-any.whl", hash = "sha256:45b5e446c6dfaad9444819a293b921a40e1db1aa61ea08aede0522529ce90e81"}, + {file = "pydantic-2.3.0.tar.gz", hash = "sha256:1607cc106602284cd4a00882986570472f193fde9cb1259bceeaedb26aa79a6d"}, ] [package.dependencies] -typing-extensions = ">=4.2.0" +annotated-types = ">=0.4.0" +pydantic-core = "2.6.3" +typing-extensions = ">=4.6.1" [package.extras] -dotenv = ["python-dotenv (>=0.10.4)"] -email = ["email-validator (>=1.0.3)"] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.6.3" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic_core-2.6.3-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:1a0ddaa723c48af27d19f27f1c73bdc615c73686d763388c8683fe34ae777bad"}, + {file = "pydantic_core-2.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5cfde4fab34dd1e3a3f7f3db38182ab6c95e4ea91cf322242ee0be5c2f7e3d2f"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5493a7027bfc6b108e17c3383959485087d5942e87eb62bbac69829eae9bc1f7"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:84e87c16f582f5c753b7f39a71bd6647255512191be2d2dbf49458c4ef024588"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:522a9c4a4d1924facce7270c84b5134c5cabcb01513213662a2e89cf28c1d309"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaafc776e5edc72b3cad1ccedb5fd869cc5c9a591f1213aa9eba31a781be9ac1"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a750a83b2728299ca12e003d73d1264ad0440f60f4fc9cee54acc489249b728"}, + {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e8b374ef41ad5c461efb7a140ce4730661aadf85958b5c6a3e9cf4e040ff4bb"}, + {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b594b64e8568cf09ee5c9501ede37066b9fc41d83d58f55b9952e32141256acd"}, + {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2a20c533cb80466c1d42a43a4521669ccad7cf2967830ac62c2c2f9cece63e7e"}, + {file = "pydantic_core-2.6.3-cp310-none-win32.whl", hash = 
"sha256:04fe5c0a43dec39aedba0ec9579001061d4653a9b53a1366b113aca4a3c05ca7"}, + {file = "pydantic_core-2.6.3-cp310-none-win_amd64.whl", hash = "sha256:6bf7d610ac8f0065a286002a23bcce241ea8248c71988bda538edcc90e0c39ad"}, + {file = "pydantic_core-2.6.3-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:6bcc1ad776fffe25ea5c187a028991c031a00ff92d012ca1cc4714087e575973"}, + {file = "pydantic_core-2.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:df14f6332834444b4a37685810216cc8fe1fe91f447332cd56294c984ecbff1c"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0b7486d85293f7f0bbc39b34e1d8aa26210b450bbd3d245ec3d732864009819"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a892b5b1871b301ce20d40b037ffbe33d1407a39639c2b05356acfef5536d26a"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:883daa467865e5766931e07eb20f3e8152324f0adf52658f4d302242c12e2c32"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4eb77df2964b64ba190eee00b2312a1fd7a862af8918ec70fc2d6308f76ac64"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce8c84051fa292a5dc54018a40e2a1926fd17980a9422c973e3ebea017aa8da"}, + {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:22134a4453bd59b7d1e895c455fe277af9d9d9fbbcb9dc3f4a97b8693e7e2c9b"}, + {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:02e1c385095efbd997311d85c6021d32369675c09bcbfff3b69d84e59dc103f6"}, + {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d79f1f2f7ebdb9b741296b69049ff44aedd95976bfee38eb4848820628a99b50"}, + {file = "pydantic_core-2.6.3-cp311-none-win32.whl", hash = "sha256:430ddd965ffd068dd70ef4e4d74f2c489c3a313adc28e829dd7262cc0d2dd1e8"}, + {file = "pydantic_core-2.6.3-cp311-none-win_amd64.whl", hash = "sha256:84f8bb34fe76c68c9d96b77c60cef093f5e660ef8e43a6cbfcd991017d375950"}, + {file = "pydantic_core-2.6.3-cp311-none-win_arm64.whl", hash = "sha256:5a2a3c9ef904dcdadb550eedf3291ec3f229431b0084666e2c2aa8ff99a103a2"}, + {file = "pydantic_core-2.6.3-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:8421cf496e746cf8d6b677502ed9a0d1e4e956586cd8b221e1312e0841c002d5"}, + {file = "pydantic_core-2.6.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bb128c30cf1df0ab78166ded1ecf876620fb9aac84d2413e8ea1594b588c735d"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37a822f630712817b6ecc09ccc378192ef5ff12e2c9bae97eb5968a6cdf3b862"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:240a015102a0c0cc8114f1cba6444499a8a4d0333e178bc504a5c2196defd456"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f90e5e3afb11268628c89f378f7a1ea3f2fe502a28af4192e30a6cdea1e7d5e"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:340e96c08de1069f3d022a85c2a8c63529fd88709468373b418f4cf2c949fb0e"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1480fa4682e8202b560dcdc9eeec1005f62a15742b813c88cdc01d44e85308e5"}, + {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash 
= "sha256:f14546403c2a1d11a130b537dda28f07eb6c1805a43dae4617448074fd49c282"}, + {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a87c54e72aa2ef30189dc74427421e074ab4561cf2bf314589f6af5b37f45e6d"}, + {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f93255b3e4d64785554e544c1c76cd32f4a354fa79e2eeca5d16ac2e7fdd57aa"}, + {file = "pydantic_core-2.6.3-cp312-none-win32.whl", hash = "sha256:f70dc00a91311a1aea124e5f64569ea44c011b58433981313202c46bccbec0e1"}, + {file = "pydantic_core-2.6.3-cp312-none-win_amd64.whl", hash = "sha256:23470a23614c701b37252618e7851e595060a96a23016f9a084f3f92f5ed5881"}, + {file = "pydantic_core-2.6.3-cp312-none-win_arm64.whl", hash = "sha256:1ac1750df1b4339b543531ce793b8fd5c16660a95d13aecaab26b44ce11775e9"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:a53e3195f134bde03620d87a7e2b2f2046e0e5a8195e66d0f244d6d5b2f6d31b"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:f2969e8f72c6236c51f91fbb79c33821d12a811e2a94b7aa59c65f8dbdfad34a"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:672174480a85386dd2e681cadd7d951471ad0bb028ed744c895f11f9d51b9ebe"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:002d0ea50e17ed982c2d65b480bd975fc41086a5a2f9c924ef8fc54419d1dea3"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ccc13afee44b9006a73d2046068d4df96dc5b333bf3509d9a06d1b42db6d8bf"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:439a0de139556745ae53f9cc9668c6c2053444af940d3ef3ecad95b079bc9987"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d63b7545d489422d417a0cae6f9898618669608750fc5e62156957e609e728a5"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b44c42edc07a50a081672e25dfe6022554b47f91e793066a7b601ca290f71e42"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1c721bfc575d57305dd922e6a40a8fe3f762905851d694245807a351ad255c58"}, + {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5e4a2cf8c4543f37f5dc881de6c190de08096c53986381daebb56a355be5dfe6"}, + {file = "pydantic_core-2.6.3-cp37-none-win32.whl", hash = "sha256:d9b4916b21931b08096efed090327f8fe78e09ae8f5ad44e07f5c72a7eedb51b"}, + {file = "pydantic_core-2.6.3-cp37-none-win_amd64.whl", hash = "sha256:a8acc9dedd304da161eb071cc7ff1326aa5b66aadec9622b2574ad3ffe225525"}, + {file = "pydantic_core-2.6.3-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:5e9c068f36b9f396399d43bfb6defd4cc99c36215f6ff33ac8b9c14ba15bdf6b"}, + {file = "pydantic_core-2.6.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e61eae9b31799c32c5f9b7be906be3380e699e74b2db26c227c50a5fc7988698"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85463560c67fc65cd86153a4975d0b720b6d7725cf7ee0b2d291288433fc21b"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9616567800bdc83ce136e5847d41008a1d602213d024207b0ff6cab6753fe645"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e9b65a55bbabda7fccd3500192a79f6e474d8d36e78d1685496aad5f9dbd92c"}, + {file = 
"pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f468d520f47807d1eb5d27648393519655eadc578d5dd862d06873cce04c4d1b"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9680dd23055dd874173a3a63a44e7f5a13885a4cfd7e84814be71be24fba83db"}, + {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a718d56c4d55efcfc63f680f207c9f19c8376e5a8a67773535e6f7e80e93170"}, + {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8ecbac050856eb6c3046dea655b39216597e373aa8e50e134c0e202f9c47efec"}, + {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:788be9844a6e5c4612b74512a76b2153f1877cd845410d756841f6c3420230eb"}, + {file = "pydantic_core-2.6.3-cp38-none-win32.whl", hash = "sha256:07a1aec07333bf5adebd8264047d3dc518563d92aca6f2f5b36f505132399efc"}, + {file = "pydantic_core-2.6.3-cp38-none-win_amd64.whl", hash = "sha256:621afe25cc2b3c4ba05fff53525156d5100eb35c6e5a7cf31d66cc9e1963e378"}, + {file = "pydantic_core-2.6.3-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:813aab5bfb19c98ae370952b6f7190f1e28e565909bfc219a0909db168783465"}, + {file = "pydantic_core-2.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:50555ba3cb58f9861b7a48c493636b996a617db1a72c18da4d7f16d7b1b9952b"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e20f8baedd7d987bd3f8005c146e6bcbda7cdeefc36fad50c66adb2dd2da48"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b0a5d7edb76c1c57b95df719af703e796fc8e796447a1da939f97bfa8a918d60"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f06e21ad0b504658a3a9edd3d8530e8cea5723f6ea5d280e8db8efc625b47e49"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea053cefa008fda40f92aab937fb9f183cf8752e41dbc7bc68917884454c6362"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:171a4718860790f66d6c2eda1d95dd1edf64f864d2e9f9115840840cf5b5713f"}, + {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ed7ceca6aba5331ece96c0e328cd52f0dcf942b8895a1ed2642de50800b79d3"}, + {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:acafc4368b289a9f291e204d2c4c75908557d4f36bd3ae937914d4529bf62a76"}, + {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1aa712ba150d5105814e53cb141412217146fedc22621e9acff9236d77d2a5ef"}, + {file = "pydantic_core-2.6.3-cp39-none-win32.whl", hash = "sha256:44b4f937b992394a2e81a5c5ce716f3dcc1237281e81b80c748b2da6dd5cf29a"}, + {file = "pydantic_core-2.6.3-cp39-none-win_amd64.whl", hash = "sha256:9b33bf9658cb29ac1a517c11e865112316d09687d767d7a0e4a63d5c640d1b17"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d7050899026e708fb185e174c63ebc2c4ee7a0c17b0a96ebc50e1f76a231c057"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:99faba727727b2e59129c59542284efebbddade4f0ae6a29c8b8d3e1f437beb7"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fa159b902d22b283b680ef52b532b29554ea2a7fc39bf354064751369e9dbd7"}, + {file = 
"pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:046af9cfb5384f3684eeb3f58a48698ddab8dd870b4b3f67f825353a14441418"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:930bfe73e665ebce3f0da2c6d64455098aaa67e1a00323c74dc752627879fc67"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:85cc4d105747d2aa3c5cf3e37dac50141bff779545ba59a095f4a96b0a460e70"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b25afe9d5c4f60dcbbe2b277a79be114e2e65a16598db8abee2a2dcde24f162b"}, + {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e49ce7dc9f925e1fb010fc3d555250139df61fa6e5a0a95ce356329602c11ea9"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2dd50d6a1aef0426a1d0199190c6c43ec89812b1f409e7fe44cb0fbf6dfa733c"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6595b0d8c8711e8e1dc389d52648b923b809f68ac1c6f0baa525c6440aa0daa"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ef724a059396751aef71e847178d66ad7fc3fc969a1a40c29f5aac1aa5f8784"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3c8945a105f1589ce8a693753b908815e0748f6279959a4530f6742e1994dcb6"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c8c6660089a25d45333cb9db56bb9e347241a6d7509838dbbd1931d0e19dbc7f"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:692b4ff5c4e828a38716cfa92667661a39886e71136c97b7dac26edef18767f7"}, + {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f1a5d8f18877474c80b7711d870db0eeef9442691fcdb00adabfc97e183ee0b0"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:3796a6152c545339d3b1652183e786df648ecdf7c4f9347e1d30e6750907f5bb"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b962700962f6e7a6bd77e5f37320cabac24b4c0f76afeac05e9f93cf0c620014"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56ea80269077003eaa59723bac1d8bacd2cd15ae30456f2890811efc1e3d4413"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c0ebbebae71ed1e385f7dfd9b74c1cff09fed24a6df43d326dd7f12339ec34"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:252851b38bad3bfda47b104ffd077d4f9604a10cb06fe09d020016a25107bf98"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6656a0ae383d8cd7cc94e91de4e526407b3726049ce8d7939049cbfa426518c8"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d9140ded382a5b04a1c030b593ed9bf3088243a0a8b7fa9f071a5736498c5483"}, + {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d38bbcef58220f9c81e42c255ef0bf99735d8f11edef69ab0b499da77105158a"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:c9d469204abcca28926cbc28ce98f28e50e488767b084fb3fbdf21af11d3de26"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:48c1ed8b02ffea4d5c9c220eda27af02b8149fe58526359b3c07eb391cb353a2"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b2b1bfed698fa410ab81982f681f5b1996d3d994ae8073286515ac4d165c2e7"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf9d42a71a4d7a7c1f14f629e5c30eac451a6fc81827d2beefd57d014c006c4a"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4292ca56751aebbe63a84bbfc3b5717abb09b14d4b4442cc43fd7c49a1529efd"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7dc2ce039c7290b4ef64334ec7e6ca6494de6eecc81e21cb4f73b9b39991408c"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:615a31b1629e12445c0e9fc8339b41aaa6cc60bd53bf802d5fe3d2c0cda2ae8d"}, + {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1fa1f6312fb84e8c281f32b39affe81984ccd484da6e9d65b3d18c202c666149"}, + {file = "pydantic_core-2.6.3.tar.gz", hash = "sha256:1508f37ba9e3ddc0189e6ff4e2228bd2d3c3a4641cbe8c07177162f76ed696c7"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pygithub" @@ -2071,7 +2170,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -2079,15 +2177,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -2104,7 +2195,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -2112,7 +2202,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -3345,4 +3434,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "1481c785665220fbf79613030a581b7289d49b7f336cc136dd87d39048f5446c" +content-hash = "364c309486e9d93d4da8a1a3784d5ecd7d2a9734cf84dcd4a991f2cd54f0b5b5" diff --git a/pyproject.toml b/pyproject.toml index 
f69336a73f..ea55d81b13 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -209,11 +209,11 @@ cryptography = ">=3.4.7" # ijson 3.1.4 fixes a bug with "." in property names ijson = ">=3.1.4" matrix-common = "^1.3.0" -# We need packaging.requirements.Requirement, added in 16.1. -packaging = ">=16.1" -# This is the most recent version of Pydantic with available on common distros. -# We are currently incompatible with >=2.0.0: (https://github.com/matrix-org/synapse/issues/15858) -pydantic = "^1.7.4" +# We need packaging.version.Version(...).major added in 20.0. +packaging = ">=20.0" +# We support pydantic v1 and pydantic v2 via the pydantic.v1 compat module. +# See https://github.com/matrix-org/synapse/issues/15858 +pydantic = ">=1.7.4, <3" # This is for building the rust components during "poetry install", which # currently ignores the `build-system.requires` directive (c.f. @@ -321,6 +321,8 @@ all = [ isort = ">=5.10.1" black = ">=22.7.0" ruff = "0.0.290" +# Type checking only works with the pydantic.v1 compat module from pydantic v2 +pydantic = "^2" # Typechecking lxml-stubs = ">=0.4.0" diff --git a/scripts-dev/check_pydantic_models.py b/scripts-dev/check_pydantic_models.py index 9f2b7ded5b..d1cfc9a85c 100755 --- a/scripts-dev/check_pydantic_models.py +++ b/scripts-dev/check_pydantic_models.py @@ -36,11 +36,41 @@ import textwrap import traceback import unittest.mock from contextlib import contextmanager -from typing import Any, Callable, Dict, Generator, List, Set, Type, TypeVar +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Generator, + List, + Set, + Type, + TypeVar, +) from parameterized import parameterized -from pydantic import BaseModel as PydanticBaseModel, conbytes, confloat, conint, constr -from pydantic.typing import get_args + +from synapse._pydantic_compat import HAS_PYDANTIC_V2 + +if TYPE_CHECKING or HAS_PYDANTIC_V2: + from pydantic.v1 import ( + BaseModel as PydanticBaseModel, + conbytes, + confloat, + conint, + constr, + ) + from pydantic.v1.typing import get_args +else: + from pydantic import ( + BaseModel as PydanticBaseModel, + conbytes, + confloat, + conint, + constr, + ) + from pydantic.typing import get_args + from typing_extensions import ParamSpec logger = logging.getLogger(__name__) @@ -251,7 +281,10 @@ class TestConstrainedTypesPatch(unittest.TestCase): with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): run_test_snippet( """ - from pydantic import constr + try: + from pydantic.v1 import constr + except ImportError: + from pydantic import constr constr() """ ) @@ -269,7 +302,10 @@ class TestConstrainedTypesPatch(unittest.TestCase): with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): run_test_snippet( """ - from pydantic import * + try: + from pydantic.v1 import * + except ImportError: + from pydantic import * constr() """ ) @@ -278,7 +314,10 @@ class TestConstrainedTypesPatch(unittest.TestCase): with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): run_test_snippet( """ - from pydantic.types import constr + try: + from pydantic.v1.types import constr + except ImportError: + from pydantic.types import constr constr() """ ) @@ -287,8 +326,11 @@ class TestConstrainedTypesPatch(unittest.TestCase): with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): run_test_snippet( """ - import pydantic.types - pydantic.types.constr() + try: + from pydantic.v1 import types as pydantic_types + except ImportError: + from pydantic import types as pydantic_types + pydantic_types.constr() """ ) @@
-296,7 +338,10 @@ class TestConstrainedTypesPatch(unittest.TestCase): with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): run_test_snippet( """ - from pydantic import constr + try: + from pydantic.v1 import constr + except ImportError: + from pydantic import constr constr(min_length=10) """ ) @@ -305,7 +350,10 @@ class TestConstrainedTypesPatch(unittest.TestCase): with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): run_test_snippet( """ - from pydantic import constr + try: + from pydantic.v1 import constr + except ImportError: + from pydantic import constr constr(strict=False) """ ) @@ -314,7 +362,10 @@ class TestConstrainedTypesPatch(unittest.TestCase): with monkeypatch_pydantic(): run_test_snippet( """ - from pydantic import constr + try: + from pydantic.v1 import constr + except ImportError: + from pydantic import constr constr(strict=True) """ ) @@ -323,7 +374,10 @@ class TestConstrainedTypesPatch(unittest.TestCase): with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): run_test_snippet( """ - from pydantic import constr + try: + from pydantic.v1 import constr + except ImportError: + from pydantic import constr x: constr() """ ) @@ -332,7 +386,10 @@ class TestConstrainedTypesPatch(unittest.TestCase): with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): run_test_snippet( """ - from pydantic import BaseModel, conint + try: + from pydantic.v1 import BaseModel, conint + except ImportError: + from pydantic import BaseModel, conint class C: x: conint() """ @@ -361,7 +418,10 @@ class TestFieldTypeInspection(unittest.TestCase): run_test_snippet( f""" from typing import * - from pydantic import * + try: + from pydantic.v1 import * + except ImportError: + from pydantic import * class C(BaseModel): f: {annotation} """ @@ -388,7 +448,10 @@ class TestFieldTypeInspection(unittest.TestCase): run_test_snippet( f""" from typing import * - from pydantic import * + try: + from pydantic.v1 import * + except ImportError: + from pydantic import * class C(BaseModel): f: {annotation} """ @@ -398,7 +461,10 @@ class TestFieldTypeInspection(unittest.TestCase): with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException): run_test_snippet( """ - from pydantic.main import BaseModel + try: + from pydantic.v1.main import BaseModel + except ImportError: + from pydantic.main import BaseModel class C(BaseModel): f: str """ diff --git a/synapse/_pydantic_compat.py b/synapse/_pydantic_compat.py new file mode 100644 index 0000000000..ddff72afa1 --- /dev/null +++ b/synapse/_pydantic_compat.py @@ -0,0 +1,26 @@ +# Copyright 2023 Maxwell G +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from packaging.version import Version + +try: + from pydantic import __version__ as pydantic_version +except ImportError: + import importlib.metadata + + pydantic_version = importlib.metadata.version("pydantic") + +HAS_PYDANTIC_V2: bool = Version(pydantic_version).major == 2 + +__all__ = ("HAS_PYDANTIC_V2",) diff --git a/synapse/config/_util.py b/synapse/config/_util.py index acccca413b..746838eee3 100644 --- a/synapse/config/_util.py +++ b/synapse/config/_util.py @@ -11,10 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict, Type, TypeVar +from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar import jsonschema -from pydantic import BaseModel, ValidationError, parse_obj_as + +from synapse._pydantic_compat import HAS_PYDANTIC_V2 + +if TYPE_CHECKING or HAS_PYDANTIC_V2: + from pydantic.v1 import BaseModel, ValidationError, parse_obj_as +else: + from pydantic import BaseModel, ValidationError, parse_obj_as from synapse.config._base import ConfigError from synapse.types import JsonDict, StrSequence diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 6567fb6bb0..f1766088fc 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -15,10 +15,16 @@ import argparse import logging -from typing import Any, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union import attr -from pydantic import BaseModel, Extra, StrictBool, StrictInt, StrictStr + +from synapse._pydantic_compat import HAS_PYDANTIC_V2 + +if TYPE_CHECKING or HAS_PYDANTIC_V2: + from pydantic.v1 import BaseModel, Extra, StrictBool, StrictInt, StrictStr +else: + from pydantic import BaseModel, Extra, StrictBool, StrictInt, StrictStr from synapse.config._base import ( Config, diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 5da50cb0d2..a637fadfab 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -12,10 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import collections.abc -from typing import List, Type, Union, cast +from typing import TYPE_CHECKING, List, Type, Union, cast import jsonschema -from pydantic import Field, StrictBool, StrictStr + +from synapse._pydantic_compat import HAS_PYDANTIC_V2 + +if TYPE_CHECKING or HAS_PYDANTIC_V2: + from pydantic.v1 import Field, StrictBool, StrictStr +else: + from pydantic import Field, StrictBool, StrictStr from synapse.api.constants import ( MAX_ALIAS_LENGTH, diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 5d79d31579..d9d5655c95 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -28,8 +28,15 @@ from typing import ( overload, ) -from pydantic import BaseModel, MissingError, PydanticValueError, ValidationError -from pydantic.error_wrappers import ErrorWrapper +from synapse._pydantic_compat import HAS_PYDANTIC_V2 + +if TYPE_CHECKING or HAS_PYDANTIC_V2: + from pydantic.v1 import BaseModel, MissingError, PydanticValueError, ValidationError + from pydantic.v1.error_wrappers import ErrorWrapper +else: + from pydantic import BaseModel, MissingError, PydanticValueError, ValidationError + from pydantic.error_wrappers import ErrorWrapper + from typing_extensions import Literal from twisted.web.server import Request diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 49cd0805fd..e74a87af4d 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -18,7 +18,12 @@ import random from typing import TYPE_CHECKING, List, Optional, Tuple from urllib.parse import urlparse -from pydantic import StrictBool, StrictStr, constr +from synapse._pydantic_compat import HAS_PYDANTIC_V2 + +if TYPE_CHECKING or HAS_PYDANTIC_V2: + from pydantic.v1 import StrictBool, StrictStr, constr +else: + from pydantic import StrictBool, StrictStr, constr from typing_extensions import Literal from twisted.web.server import Request diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index 925f037743..80ae937921 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -17,7 +17,12 @@ import logging from http import HTTPStatus from typing import TYPE_CHECKING, List, Optional, Tuple -from pydantic import Extra, StrictStr +from synapse._pydantic_compat import HAS_PYDANTIC_V2 + +if TYPE_CHECKING or HAS_PYDANTIC_V2: + from pydantic.v1 import Extra, StrictStr +else: + from pydantic import Extra, StrictStr from synapse.api import errors from synapse.api.errors import NotFoundError, SynapseError, UnrecognizedRequestError diff --git a/synapse/rest/client/directory.py b/synapse/rest/client/directory.py index 570bb52747..82944ca711 100644 --- a/synapse/rest/client/directory.py +++ b/synapse/rest/client/directory.py @@ -15,7 +15,13 @@ import logging from typing import TYPE_CHECKING, List, Optional, Tuple -from pydantic import StrictStr +from synapse._pydantic_compat import HAS_PYDANTIC_V2 + +if TYPE_CHECKING or HAS_PYDANTIC_V2: + from pydantic.v1 import StrictStr +else: + from pydantic import StrictStr + from typing_extensions import Literal from twisted.web.server import Request diff --git a/synapse/rest/client/models.py b/synapse/rest/client/models.py index 3d7940b0fc..880f79473c 100644 --- a/synapse/rest/client/models.py +++ b/synapse/rest/client/models.py @@ -13,7 +13,12 @@ # limitations under the License. 
from typing import TYPE_CHECKING, Dict, Optional -from pydantic import Extra, StrictInt, StrictStr, constr, validator +from synapse._pydantic_compat import HAS_PYDANTIC_V2 + +if TYPE_CHECKING or HAS_PYDANTIC_V2: + from pydantic.v1 import Extra, StrictInt, StrictStr, constr, validator +else: + from pydantic import Extra, StrictInt, StrictStr, constr, validator from synapse.rest.models import RequestBodyModel from synapse.util.threepids import validate_email diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 0aaa838d04..48c47058db 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -16,7 +16,13 @@ import logging import re from typing import TYPE_CHECKING, Dict, Mapping, Optional, Set, Tuple -from pydantic import Extra, StrictInt, StrictStr +from synapse._pydantic_compat import HAS_PYDANTIC_V2 + +if TYPE_CHECKING or HAS_PYDANTIC_V2: + from pydantic.v1 import Extra, StrictInt, StrictStr +else: + from pydantic import StrictInt, StrictStr, Extra + from signedjson.sign import sign_json from twisted.web.server import Request diff --git a/synapse/rest/models.py b/synapse/rest/models.py index d47de5c19e..de354a2135 100644 --- a/synapse/rest/models.py +++ b/synapse/rest/models.py @@ -11,7 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from pydantic import BaseModel, Extra +from typing import TYPE_CHECKING + +from synapse._pydantic_compat import HAS_PYDANTIC_V2 + +if TYPE_CHECKING or HAS_PYDANTIC_V2: + from pydantic.v1 import BaseModel, Extra +else: + from pydantic import BaseModel, Extra class RequestBodyModel(BaseModel): diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 99ebd96f84..12829d3d7d 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -31,8 +31,8 @@ from typing import ( ) import attr -from pydantic import BaseModel +from synapse._pydantic_compat import HAS_PYDANTIC_V2 from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.engines import PostgresEngine from synapse.storage.types import Connection, Cursor @@ -41,6 +41,11 @@ from synapse.util import Clock, json_encoder from . import engines +if TYPE_CHECKING or HAS_PYDANTIC_V2: + from pydantic.v1 import BaseModel +else: + from pydantic import BaseModel + if TYPE_CHECKING: from synapse.server import HomeServer from synapse.storage.database import DatabasePool, LoggingTransaction diff --git a/tests/rest/client/test_models.py b/tests/rest/client/test_models.py index 0b8fcb0c47..524ea6047e 100644 --- a/tests/rest/client/test_models.py +++ b/tests/rest/client/test_models.py @@ -12,12 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import unittest as stdlib_unittest +from typing import TYPE_CHECKING -from pydantic import BaseModel, ValidationError from typing_extensions import Literal +from synapse._pydantic_compat import HAS_PYDANTIC_V2 from synapse.rest.client.models import EmailRequestTokenBody +if TYPE_CHECKING or HAS_PYDANTIC_V2: + from pydantic.v1 import BaseModel, ValidationError +else: + from pydantic import BaseModel, ValidationError + class ThreepidMediumEnumTestCase(stdlib_unittest.TestCase): class Model(BaseModel): From 139a24de9ee0e81faece1e375a197123a6e10b67 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Sep 2023 11:39:54 -0400 Subject: [PATCH 518/562] Bump actions/checkout from 3 to 4 (#16250) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 2 +- .github/workflows/docs-pr.yaml | 4 +- .github/workflows/docs.yaml | 4 +- .github/workflows/latest_deps.yml | 12 +++--- .github/workflows/poetry_lockfile.yaml | 2 +- .github/workflows/push_complement_image.yml | 6 +-- .github/workflows/release-artifacts.yml | 8 ++-- .github/workflows/tests.yml | 44 ++++++++++----------- .github/workflows/twisted_trunk.yml | 12 +++--- 9 files changed, 47 insertions(+), 47 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 12440f10a8..ebad0d4a98 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -30,7 +30,7 @@ jobs: run: docker buildx inspect - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Extract version from pyproject.toml # Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml index 6634f2644e..3704bd66e2 100644 --- a/.github/workflows/docs-pr.yaml +++ b/.github/workflows/docs-pr.yaml @@ -12,7 +12,7 @@ jobs: name: GitHub Pages runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup mdbook uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0 @@ -39,7 +39,7 @@ jobs: name: Check links in documentation runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup mdbook uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0 diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 2bd0f32566..c7cb2d78e5 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -50,7 +50,7 @@ jobs: needs: - pre steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup mdbook uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0 @@ -80,7 +80,7 @@ jobs: needs: - pre steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: "Set up Sphinx" uses: matrix-org/setup-python-poetry@v1 diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index 7b839f59c1..c9ec70abe9 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -39,7 +39,7 @@ jobs: if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 @@ -72,7 +72,7 @@ jobs: postgres-version: "14" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: 
Install Rust uses: dtolnay/rust-toolchain@stable @@ -145,7 +145,7 @@ jobs: BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@stable @@ -192,8 +192,8 @@ jobs: database: Postgres steps: - - name: Run actions/checkout@v3 for synapse - uses: actions/checkout@v3 + - name: Run actions/checkout@v4 for synapse + uses: actions/checkout@v4 with: path: synapse @@ -222,7 +222,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/poetry_lockfile.yaml b/.github/workflows/poetry_lockfile.yaml index ae4d27f2de..4dd0f7d41f 100644 --- a/.github/workflows/poetry_lockfile.yaml +++ b/.github/workflows/poetry_lockfile.yaml @@ -16,7 +16,7 @@ jobs: name: "Check locked dependencies have sdists" runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: '3.x' diff --git a/.github/workflows/push_complement_image.yml b/.github/workflows/push_complement_image.yml index e994b122cd..6fbd2ed015 100644 --- a/.github/workflows/push_complement_image.yml +++ b/.github/workflows/push_complement_image.yml @@ -33,17 +33,17 @@ jobs: packages: write steps: - name: Checkout specific branch (debug build) - uses: actions/checkout@v3 + uses: actions/checkout@v4 if: github.event_name == 'workflow_dispatch' with: ref: ${{ inputs.branch }} - name: Checkout clean copy of develop (scheduled build) - uses: actions/checkout@v3 + uses: actions/checkout@v4 if: github.event_name == 'schedule' with: ref: develop - name: Checkout clean copy of master (on-push) - uses: actions/checkout@v3 + uses: actions/checkout@v4 if: github.event_name == 'push' with: ref: master diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index b1ee3be7ec..fed3a41586 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -27,7 +27,7 @@ jobs: name: "Calculate list of debian distros" runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: '3.x' @@ -55,7 +55,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: path: src @@ -121,7 +121,7 @@ jobs: arch: aarch64 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: @@ -167,7 +167,7 @@ jobs: if: ${{ !startsWith(github.ref, 'refs/pull/') }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: '3.10' diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index b5fffb3653..fdc79715ac 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -37,7 +37,7 @@ jobs: check-sampleconfig: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 @@ -52,7 +52,7 @@ jobs: check-schema-delta: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: "3.x" @@ -62,7 +62,7 @@ jobs: check-lockfile: runs-on: ubuntu-latest steps: - - uses: 
actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: "3.x" @@ -72,7 +72,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Poetry uses: matrix-org/setup-python-poetry@v1 @@ -94,7 +94,7 @@ jobs: name: Typechecking steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@1.61.0 @@ -127,7 +127,7 @@ jobs: lint-crlf: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Check line endings run: scripts-dev/check_line_terminators.sh @@ -135,7 +135,7 @@ jobs: if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }} runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 @@ -150,7 +150,7 @@ jobs: lint-pydantic: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} - name: Install Rust @@ -168,7 +168,7 @@ jobs: if: ${{ needs.changes.outputs.rust == 'true' }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@1.61.0 @@ -186,7 +186,7 @@ jobs: if: ${{ needs.changes.outputs.rust == 'true' }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@master @@ -203,7 +203,7 @@ jobs: if: ${{ needs.changes.outputs.rust == 'true' }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@master @@ -239,7 +239,7 @@ jobs: needs: linting-done runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: "3.x" @@ -258,7 +258,7 @@ jobs: job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - run: sudo apt-get -qq install xmlsec1 - name: Set up PostgreSQL ${{ matrix.job.postgres-version }} if: ${{ matrix.job.postgres-version }} @@ -310,7 +310,7 @@ jobs: needs: linting-done runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@1.61.0 @@ -371,7 +371,7 @@ jobs: extras: ["all"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 # Install libs necessary for PyPy to build binary wheels for dependencies - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev - uses: matrix-org/setup-python-poetry@v1 @@ -416,7 +416,7 @@ jobs: job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Prepare test blacklist run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers @@ -461,7 +461,7 @@ jobs: --health-retries 5 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - run: sudo apt-get -qq install xmlsec1 postgresql-client - uses: matrix-org/setup-python-poetry@v1 with: @@ -503,7 +503,7 @@ jobs: --health-retries 5 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Add PostgreSQL apt repository # We need a version of pg_dump that can handle the version of # PostgreSQL being tested against. 
The Ubuntu package repository lags @@ -555,8 +555,8 @@ jobs: database: Postgres steps: - - name: Run actions/checkout@v3 for synapse - uses: actions/checkout@v3 + - name: Run actions/checkout@v4 for synapse + uses: actions/checkout@v4 with: path: synapse @@ -586,7 +586,7 @@ jobs: - changes steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@1.61.0 @@ -604,7 +604,7 @@ jobs: - changes steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@master diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 7d629a4ed0..062f782e8b 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -40,7 +40,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@stable @@ -64,7 +64,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - run: sudo apt-get -qq install xmlsec1 - name: Install Rust @@ -108,7 +108,7 @@ jobs: - ${{ github.workspace }}:/src steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@stable @@ -163,8 +163,8 @@ jobs: database: Postgres steps: - - name: Run actions/checkout@v3 for synapse - uses: actions/checkout@v3 + - name: Run actions/checkout@v4 for synapse + uses: actions/checkout@v4 with: path: synapse @@ -203,7 +203,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From b225acf3e60f2413fd9bc8198ddbecf6d5ad4f84 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Sep 2023 17:48:42 +0100 Subject: [PATCH 519/562] Bump types-psycopg2 from 2.9.21.11 to 2.9.21.14 (#16381) * Bump types-psycopg2 from 2.9.21.11 to 2.9.21.14 Bumps [types-psycopg2](https://github.com/python/typeshed) from 2.9.21.11 to 2.9.21.14. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-psycopg2 dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Relax the annotation of Cursor.description See https://github.com/matrix-org/synapse/pull/16343#issuecomment-1726083384 for rationale. * Changelog * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: David Robertson --- changelog.d/16381.misc | 1 + poetry.lock | 6 +++--- synapse/storage/database.py | 14 +------------- synapse/storage/types.py | 20 ++++---------------- 4 files changed, 9 insertions(+), 32 deletions(-) create mode 100644 changelog.d/16381.misc diff --git a/changelog.d/16381.misc b/changelog.d/16381.misc new file mode 100644 index 0000000000..a454651952 --- /dev/null +++ b/changelog.d/16381.misc @@ -0,0 +1 @@ +Improve type hints, and bump types-psycopg2 from 2.9.21.11 to 2.9.21.14. 
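As the comment added to `synapse/storage/types.py` in the hunks below notes, Synapse only assumes that `column[0]` of each entry in a cursor's `description` is the column name; the approximate seven-element tuple annotation is therefore relaxed to `Optional[Sequence[Any]]`. A minimal sketch of the one access pattern the relaxed annotation still needs to support (the helper name here is illustrative, not part of this patch):

```python
from typing import Any, List, Optional, Sequence


def column_names(description: Optional[Sequence[Any]]) -> List[str]:
    # Per DB-API 2, each entry in `description` describes one result column.
    # The only part relied upon here is the first element: the column name.
    if description is None:
        return []
    return [column[0] for column in description]
```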
diff --git a/poetry.lock b/poetry.lock index 9a8be27b6f..bf229349cb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3093,13 +3093,13 @@ files = [ [[package]] name = "types-psycopg2" -version = "2.9.21.11" +version = "2.9.21.14" description = "Typing stubs for psycopg2" optional = false python-versions = "*" files = [ - {file = "types-psycopg2-2.9.21.11.tar.gz", hash = "sha256:d5077eacf90e61db8c0b8eea2fdc9d4a97d7aaa16865fb4bd7034a7571520b4d"}, - {file = "types_psycopg2-2.9.21.11-py3-none-any.whl", hash = "sha256:7a323d7744bc8a882fb5a6f63448e903fc70d3dc0d6da9ec1f9c6c4dc10a7102"}, + {file = "types-psycopg2-2.9.21.14.tar.gz", hash = "sha256:bf73a0ac4da4e278c89bf1b01fc596d5a5ac7a356cfe6ac0249f47b9e259f868"}, + {file = "types_psycopg2-2.9.21.14-py3-none-any.whl", hash = "sha256:cd9c5350631f3bc6184ec8d48f2ed31d4ea660f89d0fffe78239450782f383c5"}, ] [[package]] diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 697bc5651c..ca894edd5a 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -361,19 +361,7 @@ class LoggingTransaction: @property def description( self, - ) -> Optional[ - Sequence[ - Tuple[ - str, - Optional[Any], - Optional[int], - Optional[int], - Optional[int], - Optional[int], - Optional[int], - ] - ] - ]: + ) -> Optional[Sequence[Any]]: return self.txn.description def execute_batch(self, sql: str, args: Iterable[Iterable[Any]]) -> None: diff --git a/synapse/storage/types.py b/synapse/storage/types.py index 34ac807530..afaeef9a5a 100644 --- a/synapse/storage/types.py +++ b/synapse/storage/types.py @@ -53,22 +53,10 @@ class Cursor(Protocol): @property def description( self, - ) -> Optional[ - Sequence[ - # Note that this is an approximate typing based on sqlite3 and other - # drivers, and may not be entirely accurate. - # FWIW, the DBAPI 2 spec is: https://peps.python.org/pep-0249/#description - Tuple[ - str, - Optional[Any], - Optional[int], - Optional[int], - Optional[int], - Optional[int], - Optional[int], - ] - ] - ]: + ) -> Optional[Sequence[Any]]: + # At the time of writing, Synapse only assumes that `column[0]: str` for each + # `column in description`. Since this is hard to express in the type system, and + # as this is rarely used in Synapse, we deem `column: Any` good enough. ... @property From 525d9d6e113a19faa2ff38b512231b3c1466aa4f Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 25 Sep 2023 17:59:40 +0100 Subject: [PATCH 520/562] Avoid running CI steps when the files they check have not been changed. 
(#14745) --- .github/workflows/tests.yml | 96 +++++++++++++++++++++++++++++++------ changelog.d/14745.misc | 1 + 2 files changed, 83 insertions(+), 14 deletions(-) create mode 100644 changelog.d/14745.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index fdc79715ac..045d3dd257 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -22,6 +22,9 @@ jobs: runs-on: ubuntu-latest outputs: rust: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.rust }} + trial: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.trial }} + integration: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.integration }} + linting: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting }} steps: - uses: dorny/paths-filter@v2 id: filter @@ -33,9 +36,45 @@ jobs: - 'rust/**' - 'Cargo.toml' - 'Cargo.lock' + - '.rustfmt.toml' + + trial: + - 'synapse/**' + - 'tests/**' + - 'rust/**' + - 'Cargo.toml' + - 'Cargo.lock' + - 'pyproject.toml' + - 'poetry.lock' + + integration: + - 'synapse/**' + - 'rust/**' + - 'docker/**' + - 'Cargo.toml' + - 'Cargo.lock' + - 'pyproject.toml' + - 'poetry.lock' + - 'docker/**' + + linting: + - 'synapse/**' + - 'docker/**' + - 'tests/**' + - 'scripts-dev/**' + - 'contrib/**' + - 'synmark/**' + - 'stubs/**' + - '.ci/**' + - 'mypy.ini' + - 'pyproject.toml' + - 'poetry.lock' check-sampleconfig: runs-on: ubuntu-latest + needs: changes + if: ${{ needs.changes.outputs.linting == 'true' }} + steps: - uses: actions/checkout@v4 - name: Install Rust @@ -51,6 +90,9 @@ jobs: check-schema-delta: runs-on: ubuntu-latest + needs: changes + if: ${{ needs.changes.outputs.linting == 'true' }} + steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v4 @@ -70,6 +112,9 @@ jobs: lint: runs-on: ubuntu-latest + needs: changes + if: ${{ needs.changes.outputs.linting == 'true' }} + steps: - name: Checkout repository uses: actions/checkout@v4 @@ -92,6 +137,9 @@ jobs: lint-mypy: runs-on: ubuntu-latest name: Typechecking + needs: changes + if: ${{ needs.changes.outputs.linting == 'true' }} + steps: - name: Checkout repository uses: actions/checkout@v4 @@ -149,6 +197,9 @@ jobs: lint-pydantic: runs-on: ubuntu-latest + needs: changes + if: ${{ needs.changes.outputs.linting == 'true' }} + steps: - uses: actions/checkout@v4 with: @@ -250,8 +301,10 @@ jobs: sytest_test_matrix: ${{ steps.get-matrix.outputs.sytest_test_matrix }} trial: - if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail - needs: calculate-test-jobs + if: ${{ !cancelled() && !failure() && needs.changes.outputs.trial == 'true' }} # Allow previous steps to be skipped, but not fail + needs: + - calculate-test-jobs + - changes runs-on: ubuntu-latest strategy: matrix: @@ -306,8 +359,10 @@ jobs: trial-olddeps: # Note: sqlite only; no postgres - if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail - needs: linting-done + if: ${{ !cancelled() && !failure() && needs.changes.outputs.trial == 'true' }} # Allow previous steps to be skipped, but not fail + needs: + - linting-done + - changes runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v4 @@ -362,8 +417,10 @@ jobs: trial-pypy: # Very slow; only run if the branch name includes 'pypy' # Note: sqlite only; no postgres. Completely untested since poetry move. 
- if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() }} - needs: linting-done + if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() && needs.changes.outputs.trial == 'true' }} + needs: + - linting-done + - changes runs-on: ubuntu-latest strategy: matrix: @@ -394,8 +451,10 @@ jobs: || true sytest: - if: ${{ !failure() && !cancelled() }} - needs: calculate-test-jobs + if: ${{ !failure() && !cancelled() && needs.changes.outputs.integration == 'true' }} + needs: + - calculate-test-jobs + - changes runs-on: ubuntu-latest container: image: matrixdotorg/sytest-synapse:${{ matrix.job.sytest-tag }} @@ -476,8 +535,10 @@ jobs: portdb: - if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail - needs: linting-done + if: ${{ !failure() && !cancelled() && needs.changes.outputs.linting == 'true' }} # Allow previous steps to be skipped, but not fail + needs: + - linting-done + - changes runs-on: ubuntu-latest strategy: matrix: @@ -537,8 +598,10 @@ jobs: schema_diff complement: - if: "${{ !failure() && !cancelled() }}" - needs: linting-done + if: "${{ !failure() && !cancelled() && needs.changes.outputs.integration == 'true' }}" + needs: + - linting-done + - changes runs-on: ubuntu-latest strategy: @@ -632,9 +695,14 @@ jobs: with: needs: ${{ toJSON(needs) }} - # The newsfile and signoff lints may be skipped on non PR builds - # Cargo test is skipped if there is no changes on Rust code + # Various bits are skipped if there were no applicable changes. + # The newsfile and signoff lint may be skipped on non PR builds. skippable: | + trial + trial-olddeps + sytest + portdb + complement check-signoff lint-newsfile cargo-test diff --git a/changelog.d/14745.misc b/changelog.d/14745.misc new file mode 100644 index 0000000000..eae0501d6b --- /dev/null +++ b/changelog.d/14745.misc @@ -0,0 +1 @@ +Avoid running CI steps when the files they check have not been changed. \ No newline at end of file From 9fd18e9b06692c3bc91c9809cc03ec3a6bc3dade Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 25 Sep 2023 18:43:09 +0100 Subject: [PATCH 521/562] Add developer documentation concerning gradual schema migrations with column alterations. (#15691) Co-authored-by: Eric Eastwood --- changelog.d/15691.doc | 1 + docs/development/database_schema.md | 157 ++++++++++++++++++++++++++++ 2 files changed, 158 insertions(+) create mode 100644 changelog.d/15691.doc diff --git a/changelog.d/15691.doc b/changelog.d/15691.doc new file mode 100644 index 0000000000..fe649e1027 --- /dev/null +++ b/changelog.d/15691.doc @@ -0,0 +1 @@ +Add developer documentation concerning gradual schema migrations with column alterations. \ No newline at end of file diff --git a/docs/development/database_schema.md b/docs/development/database_schema.md index e231be21dd..675080ae1b 100644 --- a/docs/development/database_schema.md +++ b/docs/development/database_schema.md @@ -184,3 +184,160 @@ version `3`, that can only happen with a hash collision, which we basically hope will never happen (SHA256 has a massive big key space). +## Worked examples of gradual migrations + +Some migrations need to be performed gradually. A prime example of this is anything +which would need to do a large table scan, including adding columns, indices or +`NOT NULL` constraints to non-empty tables. Such a migration should be done as a +background update where possible, at least on Postgres.
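+As a rough, illustrative sketch of what "gradually" means here (the helper
+below is hypothetical and uses plain `sqlite3` for brevity, rather than
+Synapse's real background-update machinery), a backfill such as the `mytable`
+example worked through below would run in small batches, so that no single
+transaction has to scan the whole table:
+
+```python
+import sqlite3
+
+
+def backfill_in_batches(conn: sqlite3.Connection, batch_size: int = 1000) -> None:
+    """Populate new_column from old_column one batch at a time, committing
+    between batches so that the table is never locked for long."""
+    last_id = 0
+    while True:
+        # Grab the next batch of row ids that still need backfilling.
+        rows = conn.execute(
+            "SELECT mytable_id FROM mytable"
+            " WHERE new_column IS NULL AND mytable_id > ?"
+            " ORDER BY mytable_id LIMIT ?",
+            (last_id, batch_size),
+        ).fetchall()
+        if not rows:
+            break
+        last_id = rows[-1][0]
+        conn.execute(
+            "UPDATE mytable SET new_column = old_column * 100"
+            " WHERE new_column IS NULL AND mytable_id <= ?",
+            (last_id,),
+        )
+        conn.commit()
+```
+
+Committing between batches is what keeps such a migration safe to run while
+the server is live.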
+We can afford to be more relaxed about SQLite databases since they are usually +used on smaller deployments and SQLite does not support the same concurrent +DDL operations as Postgres. + +We also typically insist on having at least one Synapse version's worth of +backwards compatibility, so that administrators can roll back Synapse if an upgrade +did not go smoothly. + +This sometimes results in having to plan a migration across multiple versions +of Synapse. + +This section includes an example and may include more in the future. + + + +### Transforming a column into another one, with `NOT NULL` constraints + +This example illustrates how you would introduce a new column, write data into it +based on data from an old column and then drop the old column. + +We are aiming for semantic equivalence to: + +```sql +ALTER TABLE mytable ADD COLUMN new_column INTEGER; +UPDATE mytable SET new_column = old_column * 100; +ALTER TABLE mytable ALTER COLUMN new_column SET NOT NULL; +ALTER TABLE mytable DROP COLUMN old_column; +``` + +#### Synapse version `N` + +```python +SCHEMA_VERSION = S +SCHEMA_COMPAT_VERSION = ... # unimportant at this stage +``` + +**Invariants:** +1. `old_column` is read by Synapse and written to by Synapse. + + +#### Synapse version `N + 1` + +```python +SCHEMA_VERSION = S + 1 +SCHEMA_COMPAT_VERSION = ... # unimportant at this stage +``` + +**Changes:** +1. + ```sql + ALTER TABLE mytable ADD COLUMN new_column INTEGER; + ``` + +**Invariants:** +1. `old_column` is read by Synapse and written to by Synapse. +2. `new_column` is written to by Synapse. + +**Notes:** +1. `new_column` can't have a `NOT NULL NOT VALID` constraint yet, because the previous Synapse version did not write to the new column (since we haven't bumped the `SCHEMA_COMPAT_VERSION` yet, we still need to be compatible with the previous version). + + +#### Synapse version `N + 2` + +```python +SCHEMA_VERSION = S + 2 +SCHEMA_COMPAT_VERSION = S + 1 # this signals that we can't roll back to a time before new_column existed +``` + +**Changes:** +1. On Postgres, add a `NOT VALID` constraint to ensure new rows are compliant. *SQLite does not have such a construct, but it would be unnecessary anyway since there is no way to concurrently perform this migration on SQLite.* + ```sql + ALTER TABLE mytable ADD CONSTRAINT new_column_not_null CHECK (new_column IS NOT NULL) NOT VALID; + ``` +2. Start a background update to perform the migration: it should gradually run e.g. + ```sql + UPDATE mytable SET new_column = old_column * 100 WHERE 0 < mytable_id AND mytable_id <= 5; + ``` + This background update is technically pointless on SQLite, but you must schedule it anyway so that the `portdb` script to migrate to Postgres still works. +3. Upon completion of the background update, you should run `VALIDATE CONSTRAINT` on Postgres to turn the `NOT VALID` constraint into a valid one. + ```sql + ALTER TABLE mytable VALIDATE CONSTRAINT new_column_not_null; + ``` + This will take some time but does **NOT** hold an exclusive lock over the table. + +**Invariants:** +1. `old_column` is read by Synapse and written to by Synapse. +2. `new_column` is written to by Synapse and new rows always have a non-`NULL` value in this field. + + +**Notes:** +1. 
If you wish, you can convert the `CHECK (new_column IS NOT NULL)` to a `NOT NULL` constraint free of charge in Postgres by adding the `NOT NULL` constraint and then dropping the `CHECK` constraint, because Postgres can statically verify that the `NOT NULL` constraint is implied by the `CHECK` constraint without performing a table scan. +2. It might be tempting to make version `N + 2` redundant by moving the background update to `N + 1` and delaying adding the `NOT NULL` constraint to `N + 3`, but that would mean the constraint would always be validated in the foreground in `N + 3`, whereas if the `N + 2` step is kept, the migration in `N + 3` would be fast in the happy case. + +#### Synapse version `N + 3` + +```python +SCHEMA_VERSION = S + 3 +SCHEMA_COMPAT_VERSION = S + 1 # we can't roll back to a time before new_column existed +``` + +**Changes:** +1. (Postgres) Update the table to populate values of `new_column` in case the background update had not completed. Additionally, `VALIDATE CONSTRAINT` to make the check fully valid. + ```sql + -- you ideally want an index on `new_column` or e.g. `(new_column) WHERE new_column IS NULL` first, or perhaps you can find a way to skip this if the `NOT NULL` constraint has already been validated. + UPDATE mytable SET new_column = old_column * 100 WHERE new_column IS NULL; + + -- this is a no-op if it already ran as part of the background update + ALTER TABLE mytable VALIDATE CONSTRAINT new_column_not_null; + ``` +2. (SQLite) Recreate the table by precisely following [the 12-step procedure for SQLite table schema changes](https://www.sqlite.org/lang_altertable.html#otheralter). + During this table rewrite, you should recreate `new_column` as `NOT NULL` and populate any outstanding `NULL` values at the same time. + Unfortunately, you can't drop `old_column` yet because it must be present for compatibility with the Postgres schema, as needed by `portdb`. + (Otherwise you could do this all in one go with SQLite!) + +**Invariants:** +1. `old_column` is written to by Synapse (but no longer read by Synapse!). +2. `new_column` is read by Synapse and written to by Synapse. Moreover, all rows have a non-`NULL` value in this field, as guaranteed by a schema constraint. + +**Notes:** +1. We can't drop `old_column` yet, or even stop writing to it, because that would break a rollback to the previous version of Synapse. +2. Application code can now rely on `new_column` being populated. The remaining steps are only motivated by the wish to clean up old columns. + + +#### Synapse version `N + 4` + +```python +SCHEMA_VERSION = S + 4 +SCHEMA_COMPAT_VERSION = S + 3 # we can't roll back to a time before new_column was entirely non-NULL +``` + +**Invariants:** +1. `old_column` exists but is not written to or read from by Synapse. +2. `new_column` is read by Synapse and written to by Synapse. Moreover, all rows have a non-`NULL` value in this field, as guaranteed by a schema constraint. + +**Notes:** +1. We can't drop `old_column` yet because that would break a rollback to the previous version of Synapse. \ + **TODO:** It may be possible to relax this and drop the column straight away as long as the previous version of Synapse detected a rollback occurred and stopped attempting to write to the column. This could possibly be done by checking whether the database's schema compatibility version was `S + 3`. 
+
+
+#### Synapse version `N + 5`
+
+```python
+SCHEMA_VERSION = S + 5
+SCHEMA_COMPAT_VERSION = S + 4 # we can't roll back to a time before old_column was no longer being touched
+```
+
+**Changes:**
+1.
+   ```sql
+   ALTER TABLE mytable DROP COLUMN old_column;
+   ```

From 2763c49eca483dbb848b70b951891afd57016f17 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Mon, 25 Sep 2023 14:50:47 -0400
Subject: [PATCH 522/562] Improve comments in StateGroupBackgroundUpdateStore.
 (#16383)

---
 changelog.d/16383.misc                        |  1 +
 synapse/storage/databases/state/bg_updates.py | 18 ++++++++++++++++--
 2 files changed, 17 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/16383.misc

diff --git a/changelog.d/16383.misc b/changelog.d/16383.misc
new file mode 100644
index 0000000000..d8d84cc184
--- /dev/null
+++ b/changelog.d/16383.misc
@@ -0,0 +1 @@
+Improve comments in `StateGroupBackgroundUpdateStore`.
diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py
index 5b8ba436d4..6ff533a129 100644
--- a/synapse/storage/databases/state/bg_updates.py
+++ b/synapse/storage/databases/state/bg_updates.py
@@ -94,6 +94,18 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore):
         groups: List[int],
         state_filter: Optional[StateFilter] = None,
     ) -> Mapping[int, StateMap[str]]:
+        """
+        Given a number of state groups, fetch the latest state for each group.
+
+        Args:
+            txn: The transaction object.
+            groups: The given state groups that you want to fetch the latest state for.
+            state_filter: The state filter to apply to the state we fetch from the database.
+
+        Returns:
+            Map from state_group to a StateMap at that point.
+        """
+
         state_filter = state_filter or StateFilter.all()
 
         results: Dict[int, MutableStateMap[str]] = {group: {} for group in groups}
@@ -206,8 +218,10 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore):
         if where_clause:
             where_clause = " AND (%s)" % (where_clause,)
 
-        # We don't use WITH RECURSIVE on sqlite3 as there are distributions
-        # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
+        # XXX: We could `WITH RECURSIVE` here since it's supported on SQLite 3.8.3
+        # or higher and our minimum supported version is greater than that.
+        #
+        # We just haven't put in the time to refactor this.
         for group in groups:
             next_group: Optional[int] = group
 

From 47ffc7e5482cc8d7bc376f362f8db5baddbcf4b3 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 26 Sep 2023 13:49:44 +0300
Subject: [PATCH 523/562] Reduce calls to `send_presence_to_destinations`
 (#16385)

---
 changelog.d/16385.misc       |  1 +
 synapse/handlers/presence.py | 33 ++++++++++++++++++---------------
 2 files changed, 19 insertions(+), 15 deletions(-)
 create mode 100644 changelog.d/16385.misc

diff --git a/changelog.d/16385.misc b/changelog.d/16385.misc
new file mode 100644
index 0000000000..d439a931d6
--- /dev/null
+++ b/changelog.d/16385.misc
@@ -0,0 +1 @@
+Minor performance improvement when sending presence to federated servers.
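For context on the diff that follows: `get_interested_remotes` previously returned a per-destination mapping, so a presence update in a room with many remote hosts produced one `send_presence_to_destinations` call per host. It now returns `(destinations, states)` pairs, letting a single call fan out to a whole set of hosts. A toy illustration of the call-count difference (illustrative data only, not Synapse's real types):

```python
# One presence state to broadcast to a room with three remote hosts.
state = "@alice:example.org/online"
room_hosts = ["one.example", "two.example", "three.example"]

# Old shape: Dict[destination, Set[state]] -> one send call per destination.
old_shape = {host: {state} for host in room_hosts}
print(len(old_shape))  # 3 federation calls

# New shape: List[(destinations, states)] -> a single call whose recipient
# list the federation sender fans out itself.
new_shape = [(room_hosts, [state])]
print(len(new_shape))  # 1 federation call
```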
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 375c7d0901..7c7cda3e95 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -401,9 +401,9 @@ class BasePresenceHandler(abc.ABC): states, ) - for destination, host_states in hosts_to_states.items(): + for destinations, host_states in hosts_to_states: await self._federation.send_presence_to_destinations( - host_states, [destination] + host_states, destinations ) async def send_full_presence_to_users(self, user_ids: StrCollection) -> None: @@ -1000,9 +1000,9 @@ class PresenceHandler(BasePresenceHandler): list(to_federation_ping.values()), ) - for destination, states in hosts_to_states.items(): + for destinations, states in hosts_to_states: await self._federation_queue.send_presence_to_destinations( - states, [destination] + states, destinations ) @wrap_as_background_process("handle_presence_timeouts") @@ -2276,7 +2276,7 @@ async def get_interested_remotes( store: DataStore, presence_router: PresenceRouter, states: List[UserPresenceState], -) -> Dict[str, Set[UserPresenceState]]: +) -> List[Tuple[StrCollection, Collection[UserPresenceState]]]: """Given a list of presence states figure out which remote servers should be sent which. @@ -2290,23 +2290,26 @@ async def get_interested_remotes( Returns: A map from destinations to presence states to send to that destination. """ - hosts_and_states: Dict[str, Set[UserPresenceState]] = {} + hosts_and_states: List[Tuple[StrCollection, Collection[UserPresenceState]]] = [] # First we look up the rooms each user is in (as well as any explicit # subscriptions), then for each distinct room we look up the remote # hosts in those rooms. - room_ids_to_states, users_to_states = await get_interested_parties( - store, presence_router, states - ) + for state in states: + room_ids = await store.get_rooms_for_user(state.user_id) + hosts: Set[str] = set() + for room_id in room_ids: + room_hosts = await store.get_current_hosts_in_room(room_id) + hosts.update(room_hosts) + hosts_and_states.append((hosts, [state])) - for room_id, states in room_ids_to_states.items(): - hosts = await store.get_current_hosts_in_room(room_id) - for host in hosts: - hosts_and_states.setdefault(host, set()).update(states) + # Ask a presence routing module for any additional parties if one + # is loaded. + router_users_to_states = await presence_router.get_users_for_states(states) - for user_id, states in users_to_states.items(): + for user_id, user_states in router_users_to_states.items(): host = get_domain_from_id(user_id) - hosts_and_states.setdefault(host, set()).update(states) + hosts_and_states.append(([host], user_states)) return hosts_and_states From ec1c709440d99b0d8042b669e17c899cd6fd8b84 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 26 Sep 2023 09:44:38 -0400 Subject: [PATCH 524/562] Add documentation about the user directory search algorithm (#16320) --- changelog.d/16320.doc | 1 + docs/user_directory.md | 134 +++++++++++++++++++++++++++++++++-------- 2 files changed, 110 insertions(+), 25 deletions(-) create mode 100644 changelog.d/16320.doc diff --git a/changelog.d/16320.doc b/changelog.d/16320.doc new file mode 100644 index 0000000000..53e42df56f --- /dev/null +++ b/changelog.d/16320.doc @@ -0,0 +1 @@ +Improve documentation of the user directory search algorithm. 
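The documentation diff below describes how a search term is lowercased, NFKC-normalised, and split into words before querying. As a quick, self-contained sketch of that preprocessing (the regex approximates the documented ASCII fallback; it is not Synapse's exact implementation, which prefers ICU when available):

```python
import re
import unicodedata
from typing import List

def parse_words(search_term: str) -> List[str]:
    # Lowercase, then NFKC-normalise: the ligature "fi" in "ﬁnd" becomes
    # "fi", and full-width characters collapse to their ASCII equivalents.
    term = unicodedata.normalize("NFKC", search_term.lower())
    # Fallback word-splitting when ICU is unavailable: runs of ASCII
    # letters, digits, underscores and hyphens count as words.
    return re.findall(r"[a-z0-9_\-]+", term)

print(parse_words("ＭＡＴＲＩＸ ﬁnd-me"))  # ['matrix', 'find-me']
```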
diff --git a/docs/user_directory.md b/docs/user_directory.md index c4794b04cf..b33fd2bc2a 100644 --- a/docs/user_directory.md +++ b/docs/user_directory.md @@ -1,49 +1,133 @@ -User Directory API Implementation -================================= +# User Directory API Implementation -The user directory is currently maintained based on the 'visible' users -on this particular server - i.e. ones which your account shares a room with, or -who are present in a publicly viewable room present on the server. +The user directory is maintained based on users that are 'visible' to the homeserver - +i.e. ones which are local to the server and ones which any local user shares a +room with. -The directory info is stored in various tables, which can (typically after -DB corruption) get stale or out of sync. If this happens, for now the +The directory info is stored in various tables, which can sometimes get out of +sync (although this is considered a bug). If this happens, for now the solution to fix it is to use the [admin API](usage/administration/admin_api/background_updates.md#run) and execute the job `regenerate_directory`. This should then start a background task to -flush the current tables and regenerate the directory. +flush the current tables and regenerate the directory. Depending on the size +of your homeserver (number of users and rooms) this can take a while. -Data model ----------- +## Data model There are five relevant tables that collectively form the "user directory". -Three of them track a master list of all the users we could search for. -The last two (collectively called the "search tables") track who can -see who. +Three of them track a list of all known users. The last two (collectively called +the "search tables") track which users are visible to each other. From all of these tables we exclude three types of local user: - - support users - - appservice users - - deactivated users -* `user_directory`. This contains the user_id, display name and avatar we'll - return when you search the directory. - - Because there's only one directory entry per user, it's important that we only - ever put publicly visible names here. Otherwise we might leak a private +- support users +- appservice users +- deactivated users + +A description of each table follows: + +* `user_directory`. This contains the user ID, display name and avatar of each user. + - Because there is only one directory entry per user, it is important that it + only contain publicly visible information. Otherwise, this will leak the nickname or avatar used in a private room. - Indexed on rooms. Indexed on users. * `user_directory_search`. To be joined to `user_directory`. It contains an extra - column that enables full text search based on user ids and display names. - Different schemas for SQLite and Postgres with different code paths to match. + column that enables full text search based on user IDs and display names. + Different schemas for SQLite and Postgres are used. - Indexed on the full text search data. Indexed on users. * `user_directory_stream_pos`. When the initial background update to populate the directory is complete, we record a stream position here. This indicates that synapse should now listen for room changes and incrementally update - the directory where necessary. + the directory where necessary. (See [stream positions](development/synapse_architecture/streams.html).) -* `users_in_public_rooms`. Contains associations between users and the public rooms they're in. 
-  Used to determine which users are in public rooms and should be publicly visible in the directory.
+* `users_in_public_rooms`. Contains associations between users and the public
+  rooms they're in. Used to determine which users are in public rooms and should
+  be publicly visible in the directory. Both local and remote users are tracked.
 
 * `users_who_share_private_rooms`. Rows are triples `(L, M, room id)` where `L`
   is a local user and `M` is a local or remote user. `L` and `M` should be
   different, but this isn't enforced by a constraint.
+
+  Note that if two local users share a room then there will be two entries:
+  `(user1, user2, !room_id)` and `(user2, user1, !room_id)`.
+
+## Configuration options
+
+The exact way user search works can be tweaked via some server-level
+[configuration options](usage/configuration/config_documentation.md#user_directory).
+
+The information is not repeated here, but the options are mentioned below.
+
+## Search algorithm
+
+If `search_all_users` is `false`, then results are limited to users who:
+
+1. Are found in the `users_in_public_rooms` table, or
+2. Are found in the `users_who_share_private_rooms` table where `L` is the requesting
+   user and `M` is the search result.
+
+Otherwise, if `search_all_users` is `true`, no such limits are placed and all
+users known to the server (matching the search query) will be returned.
+
+By default, locked users are not returned. If `show_locked_users` is `true` then
+no filtering on the locked status of a user is done.
+
+The user-provided search term is lowercased and normalized using [NFKC](https://en.wikipedia.org/wiki/Unicode_equivalence#Normalization);
+this treats the string as case-insensitive, canonicalizes different forms of the
+same text, and maps some "roughly equivalent" characters together.
+
+The search term is then split into words:
+
+* If [ICU](https://en.wikipedia.org/wiki/International_Components_for_Unicode) is
+  available, then the system's [default locale](https://unicode-org.github.io/icu/userguide/locale/#default-locales)
+  will be used to break the search term into words. (See the
+  [installation instructions](setup/installation.md) for how to install ICU.)
+* If unavailable, then runs of ASCII characters, numbers, underscores, and hyphens
+  are considered words.
+
+The queries for PostgreSQL and SQLite are detailed below, but their overall goal
+is to find matching users, preferring users who are "real" (e.g. not bots,
+not deactivated). It is assumed that real users will have a display name and
+avatar set.
+
+### PostgreSQL
+
+The above words are then transformed into two queries:
+
+1. "exact" which matches the parsed words exactly (using [`to_tsquery`](https://www.postgresql.org/docs/current/textsearch-controls.html#TEXTSEARCH-PARSING-QUERIES));
+2. "prefix" which matches the parsed words as prefixes (using `to_tsquery`).
+
+Results are composed of all rows in the `user_directory_search` table whose information
+matches one (or both) of these queries. Results are ordered by calculating a weighted
+score for each result; higher scores are returned first:
+
+* 4x if a user ID exists.
+* 1.2x if the user has a display name set.
+* 1.2x if the user has an avatar set.
+* 0x-3x by the full text search results using the [`ts_rank_cd` function](https://www.postgresql.org/docs/current/textsearch-controls.html#TEXTSEARCH-RANKING) + against the "exact" search query; this has four variables with the following weightings: + * `D`: 0.1 for the user ID's domain + * `C`: 0.1 for unused + * `B`: 0.9 for the user's display name (or an empty string if it is not set) + * `A`: 0.1 for the user ID's localpart +* 0x-1x by the full text search results using the `ts_rank_cd` function against the + "prefix" search query. (Using the same weightings as above.) +* If `prefer_local_users` is `true`, then 2x if the user is local to the homeserver. + +Note that `ts_rank_cd` returns a weight between 0 and 1. The initial weighting of +all results is 1. + +### SQLite + +Results are composed of all rows in the `user_directory_search` whose information +matches the query. Results are ordered by the following information, with each +subsequent column used as a tiebreaker, for each result: + +1. By the [`rank`](https://www.sqlite.org/windowfunctions.html#built_in_window_functions) + of the full text search results using the [`matchinfo` function](https://www.sqlite.org/fts3.html#matchinfo). Higher + ranks are returned first. +2. If `prefer_local_users` is `true`, then users local to the homeserver are + returned first. +3. Users with a display name set are returned first. +4. Users with an avatar set are returned first. From 06f650f5f4578fc4303abbdd502585d1c8e4481e Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 26 Sep 2023 15:21:07 +0100 Subject: [PATCH 525/562] Skip export-data on non-code (e.g. docs) PRs (#16387) --- .github/workflows/tests.yml | 7 ++++--- changelog.d/16387.misc | 1 + 2 files changed, 5 insertions(+), 3 deletions(-) create mode 100644 changelog.d/16387.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 045d3dd257..96750cb6c8 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -499,8 +499,8 @@ jobs: /logs/**/*.log* export-data: - if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail - needs: [linting-done, portdb] + if: ${{ !failure() && !cancelled() && needs.changes.outputs.integration == 'true'}} # Allow previous steps to be skipped, but not fail + needs: [linting-done, portdb, changes] runs-on: ubuntu-latest env: TOP: ${{ github.workspace }} @@ -535,7 +535,7 @@ jobs: portdb: - if: ${{ !failure() && !cancelled() && needs.changes.outputs.linting == 'true' }} # Allow previous steps to be skipped, but not fail + if: ${{ !failure() && !cancelled() && needs.changes.outputs.integration == 'true'}} # Allow previous steps to be skipped, but not fail needs: - linting-done - changes @@ -702,6 +702,7 @@ jobs: trial-olddeps sytest portdb + export-data complement check-signoff lint-newsfile diff --git a/changelog.d/16387.misc b/changelog.d/16387.misc new file mode 100644 index 0000000000..eae0501d6b --- /dev/null +++ b/changelog.d/16387.misc @@ -0,0 +1 @@ +Avoid running CI steps when the files they check have not been changed. 
\ No newline at end of file From 88ba67eb91215a708f321e16559fe3c2c0d0a407 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 26 Sep 2023 15:56:54 +0100 Subject: [PATCH 526/562] 1.93.0 --- CHANGES.md | 20 ++++++++++++++++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index eb537f9f6a..c1ea40de20 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,23 @@ +# Synapse 1.93.0 (2023-09-26) + +No significant changes since 1.93.0rc1. + + +## Security advisory + +The following issues are fixed in 1.93.0 (and RCs). + +- [GHSA-4f74-84v3-j9q5](https://github.com/matrix-org/synapse/security/advisories/GHSA-4f74-84v3-j9q5) / [CVE-2023-41335](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-41335) — Low Severity + + Temporary storage of plaintext passwords during password changes. + +- [GHSA-7565-cq32-vx2x](https://github.com/matrix-org/synapse/security/advisories/GHSA-7565-cq32-vx2x) / [CVE-2023-42453](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-42453) — Low Severity + + Improper validation of receipts allows forged read receipts. + +See the advisories for more details. If you have any questions, email security@matrix.org. + + # Synapse 1.93.0rc1 (2023-09-19) ### Features diff --git a/debian/changelog b/debian/changelog index 192eedd45c..7be71019b4 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.93.0) stable; urgency=medium + + * New Synapse release 1.93.0. + + -- Synapse Packaging team Tue, 26 Sep 2023 15:54:40 +0100 + matrix-synapse-py3 (1.93.0~rc1) stable; urgency=medium * New synapse release 1.93.0rc1. diff --git a/pyproject.toml b/pyproject.toml index f69336a73f..5a1b0ec437 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -95,7 +95,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.93.0rc1" +version = "1.93.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 17800a0e9779a1cfd7c9dff79ae331adf8f44f83 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 26 Sep 2023 11:52:19 -0400 Subject: [PATCH 527/562] Implement MSC4028: push all encrypted events. (#16361) This unstable push rule is implemented behind an experimental configuration flag. --- changelog.d/16361.feature | 1 + rust/benches/evaluator.rs | 1 + rust/src/push/base_rules.rs | 13 +++++++++++++ rust/src/push/evaluator.rs | 2 +- rust/src/push/mod.rs | 9 +++++++++ stubs/synapse/synapse_rust/push.pyi | 1 + synapse/config/experimental.py | 4 ++++ synapse/storage/databases/main/push_rule.py | 1 + 8 files changed, 31 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16361.feature diff --git a/changelog.d/16361.feature b/changelog.d/16361.feature new file mode 100644 index 0000000000..632fff789b --- /dev/null +++ b/changelog.d/16361.feature @@ -0,0 +1 @@ +Experimental support for [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028) to push all encrypted events to clients. 
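In client-facing terms, the default rule added to `base_rules.rs` in the hunks below corresponds roughly to the following push rule. The shape is inferred from the Rust definition and is illustrative rather than an actual API response; `default_enabled: false` means the rule ships disabled, and the `msc4028_push_encrypted_events` flag under `experimental_features` in the homeserver config controls whether it is exposed at all:

```python
# Approximate client-visible form of the MSC4028 override rule.
msc4028_rule = {
    "rule_id": ".org.matrix.msc4028.encrypted_event",
    "default": True,
    "enabled": False,  # mirrors default_enabled: false in base_rules.rs
    "conditions": [
        {"kind": "event_match", "key": "type", "pattern": "m.room.encrypted"},
    ],
    "actions": ["notify"],
}
```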
diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 14071105a0..6e1eab2a3b 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -197,6 +197,7 @@ fn bench_eval_message(b: &mut Bencher) { false, false, false, + false, ); b.iter(|| eval.run(&rules, Some("bob"), Some("person"))); diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index 59fd27665a..cebc2c079b 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -63,6 +63,19 @@ pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule { }]; pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ + PushRule { + rule_id: Cow::Borrowed("global/override/.org.matrix.msc4028.encrypted_event"), + priority_class: 5, + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( + EventMatchCondition { + key: Cow::Borrowed("type"), + pattern: Cow::Borrowed("m.room.encrypted"), + }, + ))]), + actions: Cow::Borrowed(&[Action::Notify]), + default: true, + default_enabled: false, + }, PushRule { rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"), priority_class: 5, diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 5b9bf9b26a..48e670478b 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -564,7 +564,7 @@ fn test_requires_room_version_supports_condition() { }; let rules = PushRules::new(vec![custom_rule]); result = evaluator.run( - &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true), + &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false), None, None, ); diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index 8e91f506cc..5e1e8e1abb 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -527,6 +527,7 @@ pub struct FilteredPushRules { msc1767_enabled: bool, msc3381_polls_enabled: bool, msc3664_enabled: bool, + msc4028_push_encrypted_events: bool, } #[pymethods] @@ -538,6 +539,7 @@ impl FilteredPushRules { msc1767_enabled: bool, msc3381_polls_enabled: bool, msc3664_enabled: bool, + msc4028_push_encrypted_events: bool, ) -> Self { Self { push_rules, @@ -545,6 +547,7 @@ impl FilteredPushRules { msc1767_enabled, msc3381_polls_enabled, msc3664_enabled, + msc4028_push_encrypted_events, } } @@ -581,6 +584,12 @@ impl FilteredPushRules { return false; } + if !self.msc4028_push_encrypted_events + && rule.rule_id == "global/override/.org.matrix.msc4028.encrypted_event" + { + return false; + } + true }) .map(|r| { diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index 1f432d4ecf..25259ce91d 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -46,6 +46,7 @@ class FilteredPushRules: msc1767_enabled: bool, msc3381_polls_enabled: bool, msc3664_enabled: bool, + msc4028_push_encrypted_events: bool, ): ... def rules(self) -> Collection[Tuple[PushRule, bool]]: ... 
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index cabe0d4397..9f830e7094 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -415,3 +415,7 @@ class ExperimentalConfig(Config): LimitExceededError.include_retry_after_header = experimental.get( "msc4041_enabled", False ) + + self.msc4028_push_encrypted_events = experimental.get( + "msc4028_push_encrypted_events", False + ) diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index af69944008..923166974c 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -88,6 +88,7 @@ def _load_rules( msc1767_enabled=experimental_config.msc1767_enabled, msc3664_enabled=experimental_config.msc3664_enabled, msc3381_polls_enabled=experimental_config.msc3381_polls_enabled, + msc4028_push_encrypted_events=experimental_config.msc4028_push_encrypted_events, ) return filtered_rules From f84da3c32ec74cf054e2fd6d10618aa4997cffaa Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 26 Sep 2023 11:57:50 -0400 Subject: [PATCH 528/562] Add a cache around server ACL checking (#16360) * Pre-compiles the server ACLs onto an object per room and invalidates them when new events come in. * Converts the server ACL checking into Rust. --- changelog.d/16360.misc | 1 + rust/src/acl/mod.rs | 102 +++++++++++++++++++++ rust/src/lib.rs | 2 + stubs/synapse/synapse_rust/acl.pyi | 21 +++++ synapse/events/validator.py | 7 +- synapse/federation/federation_server.py | 76 ++------------- synapse/handlers/federation_event.py | 6 ++ synapse/handlers/message.py | 5 + synapse/replication/tcp/client.py | 6 ++ synapse/storage/controllers/state.py | 59 ++++++++++++ tests/federation/test_federation_server.py | 35 ++++--- 11 files changed, 235 insertions(+), 85 deletions(-) create mode 100644 changelog.d/16360.misc create mode 100644 rust/src/acl/mod.rs create mode 100644 stubs/synapse/synapse_rust/acl.pyi diff --git a/changelog.d/16360.misc b/changelog.d/16360.misc new file mode 100644 index 0000000000..b32d7b521e --- /dev/null +++ b/changelog.d/16360.misc @@ -0,0 +1 @@ +Cache server ACL checking. diff --git a/rust/src/acl/mod.rs b/rust/src/acl/mod.rs new file mode 100644 index 0000000000..071f2b7732 --- /dev/null +++ b/rust/src/acl/mod.rs @@ -0,0 +1,102 @@ +// Copyright 2023 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! An implementation of Matrix server ACL rules. + +use std::net::Ipv4Addr; +use std::str::FromStr; + +use anyhow::Error; +use pyo3::prelude::*; +use regex::Regex; + +use crate::push::utils::{glob_to_regex, GlobMatchType}; + +/// Called when registering modules with python. 
+pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> { + let child_module = PyModule::new(py, "acl")?; + child_module.add_class::()?; + + m.add_submodule(child_module)?; + + // We need to manually add the module to sys.modules to make `from + // synapse.synapse_rust import acl` work. + py.import("sys")? + .getattr("modules")? + .set_item("synapse.synapse_rust.acl", child_module)?; + + Ok(()) +} + +#[derive(Debug, Clone)] +#[pyclass(frozen)] +pub struct ServerAclEvaluator { + allow_ip_literals: bool, + allow: Vec, + deny: Vec, +} + +#[pymethods] +impl ServerAclEvaluator { + #[new] + pub fn py_new( + allow_ip_literals: bool, + allow: Vec<&str>, + deny: Vec<&str>, + ) -> Result { + let allow = allow + .iter() + .map(|s| glob_to_regex(s, GlobMatchType::Whole)) + .collect::>()?; + let deny = deny + .iter() + .map(|s| glob_to_regex(s, GlobMatchType::Whole)) + .collect::>()?; + + Ok(ServerAclEvaluator { + allow_ip_literals, + allow, + deny, + }) + } + + pub fn server_matches_acl_event(&self, server_name: &str) -> bool { + // first of all, check if literal IPs are blocked, and if so, whether the + // server name is a literal IP + if !self.allow_ip_literals { + // check for ipv6 literals. These start with '['. + if server_name.starts_with('[') { + return false; + } + + // check for ipv4 literals. We can just lift the routine from std::net. + if Ipv4Addr::from_str(server_name).is_ok() { + return false; + } + } + + // next, check the deny list + if self.deny.iter().any(|e| e.is_match(server_name)) { + return false; + } + + // then the allow list. + if self.allow.iter().any(|e| e.is_match(server_name)) { + return true; + } + + // everything else should be rejected. + false + } +} diff --git a/rust/src/lib.rs b/rust/src/lib.rs index ce67f58611..c44c09bda7 100644 --- a/rust/src/lib.rs +++ b/rust/src/lib.rs @@ -2,6 +2,7 @@ use lazy_static::lazy_static; use pyo3::prelude::*; use pyo3_log::ResetHandle; +pub mod acl; pub mod push; lazy_static! { @@ -38,6 +39,7 @@ fn synapse_rust(py: Python<'_>, m: &PyModule) -> PyResult<()> { m.add_function(wrap_pyfunction!(get_rust_file_digest, m)?)?; m.add_function(wrap_pyfunction!(reset_logging_config, m)?)?; + acl::register_module(py, m)?; push::register_module(py, m)?; Ok(()) diff --git a/stubs/synapse/synapse_rust/acl.pyi b/stubs/synapse/synapse_rust/acl.pyi new file mode 100644 index 0000000000..e03989b627 --- /dev/null +++ b/stubs/synapse/synapse_rust/acl.pyi @@ -0,0 +1,21 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List + +class ServerAclEvaluator: + def __init__( + self, allow_ip_literals: bool, allow: List[str], deny: List[str] + ) -> None: ... + def server_matches_acl_event(self, server_name: str) -> bool: ... 
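Taken together, the Rust class and its type stub above give Python a matcher whose globs are compiled once at construction rather than on every check. A small usage sketch mirroring the semantics of the Rust implementation (hostnames are illustrative):

```python
from synapse.synapse_rust.acl import ServerAclEvaluator

evaluator = ServerAclEvaluator(
    allow_ip_literals=False,
    allow=["*"],
    deny=["evil.example.com"],
)

assert evaluator.server_matches_acl_event("good.example.org")
assert not evaluator.server_matches_acl_event("evil.example.com")
# IP literals are rejected outright when allow_ip_literals is False.
assert not evaluator.server_matches_acl_event("1.2.3.4")
assert not evaluator.server_matches_acl_event("[1:2::]")
```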
diff --git a/synapse/events/validator.py b/synapse/events/validator.py index a637fadfab..83d9fb5813 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -39,9 +39,9 @@ from synapse.events.utils import ( CANONICALJSON_MIN_INT, validate_canonicaljson, ) -from synapse.federation.federation_server import server_matches_acl_event from synapse.http.servlet import validate_json_object from synapse.rest.models import RequestBodyModel +from synapse.storage.controllers.state import server_acl_evaluator_from_event from synapse.types import EventID, JsonDict, RoomID, StrCollection, UserID @@ -106,7 +106,10 @@ class EventValidator: self._validate_retention(event) elif event.type == EventTypes.ServerACL: - if not server_matches_acl_event(config.server.server_name, event): + server_acl_evaluator = server_acl_evaluator_from_event(event) + if not server_acl_evaluator.server_matches_acl_event( + config.server.server_name + ): raise SynapseError( 400, "Can't create an ACL event that denies the local server" ) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index f9915e5a3f..ec8e770430 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -29,10 +29,8 @@ from typing import ( Union, ) -from matrix_common.regex import glob_to_regex from prometheus_client import Counter, Gauge, Histogram -from twisted.internet.abstract import isIPAddress from twisted.python import failure from synapse.api.constants import ( @@ -1324,75 +1322,13 @@ class FederationServer(FederationBase): Raises: AuthError if the server does not match the ACL """ - acl_event = await self._storage_controllers.state.get_current_state_event( - room_id, EventTypes.ServerACL, "" + server_acl_evaluator = ( + await self._storage_controllers.state.get_server_acl_for_room(room_id) ) - if not acl_event or server_matches_acl_event(server_name, acl_event): - return - - raise AuthError(code=403, msg="Server is banned from room") - - -def server_matches_acl_event(server_name: str, acl_event: EventBase) -> bool: - """Check if the given server is allowed by the ACL event - - Args: - server_name: name of server, without any port part - acl_event: m.room.server_acl event - - Returns: - True if this server is allowed by the ACLs - """ - logger.debug("Checking %s against acl %s", server_name, acl_event.content) - - # first of all, check if literal IPs are blocked, and if so, whether the - # server name is a literal IP - allow_ip_literals = acl_event.content.get("allow_ip_literals", True) - if not isinstance(allow_ip_literals, bool): - logger.warning("Ignoring non-bool allow_ip_literals flag") - allow_ip_literals = True - if not allow_ip_literals: - # check for ipv6 literals. These start with '['. - if server_name[0] == "[": - return False - - # check for ipv4 literals. We can just lift the routine from twisted. - if isIPAddress(server_name): - return False - - # next, check the deny list - deny = acl_event.content.get("deny", []) - if not isinstance(deny, (list, tuple)): - logger.warning("Ignoring non-list deny ACL %s", deny) - deny = [] - for e in deny: - if _acl_entry_matches(server_name, e): - # logger.info("%s matched deny rule %s", server_name, e) - return False - - # then the allow list. 
- allow = acl_event.content.get("allow", []) - if not isinstance(allow, (list, tuple)): - logger.warning("Ignoring non-list allow ACL %s", allow) - allow = [] - for e in allow: - if _acl_entry_matches(server_name, e): - # logger.info("%s matched allow rule %s", server_name, e) - return True - - # everything else should be rejected. - # logger.info("%s fell through", server_name) - return False - - -def _acl_entry_matches(server_name: str, acl_entry: Any) -> bool: - if not isinstance(acl_entry, str): - logger.warning( - "Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry) - ) - return False - regex = glob_to_regex(acl_entry) - return bool(regex.match(server_name)) + if server_acl_evaluator and not server_acl_evaluator.server_matches_acl_event( + server_name + ): + raise AuthError(code=403, msg="Server is banned from room") class FederationHandlerRegistry: diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 7c62cdfaef..0cc8e990d9 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -2342,6 +2342,12 @@ class FederationEventHandler: # TODO retrieve the previous state, and exclude join -> join transitions self._notifier.notify_user_joined_room(event.event_id, event.room_id) + # If this is a server ACL event, clear the cache in the storage controller. + if event.type == EventTypes.ServerACL: + self._state_storage_controller.get_server_acl_for_room.invalidate( + (event.room_id,) + ) + def _sanity_check_event(self, ev: EventBase) -> None: """ Do some early sanity checks of a received event diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index c036578a3d..44dbbf81dd 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1730,6 +1730,11 @@ class EventCreationHandler: event.event_id, event.room_id ) + if event.type == EventTypes.ServerACL: + self._storage_controllers.state.get_server_acl_for_room.invalidate( + (event.room_id,) + ) + await self._maybe_kick_guest_users(event, context) if event.type == EventTypes.CanonicalAlias: diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index ca8a76f77c..1c7946522a 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -205,6 +205,12 @@ class ReplicationDataHandler: self.notifier.notify_user_joined_room( row.data.event_id, row.data.room_id ) + + # If this is a server ACL event, clear the cache in the storage controller. 
+ if row.data.type == EventTypes.ServerACL: + self._state_storage_controller.get_server_acl_for_room.invalidate( + (row.data.room_id,) + ) elif stream_name == UnPartialStatedRoomStream.NAME: for row in rows: assert isinstance(row, UnPartialStatedRoomStreamRow) diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 10d219c045..46957723a1 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -37,6 +37,7 @@ from synapse.storage.util.partial_state_events_tracker import ( PartialCurrentStateTracker, PartialStateEventsTracker, ) +from synapse.synapse_rust.acl import ServerAclEvaluator from synapse.types import MutableStateMap, StateMap, get_domain_from_id from synapse.types.state import StateFilter from synapse.util.async_helpers import Linearizer @@ -501,6 +502,31 @@ class StateStorageController: return event.content.get("alias") + @cached() + async def get_server_acl_for_room( + self, room_id: str + ) -> Optional[ServerAclEvaluator]: + """Get the server ACL evaluator for room, if any + + This does up-front parsing of the content to ignore bad data and pre-compile + regular expressions. + + Args: + room_id: The room ID + + Returns: + The server ACL evaluator, if any + """ + + acl_event = await self.get_current_state_event( + room_id, EventTypes.ServerACL, "" + ) + + if not acl_event: + return None + + return server_acl_evaluator_from_event(acl_event) + @trace @tag_args async def get_current_state_deltas( @@ -760,3 +786,36 @@ class StateStorageController: cache.state_group = object() return frozenset(cache.hosts_to_joined_users) + + +def server_acl_evaluator_from_event(acl_event: EventBase) -> "ServerAclEvaluator": + """ + Create a ServerAclEvaluator from a m.room.server_acl event's content. + + This does up-front parsing of the content to ignore bad data. It then creates + the ServerAclEvaluator which will pre-compile regular expressions from the globs. + """ + + # first of all, parse if literal IPs are blocked. + allow_ip_literals = acl_event.content.get("allow_ip_literals", True) + if not isinstance(allow_ip_literals, bool): + logger.warning("Ignoring non-bool allow_ip_literals flag") + allow_ip_literals = True + + # next, parse the deny list by ignoring any non-strings. + deny = acl_event.content.get("deny", []) + if not isinstance(deny, (list, tuple)): + logger.warning("Ignoring non-list deny ACL %s", deny) + deny = [] + else: + deny = [s for s in deny if isinstance(s, str)] + + # then the allow list. 
+    allow = acl_event.content.get("allow", [])
+    if not isinstance(allow, (list, tuple)):
+        logger.warning("Ignoring non-list allow ACL %s", allow)
+        allow = []
+    else:
+        allow = [s for s in allow if isinstance(s, str)]
+
+    return ServerAclEvaluator(allow_ip_literals, allow, deny)
diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py
index 5c850d1843..1831a5b47a 100644
--- a/tests/federation/test_federation_server.py
+++ b/tests/federation/test_federation_server.py
@@ -22,10 +22,10 @@ from twisted.test.proto_helpers import MemoryReactor
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.config.server import DEFAULT_ROOM_VERSION
 from synapse.events import EventBase, make_event_from_dict
-from synapse.federation.federation_server import server_matches_acl_event
 from synapse.rest import admin
 from synapse.rest.client import login, room
 from synapse.server import HomeServer
+from synapse.storage.controllers.state import server_acl_evaluator_from_event
 from synapse.types import JsonDict
 from synapse.util import Clock
 
@@ -67,37 +67,46 @@ class ServerACLsTestCase(unittest.TestCase):
         e = _create_acl_event({"allow": ["*"], "deny": ["evil.com"]})
         logging.info("ACL event: %s", e.content)
 
-        self.assertFalse(server_matches_acl_event("evil.com", e))
-        self.assertFalse(server_matches_acl_event("EVIL.COM", e))
+        server_acl_evaluator = server_acl_evaluator_from_event(e)
 
-        self.assertTrue(server_matches_acl_event("evil.com.au", e))
-        self.assertTrue(server_matches_acl_event("honestly.not.evil.com", e))
+        self.assertFalse(server_acl_evaluator.server_matches_acl_event("evil.com"))
+        self.assertFalse(server_acl_evaluator.server_matches_acl_event("EVIL.COM"))
+
+        self.assertTrue(server_acl_evaluator.server_matches_acl_event("evil.com.au"))
+        self.assertTrue(
+            server_acl_evaluator.server_matches_acl_event("honestly.not.evil.com")
+        )
 
     def test_block_ip_literals(self) -> None:
         e = _create_acl_event({"allow_ip_literals": False, "allow": ["*"]})
         logging.info("ACL event: %s", e.content)
 
-        self.assertFalse(server_matches_acl_event("1.2.3.4", e))
-        self.assertTrue(server_matches_acl_event("1a.2.3.4", e))
-        self.assertFalse(server_matches_acl_event("[1:2::]", e))
-        self.assertTrue(server_matches_acl_event("1:2:3:4", e))
+        server_acl_evaluator = server_acl_evaluator_from_event(e)
+
+        self.assertFalse(server_acl_evaluator.server_matches_acl_event("1.2.3.4"))
+        self.assertTrue(server_acl_evaluator.server_matches_acl_event("1a.2.3.4"))
+        self.assertFalse(server_acl_evaluator.server_matches_acl_event("[1:2::]"))
+        self.assertTrue(server_acl_evaluator.server_matches_acl_event("1:2:3:4"))
 
     def test_wildcard_matching(self) -> None:
         e = _create_acl_event({"allow": ["good*.com"]})
+
+        server_acl_evaluator = server_acl_evaluator_from_event(e)
+
         self.assertTrue(
-            server_matches_acl_event("good.com", e),
+            server_acl_evaluator.server_matches_acl_event("good.com"),
             "* matches 0 characters",
         )
         self.assertTrue(
-            server_matches_acl_event("GOOD.COM", e),
+            server_acl_evaluator.server_matches_acl_event("GOOD.COM"),
             "pattern is case-insensitive",
         )
         self.assertTrue(
-            server_matches_acl_event("good.aa.com", e),
+            server_acl_evaluator.server_matches_acl_event("good.aa.com"),
             "* matches several characters, including '.'",
         )
         self.assertFalse(
-            server_matches_acl_event("ishgood.com", e),
+            server_acl_evaluator.server_matches_acl_event("ishgood.com"),
             "pattern does not allow prefixes",
         )

From c690fd16c48338cac14111d725c137db25436467 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 28 Sep 2023 07:01:16 -0400
Subject: [PATCH 529/562] Use modern config for maturin. (#16394)

This allows maturin >= 0.15 to build the properly named shared library
object. For now the old configuration is also kept to allow older
maturin installs to be used.
---
 changelog.d/16394.misc | 1 +
 pyproject.toml         | 1 +
 rust/Cargo.toml        | 2 ++
 3 files changed, 4 insertions(+)
 create mode 100644 changelog.d/16394.misc

diff --git a/changelog.d/16394.misc b/changelog.d/16394.misc
new file mode 100644
index 0000000000..ee08c3e024
--- /dev/null
+++ b/changelog.d/16394.misc
@@ -0,0 +1 @@
+Update maturin configuration.
diff --git a/pyproject.toml b/pyproject.toml
index bf51276f4a..5fb64479a1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -92,6 +92,7 @@ skip_gitignore = true
 
 [tool.maturin]
 manifest-path = "rust/Cargo.toml"
+module-name = "synapse.synapse_rust"
 
 [tool.poetry]
 name = "matrix-synapse"
diff --git a/rust/Cargo.toml b/rust/Cargo.toml
index 16917136db..26403d58cc 100644
--- a/rust/Cargo.toml
+++ b/rust/Cargo.toml
@@ -15,6 +15,8 @@ name = "synapse"
 # tests/benchmarks.
 crate-type = ["lib", "cdylib"]
 
+# This is deprecated, see tool.maturin in pyproject.toml.
+# It is left here for compatibility with maturin < 0.15.
 [package.metadata.maturin]
 # This is where we tell maturin where to place the built library.
 name = "synapse.synapse_rust"

From cdb89dcefe9f4d7035f898cd77cd514fa69c2673 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 28 Sep 2023 07:01:46 -0400
Subject: [PATCH 530/562] Improve state types. (#16395)

---
 changelog.d/16395.misc |  1 +
 synapse/state/v2.py    |  5 ++---
 tests/state/test_v2.py | 13 ++++++++-----
 3 files changed, 11 insertions(+), 8 deletions(-)
 create mode 100644 changelog.d/16395.misc

diff --git a/changelog.d/16395.misc b/changelog.d/16395.misc
new file mode 100644
index 0000000000..93ceaeafc9
--- /dev/null
+++ b/changelog.d/16395.misc
@@ -0,0 +1 @@
+Improve type hints.
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 1752f95db8..b2e63aed1e 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -23,7 +23,6 @@ from typing import (
     Generator,
     Iterable,
     List,
-    Mapping,
     Optional,
     Sequence,
     Set,
@@ -269,7 +268,7 @@ async def _get_power_level_for_sender(
 
 async def _get_auth_chain_difference(
     room_id: str,
-    state_sets: Sequence[Mapping[Any, str]],
+    state_sets: Sequence[StateMap[str]],
     unpersisted_events: Dict[str, EventBase],
     state_res_store: StateResolutionStore,
 ) -> Set[str]:
@@ -405,7 +404,7 @@ def _seperate(
 
     # mypy doesn't understand that discarding None above means that conflicted
     # state is StateMap[Set[str]], not StateMap[Set[Optional[Str]]].
-    return unconflicted_state, conflicted_state  # type: ignore
+    return unconflicted_state, conflicted_state  # type: ignore[return-value]
 
 
 def _is_power_event(event: EventBase) -> bool:
diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py
index 2e3f2318d9..6a2f7584f6 100644
--- a/tests/state/test_v2.py
+++ b/tests/state/test_v2.py
@@ -719,7 +719,10 @@ class AuthChainDifferenceTestCase(unittest.TestCase):
         persisted_events = {a.event_id: a, b.event_id: b}
         unpersited_events = {c.event_id: c}
 
-        state_sets = [{"a": a.event_id, "b": b.event_id}, {"c": c.event_id}]
+        state_sets = [
+            {("a", ""): a.event_id, ("b", ""): b.event_id},
+            {("c", ""): c.event_id},
+        ]
 
         store = TestStateResolutionStore(persisted_events)
 
@@ -774,8 +777,8 @@ class AuthChainDifferenceTestCase(unittest.TestCase):
         unpersited_events = {c.event_id: c, d.event_id: d}
 
         state_sets = [
-            {"a": a.event_id, "b": b.event_id},
-            {"c": c.event_id, "d": d.event_id},
+            {("a", ""): a.event_id, ("b", ""): b.event_id},
+            {("c", ""): c.event_id, ("d", ""): d.event_id},
         ]
 
         store = TestStateResolutionStore(persisted_events)
 
@@ -841,8 +844,8 @@ class AuthChainDifferenceTestCase(unittest.TestCase):
         unpersited_events = {c.event_id: c, d.event_id: d, e.event_id: e}
 
         state_sets = [
-            {"a": a.event_id, "b": b.event_id, "e": e.event_id},
-            {"c": c.event_id, "d": d.event_id},
+            {("a", ""): a.event_id, ("b", ""): b.event_id, ("e", ""): e.event_id},
+            {("c", ""): c.event_id, ("d", ""): d.event_id},
         ]
 
         store = TestStateResolutionStore(persisted_events)

From fb664cf159d1d6c5d150726cc365debf911f7e74 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 28 Sep 2023 07:02:31 -0400
Subject: [PATCH 531/562] Remove warnings from the docs about using message
 retention. (#16382)

There are no known bugs in the message retention code, but it is possible
that there still exist race conditions. Additional fixes will be made as
reported.
---
 changelog.d/16382.doc                            | 1 +
 docs/message_retention_policies.md               | 3 +--
 docs/usage/configuration/config_documentation.md | 7 ++-----
 3 files changed, 4 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/16382.doc

diff --git a/changelog.d/16382.doc b/changelog.d/16382.doc
new file mode 100644
index 0000000000..2549586310
--- /dev/null
+++ b/changelog.d/16382.doc
@@ -0,0 +1 @@
+Update documentation around message retention policies.
diff --git a/docs/message_retention_policies.md b/docs/message_retention_policies.md
index 7f3e5359f1..2746a106b3 100644
--- a/docs/message_retention_policies.md
+++ b/docs/message_retention_policies.md
@@ -8,8 +8,7 @@ and allow server and room admins to configure how long messages should
 be kept in a homeserver's database before being purged from it.
 **Please note that, as this feature isn't part of the Matrix
 specification yet, this implementation is to be considered as
-experimental. There are known bugs which may cause database corruption.
-Proceed with caution.**
+experimental.**
 
 A message retention policy is mainly defined by its `max_lifetime`
 parameter, which defines how long a message can be kept around after
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 54315a417e..502cd9f823 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -1026,11 +1026,8 @@ which are older than the room's maximum retention period. Synapse will also
 filter events received over federation so that events that should have been
 purged are ignored and not stored again.
-The message retention policies feature is disabled by default. Please be advised -that enabling this feature carries some risk. There are known bugs with the implementation -which can cause database corruption. Setting retention to delete older history -is less risky than deleting newer history but in general caution is advised when enabling this -experimental feature. You can read more about this feature [here](../../message_retention_policies.md). +The message retention policies feature is disabled by default. You can read more +about this feature [here](../../message_retention_policies.md). This setting has the following sub-options: * `default_policy`: Default retention policy. If set, Synapse will apply it to rooms that lack the From 79eb6c0cdc15ccb5083368c923653862a4d2d23a Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Fri, 29 Sep 2023 12:19:38 +0100 Subject: [PATCH 532/562] Support rendering some media downloads as inline (#15988) Use an `inline` Content-Disposition header when the media is "safe" to display inline (some known text, image, video, audio formats). --- changelog.d/15988.feature | 1 + synapse/media/_base.py | 42 +++++++++++++++++++++++++++++-- tests/media/test_base.py | 29 ++++++++++++++++++++- tests/media/test_media_storage.py | 40 ++++++++++++++++++++++++++--- 4 files changed, 106 insertions(+), 6 deletions(-) create mode 100644 changelog.d/15988.feature diff --git a/changelog.d/15988.feature b/changelog.d/15988.feature new file mode 100644 index 0000000000..dee8fa597f --- /dev/null +++ b/changelog.d/15988.feature @@ -0,0 +1 @@ +Render plain, CSS, CSV, JSON and common image formats media content in the browser (inline) when requested through the /download endpoint. \ No newline at end of file diff --git a/synapse/media/_base.py b/synapse/media/_base.py index 20cb8b9010..80c448de2b 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py @@ -50,6 +50,39 @@ TEXT_CONTENT_TYPES = [ "text/xml", ] +# A list of all content types that are "safe" to be rendered inline in a browser. +INLINE_CONTENT_TYPES = [ + "text/css", + "text/plain", + "text/csv", + "application/json", + "application/ld+json", + # We allow some media files deemed as safe, which comes from the matrix-react-sdk. + # https://github.com/matrix-org/matrix-react-sdk/blob/a70fcfd0bcf7f8c85986da18001ea11597989a7c/src/utils/blobs.ts#L51 + # SVGs are *intentionally* omitted. + "image/jpeg", + "image/gif", + "image/png", + "image/apng", + "image/webp", + "image/avif", + "video/mp4", + "video/webm", + "video/ogg", + "video/quicktime", + "audio/mp4", + "audio/webm", + "audio/aac", + "audio/mpeg", + "audio/ogg", + "audio/wave", + "audio/wav", + "audio/x-wav", + "audio/x-pn-wav", + "audio/flac", + "audio/x-flac", +] + def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]: """Parses the server name, media ID and optional file name from the request URI @@ -153,8 +186,13 @@ def add_file_headers( request.setHeader(b"Content-Type", content_type.encode("UTF-8")) - # Use a Content-Disposition of attachment to force download of media. - disposition = "attachment" + # A strict subset of content types is allowed to be inlined so that they may + # be viewed directly in a browser. Other file types are forced to be downloads. + if media_type.lower() in INLINE_CONTENT_TYPES: + disposition = "inline" + else: + disposition = "attachment" + if upload_name: # RFC6266 section 4.1 [1] defines both `filename` and `filename*`. 
# diff --git a/tests/media/test_base.py b/tests/media/test_base.py index 4728c80969..119d7ba66f 100644 --- a/tests/media/test_base.py +++ b/tests/media/test_base.py @@ -12,7 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.media._base import get_filename_from_headers +from unittest.mock import Mock + +from synapse.media._base import add_file_headers, get_filename_from_headers from tests import unittest @@ -36,3 +38,28 @@ class GetFileNameFromHeadersTests(unittest.TestCase): expected, f"expected output for {hdr!r} to be {expected} but was {res}", ) + + +class AddFileHeadersTests(unittest.TestCase): + TEST_CASES = { + "text/plain": b"inline; filename=file.name", + "text/csv": b"inline; filename=file.name", + "image/png": b"inline; filename=file.name", + "text/html": b"attachment; filename=file.name", + "any/thing": b"attachment; filename=file.name", + } + + def test_content_disposition(self) -> None: + for media_type, expected in self.TEST_CASES.items(): + request = Mock() + add_file_headers(request, media_type, 0, "file.name") + request.setHeader.assert_any_call(b"Content-Disposition", expected) + + def test_no_filename(self) -> None: + request = Mock() + add_file_headers(request, "text/plain", 0, None) + request.setHeader.assert_any_call(b"Content-Disposition", b"inline") + + request.reset_mock() + add_file_headers(request, "text/html", 0, None) + request.setHeader.assert_any_call(b"Content-Disposition", b"attachment") diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py index ea0051dde4..04fc7bdcef 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -129,6 +129,8 @@ class _TestImage: a 404/400 is expected. unable_to_thumbnail: True if we expect the thumbnailing to fail (400), or False if the thumbnailing should succeed or a normal 404 is expected. + is_inline: True if we expect the file to be served using an inline + Content-Disposition or False if we expect an attachment. """ data: bytes @@ -138,6 +140,7 @@ class _TestImage: expected_scaled: Optional[bytes] = None expected_found: bool = True unable_to_thumbnail: bool = False + is_inline: bool = True @parameterized_class( @@ -198,6 +201,25 @@ class _TestImage: unable_to_thumbnail=True, ), ), + # An SVG. 
+        (
+            _TestImage(
+                b"""<?xml version="1.0"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
+  "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+
+<svg xmlns="http://www.w3.org/2000/svg"
+     width="400" height="400">
+  <circle cx="100" cy="100" r="50" stroke="black"
+    stroke-width="5" fill="red" />
+</svg>""",
+                b"image/svg",
+                b".svg",
+                expected_found=False,
+                unable_to_thumbnail=True,
+                is_inline=False,
+            ),
+        ),
     ],
 )
 class MediaRepoTests(unittest.HomeserverTestCase):
@@ -339,7 +361,11 @@ class MediaRepoTests(unittest.HomeserverTestCase):
         )
         self.assertEqual(
             headers.getRawHeaders(b"Content-Disposition"),
-            [b"attachment; filename=out" + self.test_image.extension],
+            [
+                (b"inline" if self.test_image.is_inline else b"attachment")
+                + b"; filename=out"
+                + self.test_image.extension
+            ],
         )
 
     def test_disposition_filenamestar_utf8escaped(self) -> None:
@@ -359,7 +385,12 @@ class MediaRepoTests(unittest.HomeserverTestCase):
         )
         self.assertEqual(
             headers.getRawHeaders(b"Content-Disposition"),
-            [b"attachment; filename*=utf-8''" + filename + self.test_image.extension],
+            [
+                (b"inline" if self.test_image.is_inline else b"attachment")
+                + b"; filename*=utf-8''"
+                + filename
+                + self.test_image.extension
+            ],
         )
 
     def test_disposition_none(self) -> None:
@@ -373,7 +404,10 @@ class MediaRepoTests(unittest.HomeserverTestCase):
         self.assertEqual(
             headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type]
         )
-        self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), [b"attachment"])
+        self.assertEqual(
+            headers.getRawHeaders(b"Content-Disposition"),
+            [b"inline" if self.test_image.is_inline else b"attachment"],
+        )
 
     def test_thumbnail_crop(self) -> None:
         """Test that a cropped remote thumbnail is available."""

From 20fb08ec803c324a58e0f972935a27debaac133f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 29 Sep 2023 14:52:48 +0300
Subject: [PATCH 533/562] Downgrade repl stream time out error to warning
 (#16401)

This is because if a worker reaches ~100% CPU then everything starts
lagging and we hit the log line a lot. When at error level we invoke
sentry and that has a lot of overhead, which then puts even more
pressure on the worker.
---
 changelog.d/16401.misc            | 1 +
 synapse/replication/tcp/client.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/16401.misc

diff --git a/changelog.d/16401.misc b/changelog.d/16401.misc
new file mode 100644
index 0000000000..86d2749a08
--- /dev/null
+++ b/changelog.d/16401.misc
@@ -0,0 +1 @@
+Downgrade replication stream time out error log lines to warning.
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 1c7946522a..f4f2b29e96 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -339,7 +339,7 @@ class ReplicationDataHandler: try: await make_deferred_yieldable(deferred) except defer.TimeoutError: - logger.error( + logger.warning( "Timed out waiting for repl stream %r to reach %s (%s)" "; currently at: %s", stream_name, From 451c08d868ea6431c367d72f6bbbc1ced41469d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 10:33:59 +0100 Subject: [PATCH 534/562] Bump regex from 1.9.5 to 1.9.6 (#16408) --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ea9aa18a5c..084b8b91c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -291,9 +291,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.5" +version = "1.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" +checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff" dependencies = [ "aho-corasick", "memchr", @@ -303,9 +303,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" +checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9" dependencies = [ "aho-corasick", "memchr", From 18b453488f27496195453af909c3ed9841970d4a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 10:34:22 +0100 Subject: [PATCH 535/562] Bump psycopg2 from 2.9.7 to 2.9.8 (#16409) --- poetry.lock | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/poetry.lock b/poetry.lock index bf229349cb..d5ab142faa 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1749,22 +1749,22 @@ twisted = ["twisted"] [[package]] name = "psycopg2" -version = "2.9.7" +version = "2.9.8" description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true python-versions = ">=3.6" files = [ - {file = "psycopg2-2.9.7-cp310-cp310-win32.whl", hash = "sha256:1a6a2d609bce44f78af4556bea0c62a5e7f05c23e5ea9c599e07678995609084"}, - {file = "psycopg2-2.9.7-cp310-cp310-win_amd64.whl", hash = "sha256:b22ed9c66da2589a664e0f1ca2465c29b75aaab36fa209d4fb916025fb9119e5"}, - {file = "psycopg2-2.9.7-cp311-cp311-win32.whl", hash = "sha256:44d93a0109dfdf22fe399b419bcd7fa589d86895d3931b01fb321d74dadc68f1"}, - {file = "psycopg2-2.9.7-cp311-cp311-win_amd64.whl", hash = "sha256:91e81a8333a0037babfc9fe6d11e997a9d4dac0f38c43074886b0d9dead94fe9"}, - {file = "psycopg2-2.9.7-cp37-cp37m-win32.whl", hash = "sha256:d1210fcf99aae6f728812d1d2240afc1dc44b9e6cba526a06fb8134f969957c2"}, - {file = "psycopg2-2.9.7-cp37-cp37m-win_amd64.whl", hash = "sha256:e9b04cbef584310a1ac0f0d55bb623ca3244c87c51187645432e342de9ae81a8"}, - {file = "psycopg2-2.9.7-cp38-cp38-win32.whl", hash = "sha256:d5c5297e2fbc8068d4255f1e606bfc9291f06f91ec31b2a0d4c536210ac5c0a2"}, - {file = "psycopg2-2.9.7-cp38-cp38-win_amd64.whl", hash = "sha256:8275abf628c6dc7ec834ea63f6f3846bf33518907a2b9b693d41fd063767a866"}, - {file = "psycopg2-2.9.7-cp39-cp39-win32.whl", hash = "sha256:c7949770cafbd2f12cecc97dea410c514368908a103acf519f2a346134caa4d5"}, - {file = 
"psycopg2-2.9.7-cp39-cp39-win_amd64.whl", hash = "sha256:b6bd7d9d3a7a63faae6edf365f0ed0e9b0a1aaf1da3ca146e6b043fb3eb5d723"}, - {file = "psycopg2-2.9.7.tar.gz", hash = "sha256:f00cc35bd7119f1fed17b85bd1007855194dde2cbd8de01ab8ebb17487440ad8"}, + {file = "psycopg2-2.9.8-cp310-cp310-win32.whl", hash = "sha256:2f8594f92bbb5d8b59ffec04e2686c416401e2d4297de1193f8e75235937e71d"}, + {file = "psycopg2-2.9.8-cp310-cp310-win_amd64.whl", hash = "sha256:f9ecbf504c4eaff90139d5c9b95d47275f2b2651e14eba56392b4041fbf4c2b3"}, + {file = "psycopg2-2.9.8-cp311-cp311-win32.whl", hash = "sha256:65f81e72136d8b9ac8abf5206938d60f50da424149a43b6073f1546063c0565e"}, + {file = "psycopg2-2.9.8-cp311-cp311-win_amd64.whl", hash = "sha256:f7e62095d749359b7854143843f27edd7dccfcd3e1d833b880562aa5702d92b0"}, + {file = "psycopg2-2.9.8-cp37-cp37m-win32.whl", hash = "sha256:81b21424023a290a40884c7f8b0093ba6465b59bd785c18f757e76945f65594c"}, + {file = "psycopg2-2.9.8-cp37-cp37m-win_amd64.whl", hash = "sha256:67c2f32f3aba79afb15799575e77ee2db6b46b8acf943c21d34d02d4e1041d50"}, + {file = "psycopg2-2.9.8-cp38-cp38-win32.whl", hash = "sha256:287a64ef168ef7fb9f382964705ff664b342bfff47e7242bf0a04ef203269dd5"}, + {file = "psycopg2-2.9.8-cp38-cp38-win_amd64.whl", hash = "sha256:dcde3cad4920e29e74bf4e76c072649764914facb2069e6b7fa1ddbebcd49e9f"}, + {file = "psycopg2-2.9.8-cp39-cp39-win32.whl", hash = "sha256:d4ad050ea50a16731d219c3a85e8f2debf49415a070f0b8331ccc96c81700d9b"}, + {file = "psycopg2-2.9.8-cp39-cp39-win_amd64.whl", hash = "sha256:d39bb3959788b2c9d7bf5ff762e29f436172b241cd7b47529baac77746fd7918"}, + {file = "psycopg2-2.9.8.tar.gz", hash = "sha256:3da6488042a53b50933244085f3f91803f1b7271f970f3e5536efa69314f6a49"}, ] [[package]] @@ -2170,6 +2170,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -2177,8 +2178,15 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = 
"sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -2195,6 +2203,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -2202,6 +2211,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = 
"PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, From 36c8b66403f0a59a07a76054ae8c9f00f831f579 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 10:35:11 +0100 Subject: [PATCH 536/562] Bump pydantic from 2.3.0 to 2.4.2 (#16410) --- poetry.lock | 222 ++++++++++++++++++++++++++-------------------------- 1 file changed, 111 insertions(+), 111 deletions(-) diff --git a/poetry.lock b/poetry.lock index d5ab142faa..83e8a71ed1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1832,18 +1832,18 @@ files = [ [[package]] name = "pydantic" -version = "2.3.0" +version = "2.4.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-2.3.0-py3-none-any.whl", hash = "sha256:45b5e446c6dfaad9444819a293b921a40e1db1aa61ea08aede0522529ce90e81"}, - {file = "pydantic-2.3.0.tar.gz", hash = "sha256:1607cc106602284cd4a00882986570472f193fde9cb1259bceeaedb26aa79a6d"}, + {file = "pydantic-2.4.2-py3-none-any.whl", hash = "sha256:bc3ddf669d234f4220e6e1c4d96b061abe0998185a8d7855c0126782b7abc8c1"}, + {file = "pydantic-2.4.2.tar.gz", hash = "sha256:94f336138093a5d7f426aac732dcfe7ab4eb4da243c88f891d65deb4a2556ee7"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.6.3" +pydantic-core = "2.10.1" typing-extensions = ">=4.6.1" [package.extras] @@ -1851,117 +1851,117 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.6.3" +version = "2.10.1" description = "" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic_core-2.6.3-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:1a0ddaa723c48af27d19f27f1c73bdc615c73686d763388c8683fe34ae777bad"}, - {file = "pydantic_core-2.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5cfde4fab34dd1e3a3f7f3db38182ab6c95e4ea91cf322242ee0be5c2f7e3d2f"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5493a7027bfc6b108e17c3383959485087d5942e87eb62bbac69829eae9bc1f7"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:84e87c16f582f5c753b7f39a71bd6647255512191be2d2dbf49458c4ef024588"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:522a9c4a4d1924facce7270c84b5134c5cabcb01513213662a2e89cf28c1d309"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaafc776e5edc72b3cad1ccedb5fd869cc5c9a591f1213aa9eba31a781be9ac1"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a750a83b2728299ca12e003d73d1264ad0440f60f4fc9cee54acc489249b728"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e8b374ef41ad5c461efb7a140ce4730661aadf85958b5c6a3e9cf4e040ff4bb"}, - {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b594b64e8568cf09ee5c9501ede37066b9fc41d83d58f55b9952e32141256acd"}, - {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2a20c533cb80466c1d42a43a4521669ccad7cf2967830ac62c2c2f9cece63e7e"}, - {file = "pydantic_core-2.6.3-cp310-none-win32.whl", hash = "sha256:04fe5c0a43dec39aedba0ec9579001061d4653a9b53a1366b113aca4a3c05ca7"}, - {file = "pydantic_core-2.6.3-cp310-none-win_amd64.whl", hash = 
"sha256:6bf7d610ac8f0065a286002a23bcce241ea8248c71988bda538edcc90e0c39ad"}, - {file = "pydantic_core-2.6.3-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:6bcc1ad776fffe25ea5c187a028991c031a00ff92d012ca1cc4714087e575973"}, - {file = "pydantic_core-2.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:df14f6332834444b4a37685810216cc8fe1fe91f447332cd56294c984ecbff1c"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0b7486d85293f7f0bbc39b34e1d8aa26210b450bbd3d245ec3d732864009819"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a892b5b1871b301ce20d40b037ffbe33d1407a39639c2b05356acfef5536d26a"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:883daa467865e5766931e07eb20f3e8152324f0adf52658f4d302242c12e2c32"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4eb77df2964b64ba190eee00b2312a1fd7a862af8918ec70fc2d6308f76ac64"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce8c84051fa292a5dc54018a40e2a1926fd17980a9422c973e3ebea017aa8da"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:22134a4453bd59b7d1e895c455fe277af9d9d9fbbcb9dc3f4a97b8693e7e2c9b"}, - {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:02e1c385095efbd997311d85c6021d32369675c09bcbfff3b69d84e59dc103f6"}, - {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d79f1f2f7ebdb9b741296b69049ff44aedd95976bfee38eb4848820628a99b50"}, - {file = "pydantic_core-2.6.3-cp311-none-win32.whl", hash = "sha256:430ddd965ffd068dd70ef4e4d74f2c489c3a313adc28e829dd7262cc0d2dd1e8"}, - {file = "pydantic_core-2.6.3-cp311-none-win_amd64.whl", hash = "sha256:84f8bb34fe76c68c9d96b77c60cef093f5e660ef8e43a6cbfcd991017d375950"}, - {file = "pydantic_core-2.6.3-cp311-none-win_arm64.whl", hash = "sha256:5a2a3c9ef904dcdadb550eedf3291ec3f229431b0084666e2c2aa8ff99a103a2"}, - {file = "pydantic_core-2.6.3-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:8421cf496e746cf8d6b677502ed9a0d1e4e956586cd8b221e1312e0841c002d5"}, - {file = "pydantic_core-2.6.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bb128c30cf1df0ab78166ded1ecf876620fb9aac84d2413e8ea1594b588c735d"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37a822f630712817b6ecc09ccc378192ef5ff12e2c9bae97eb5968a6cdf3b862"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:240a015102a0c0cc8114f1cba6444499a8a4d0333e178bc504a5c2196defd456"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f90e5e3afb11268628c89f378f7a1ea3f2fe502a28af4192e30a6cdea1e7d5e"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:340e96c08de1069f3d022a85c2a8c63529fd88709468373b418f4cf2c949fb0e"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1480fa4682e8202b560dcdc9eeec1005f62a15742b813c88cdc01d44e85308e5"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f14546403c2a1d11a130b537dda28f07eb6c1805a43dae4617448074fd49c282"}, - {file = 
"pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a87c54e72aa2ef30189dc74427421e074ab4561cf2bf314589f6af5b37f45e6d"}, - {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f93255b3e4d64785554e544c1c76cd32f4a354fa79e2eeca5d16ac2e7fdd57aa"}, - {file = "pydantic_core-2.6.3-cp312-none-win32.whl", hash = "sha256:f70dc00a91311a1aea124e5f64569ea44c011b58433981313202c46bccbec0e1"}, - {file = "pydantic_core-2.6.3-cp312-none-win_amd64.whl", hash = "sha256:23470a23614c701b37252618e7851e595060a96a23016f9a084f3f92f5ed5881"}, - {file = "pydantic_core-2.6.3-cp312-none-win_arm64.whl", hash = "sha256:1ac1750df1b4339b543531ce793b8fd5c16660a95d13aecaab26b44ce11775e9"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:a53e3195f134bde03620d87a7e2b2f2046e0e5a8195e66d0f244d6d5b2f6d31b"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:f2969e8f72c6236c51f91fbb79c33821d12a811e2a94b7aa59c65f8dbdfad34a"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:672174480a85386dd2e681cadd7d951471ad0bb028ed744c895f11f9d51b9ebe"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:002d0ea50e17ed982c2d65b480bd975fc41086a5a2f9c924ef8fc54419d1dea3"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ccc13afee44b9006a73d2046068d4df96dc5b333bf3509d9a06d1b42db6d8bf"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:439a0de139556745ae53f9cc9668c6c2053444af940d3ef3ecad95b079bc9987"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d63b7545d489422d417a0cae6f9898618669608750fc5e62156957e609e728a5"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b44c42edc07a50a081672e25dfe6022554b47f91e793066a7b601ca290f71e42"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1c721bfc575d57305dd922e6a40a8fe3f762905851d694245807a351ad255c58"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5e4a2cf8c4543f37f5dc881de6c190de08096c53986381daebb56a355be5dfe6"}, - {file = "pydantic_core-2.6.3-cp37-none-win32.whl", hash = "sha256:d9b4916b21931b08096efed090327f8fe78e09ae8f5ad44e07f5c72a7eedb51b"}, - {file = "pydantic_core-2.6.3-cp37-none-win_amd64.whl", hash = "sha256:a8acc9dedd304da161eb071cc7ff1326aa5b66aadec9622b2574ad3ffe225525"}, - {file = "pydantic_core-2.6.3-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:5e9c068f36b9f396399d43bfb6defd4cc99c36215f6ff33ac8b9c14ba15bdf6b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e61eae9b31799c32c5f9b7be906be3380e699e74b2db26c227c50a5fc7988698"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85463560c67fc65cd86153a4975d0b720b6d7725cf7ee0b2d291288433fc21b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9616567800bdc83ce136e5847d41008a1d602213d024207b0ff6cab6753fe645"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e9b65a55bbabda7fccd3500192a79f6e474d8d36e78d1685496aad5f9dbd92c"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:f468d520f47807d1eb5d27648393519655eadc578d5dd862d06873cce04c4d1b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9680dd23055dd874173a3a63a44e7f5a13885a4cfd7e84814be71be24fba83db"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a718d56c4d55efcfc63f680f207c9f19c8376e5a8a67773535e6f7e80e93170"}, - {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8ecbac050856eb6c3046dea655b39216597e373aa8e50e134c0e202f9c47efec"}, - {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:788be9844a6e5c4612b74512a76b2153f1877cd845410d756841f6c3420230eb"}, - {file = "pydantic_core-2.6.3-cp38-none-win32.whl", hash = "sha256:07a1aec07333bf5adebd8264047d3dc518563d92aca6f2f5b36f505132399efc"}, - {file = "pydantic_core-2.6.3-cp38-none-win_amd64.whl", hash = "sha256:621afe25cc2b3c4ba05fff53525156d5100eb35c6e5a7cf31d66cc9e1963e378"}, - {file = "pydantic_core-2.6.3-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:813aab5bfb19c98ae370952b6f7190f1e28e565909bfc219a0909db168783465"}, - {file = "pydantic_core-2.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:50555ba3cb58f9861b7a48c493636b996a617db1a72c18da4d7f16d7b1b9952b"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e20f8baedd7d987bd3f8005c146e6bcbda7cdeefc36fad50c66adb2dd2da48"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b0a5d7edb76c1c57b95df719af703e796fc8e796447a1da939f97bfa8a918d60"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f06e21ad0b504658a3a9edd3d8530e8cea5723f6ea5d280e8db8efc625b47e49"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea053cefa008fda40f92aab937fb9f183cf8752e41dbc7bc68917884454c6362"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:171a4718860790f66d6c2eda1d95dd1edf64f864d2e9f9115840840cf5b5713f"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ed7ceca6aba5331ece96c0e328cd52f0dcf942b8895a1ed2642de50800b79d3"}, - {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:acafc4368b289a9f291e204d2c4c75908557d4f36bd3ae937914d4529bf62a76"}, - {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1aa712ba150d5105814e53cb141412217146fedc22621e9acff9236d77d2a5ef"}, - {file = "pydantic_core-2.6.3-cp39-none-win32.whl", hash = "sha256:44b4f937b992394a2e81a5c5ce716f3dcc1237281e81b80c748b2da6dd5cf29a"}, - {file = "pydantic_core-2.6.3-cp39-none-win_amd64.whl", hash = "sha256:9b33bf9658cb29ac1a517c11e865112316d09687d767d7a0e4a63d5c640d1b17"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d7050899026e708fb185e174c63ebc2c4ee7a0c17b0a96ebc50e1f76a231c057"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:99faba727727b2e59129c59542284efebbddade4f0ae6a29c8b8d3e1f437beb7"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fa159b902d22b283b680ef52b532b29554ea2a7fc39bf354064751369e9dbd7"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:046af9cfb5384f3684eeb3f58a48698ddab8dd870b4b3f67f825353a14441418"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:930bfe73e665ebce3f0da2c6d64455098aaa67e1a00323c74dc752627879fc67"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:85cc4d105747d2aa3c5cf3e37dac50141bff779545ba59a095f4a96b0a460e70"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b25afe9d5c4f60dcbbe2b277a79be114e2e65a16598db8abee2a2dcde24f162b"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e49ce7dc9f925e1fb010fc3d555250139df61fa6e5a0a95ce356329602c11ea9"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2dd50d6a1aef0426a1d0199190c6c43ec89812b1f409e7fe44cb0fbf6dfa733c"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6595b0d8c8711e8e1dc389d52648b923b809f68ac1c6f0baa525c6440aa0daa"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ef724a059396751aef71e847178d66ad7fc3fc969a1a40c29f5aac1aa5f8784"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3c8945a105f1589ce8a693753b908815e0748f6279959a4530f6742e1994dcb6"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c8c6660089a25d45333cb9db56bb9e347241a6d7509838dbbd1931d0e19dbc7f"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:692b4ff5c4e828a38716cfa92667661a39886e71136c97b7dac26edef18767f7"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f1a5d8f18877474c80b7711d870db0eeef9442691fcdb00adabfc97e183ee0b0"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:3796a6152c545339d3b1652183e786df648ecdf7c4f9347e1d30e6750907f5bb"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b962700962f6e7a6bd77e5f37320cabac24b4c0f76afeac05e9f93cf0c620014"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56ea80269077003eaa59723bac1d8bacd2cd15ae30456f2890811efc1e3d4413"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c0ebbebae71ed1e385f7dfd9b74c1cff09fed24a6df43d326dd7f12339ec34"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:252851b38bad3bfda47b104ffd077d4f9604a10cb06fe09d020016a25107bf98"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6656a0ae383d8cd7cc94e91de4e526407b3726049ce8d7939049cbfa426518c8"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d9140ded382a5b04a1c030b593ed9bf3088243a0a8b7fa9f071a5736498c5483"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d38bbcef58220f9c81e42c255ef0bf99735d8f11edef69ab0b499da77105158a"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:c9d469204abcca28926cbc28ce98f28e50e488767b084fb3fbdf21af11d3de26"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48c1ed8b02ffea4d5c9c220eda27af02b8149fe58526359b3c07eb391cb353a2"}, - {file = 
"pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b2b1bfed698fa410ab81982f681f5b1996d3d994ae8073286515ac4d165c2e7"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf9d42a71a4d7a7c1f14f629e5c30eac451a6fc81827d2beefd57d014c006c4a"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4292ca56751aebbe63a84bbfc3b5717abb09b14d4b4442cc43fd7c49a1529efd"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7dc2ce039c7290b4ef64334ec7e6ca6494de6eecc81e21cb4f73b9b39991408c"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:615a31b1629e12445c0e9fc8339b41aaa6cc60bd53bf802d5fe3d2c0cda2ae8d"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1fa1f6312fb84e8c281f32b39affe81984ccd484da6e9d65b3d18c202c666149"}, - {file = "pydantic_core-2.6.3.tar.gz", hash = "sha256:1508f37ba9e3ddc0189e6ff4e2228bd2d3c3a4641cbe8c07177162f76ed696c7"}, + {file = "pydantic_core-2.10.1-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:d64728ee14e667ba27c66314b7d880b8eeb050e58ffc5fec3b7a109f8cddbd63"}, + {file = "pydantic_core-2.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:48525933fea744a3e7464c19bfede85df4aba79ce90c60b94d8b6e1eddd67096"}, + {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef337945bbd76cce390d1b2496ccf9f90b1c1242a3a7bc242ca4a9fc5993427a"}, + {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1392e0638af203cee360495fd2cfdd6054711f2db5175b6e9c3c461b76f5175"}, + {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0675ba5d22de54d07bccde38997e780044dcfa9a71aac9fd7d4d7a1d2e3e65f7"}, + {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:128552af70a64660f21cb0eb4876cbdadf1a1f9d5de820fed6421fa8de07c893"}, + {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f6e6aed5818c264412ac0598b581a002a9f050cb2637a84979859e70197aa9e"}, + {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ecaac27da855b8d73f92123e5f03612b04c5632fd0a476e469dfc47cd37d6b2e"}, + {file = "pydantic_core-2.10.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b3c01c2fb081fced3bbb3da78510693dc7121bb893a1f0f5f4b48013201f362e"}, + {file = "pydantic_core-2.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:92f675fefa977625105708492850bcbc1182bfc3e997f8eecb866d1927c98ae6"}, + {file = "pydantic_core-2.10.1-cp310-none-win32.whl", hash = "sha256:420a692b547736a8d8703c39ea935ab5d8f0d2573f8f123b0a294e49a73f214b"}, + {file = "pydantic_core-2.10.1-cp310-none-win_amd64.whl", hash = "sha256:0880e239827b4b5b3e2ce05e6b766a7414e5f5aedc4523be6b68cfbc7f61c5d0"}, + {file = "pydantic_core-2.10.1-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:073d4a470b195d2b2245d0343569aac7e979d3a0dcce6c7d2af6d8a920ad0bea"}, + {file = "pydantic_core-2.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:600d04a7b342363058b9190d4e929a8e2e715c5682a70cc37d5ded1e0dd370b4"}, + {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39215d809470f4c8d1881758575b2abfb80174a9e8daf8f33b1d4379357e417c"}, + {file = 
"pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eeb3d3d6b399ffe55f9a04e09e635554012f1980696d6b0aca3e6cf42a17a03b"}, + {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a7a7902bf75779bc12ccfc508bfb7a4c47063f748ea3de87135d433a4cca7a2f"}, + {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3625578b6010c65964d177626fde80cf60d7f2e297d56b925cb5cdeda6e9925a"}, + {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:caa48fc31fc7243e50188197b5f0c4228956f97b954f76da157aae7f67269ae8"}, + {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:07ec6d7d929ae9c68f716195ce15e745b3e8fa122fc67698ac6498d802ed0fa4"}, + {file = "pydantic_core-2.10.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e6f31a17acede6a8cd1ae2d123ce04d8cca74056c9d456075f4f6f85de055607"}, + {file = "pydantic_core-2.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d8f1ebca515a03e5654f88411420fea6380fc841d1bea08effb28184e3d4899f"}, + {file = "pydantic_core-2.10.1-cp311-none-win32.whl", hash = "sha256:6db2eb9654a85ada248afa5a6db5ff1cf0f7b16043a6b070adc4a5be68c716d6"}, + {file = "pydantic_core-2.10.1-cp311-none-win_amd64.whl", hash = "sha256:4a5be350f922430997f240d25f8219f93b0c81e15f7b30b868b2fddfc2d05f27"}, + {file = "pydantic_core-2.10.1-cp311-none-win_arm64.whl", hash = "sha256:5fdb39f67c779b183b0c853cd6b45f7db84b84e0571b3ef1c89cdb1dfc367325"}, + {file = "pydantic_core-2.10.1-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:b1f22a9ab44de5f082216270552aa54259db20189e68fc12484873d926426921"}, + {file = "pydantic_core-2.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8572cadbf4cfa95fb4187775b5ade2eaa93511f07947b38f4cd67cf10783b118"}, + {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db9a28c063c7c00844ae42a80203eb6d2d6bbb97070cfa00194dff40e6f545ab"}, + {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e2a35baa428181cb2270a15864ec6286822d3576f2ed0f4cd7f0c1708472aff"}, + {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05560ab976012bf40f25d5225a58bfa649bb897b87192a36c6fef1ab132540d7"}, + {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d6495008733c7521a89422d7a68efa0a0122c99a5861f06020ef5b1f51f9ba7c"}, + {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14ac492c686defc8e6133e3a2d9eaf5261b3df26b8ae97450c1647286750b901"}, + {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8282bab177a9a3081fd3d0a0175a07a1e2bfb7fcbbd949519ea0980f8a07144d"}, + {file = "pydantic_core-2.10.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:aafdb89fdeb5fe165043896817eccd6434aee124d5ee9b354f92cd574ba5e78f"}, + {file = "pydantic_core-2.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f6defd966ca3b187ec6c366604e9296f585021d922e666b99c47e78738b5666c"}, + {file = "pydantic_core-2.10.1-cp312-none-win32.whl", hash = "sha256:7c4d1894fe112b0864c1fa75dffa045720a194b227bed12f4be7f6045b25209f"}, + {file = "pydantic_core-2.10.1-cp312-none-win_amd64.whl", hash = "sha256:5994985da903d0b8a08e4935c46ed8daf5be1cf217489e673910951dc533d430"}, + {file = 
"pydantic_core-2.10.1-cp312-none-win_arm64.whl", hash = "sha256:0d8a8adef23d86d8eceed3e32e9cca8879c7481c183f84ed1a8edc7df073af94"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:9badf8d45171d92387410b04639d73811b785b5161ecadabf056ea14d62d4ede"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:ebedb45b9feb7258fac0a268a3f6bec0a2ea4d9558f3d6f813f02ff3a6dc6698"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cfe1090245c078720d250d19cb05d67e21a9cd7c257698ef139bc41cf6c27b4f"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e357571bb0efd65fd55f18db0a2fb0ed89d0bb1d41d906b138f088933ae618bb"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b3dcd587b69bbf54fc04ca157c2323b8911033e827fffaecf0cafa5a892a0904"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c120c9ce3b163b985a3b966bb701114beb1da4b0468b9b236fc754783d85aa3"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15d6bca84ffc966cc9976b09a18cf9543ed4d4ecbd97e7086f9ce9327ea48891"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5cabb9710f09d5d2e9e2748c3e3e20d991a4c5f96ed8f1132518f54ab2967221"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:82f55187a5bebae7d81d35b1e9aaea5e169d44819789837cdd4720d768c55d15"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:1d40f55222b233e98e3921df7811c27567f0e1a4411b93d4c5c0f4ce131bc42f"}, + {file = "pydantic_core-2.10.1-cp37-none-win32.whl", hash = "sha256:14e09ff0b8fe6e46b93d36a878f6e4a3a98ba5303c76bb8e716f4878a3bee92c"}, + {file = "pydantic_core-2.10.1-cp37-none-win_amd64.whl", hash = "sha256:1396e81b83516b9d5c9e26a924fa69164156c148c717131f54f586485ac3c15e"}, + {file = "pydantic_core-2.10.1-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:6835451b57c1b467b95ffb03a38bb75b52fb4dc2762bb1d9dbed8de31ea7d0fc"}, + {file = "pydantic_core-2.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b00bc4619f60c853556b35f83731bd817f989cba3e97dc792bb8c97941b8053a"}, + {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fa467fd300a6f046bdb248d40cd015b21b7576c168a6bb20aa22e595c8ffcdd"}, + {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d99277877daf2efe074eae6338453a4ed54a2d93fb4678ddfe1209a0c93a2468"}, + {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa7db7558607afeccb33c0e4bf1c9a9a835e26599e76af6fe2fcea45904083a6"}, + {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aad7bd686363d1ce4ee930ad39f14e1673248373f4a9d74d2b9554f06199fb58"}, + {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:443fed67d33aa85357464f297e3d26e570267d1af6fef1c21ca50921d2976302"}, + {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:042462d8d6ba707fd3ce9649e7bf268633a41018d6a998fb5fbacb7e928a183e"}, + {file = "pydantic_core-2.10.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ecdbde46235f3d560b18be0cb706c8e8ad1b965e5c13bbba7450c86064e96561"}, + 
{file = "pydantic_core-2.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ed550ed05540c03f0e69e6d74ad58d026de61b9eaebebbaaf8873e585cbb18de"}, + {file = "pydantic_core-2.10.1-cp38-none-win32.whl", hash = "sha256:8cdbbd92154db2fec4ec973d45c565e767ddc20aa6dbaf50142676484cbff8ee"}, + {file = "pydantic_core-2.10.1-cp38-none-win_amd64.whl", hash = "sha256:9f6f3e2598604956480f6c8aa24a3384dbf6509fe995d97f6ca6103bb8c2534e"}, + {file = "pydantic_core-2.10.1-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:655f8f4c8d6a5963c9a0687793da37b9b681d9ad06f29438a3b2326d4e6b7970"}, + {file = "pydantic_core-2.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e570ffeb2170e116a5b17e83f19911020ac79d19c96f320cbfa1fa96b470185b"}, + {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64322bfa13e44c6c30c518729ef08fda6026b96d5c0be724b3c4ae4da939f875"}, + {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:485a91abe3a07c3a8d1e082ba29254eea3e2bb13cbbd4351ea4e5a21912cc9b0"}, + {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7c2b8eb9fc872e68b46eeaf835e86bccc3a58ba57d0eedc109cbb14177be531"}, + {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a5cb87bdc2e5f620693148b5f8f842d293cae46c5f15a1b1bf7ceeed324a740c"}, + {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25bd966103890ccfa028841a8f30cebcf5875eeac8c4bde4fe221364c92f0c9a"}, + {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f323306d0556351735b54acbf82904fe30a27b6a7147153cbe6e19aaaa2aa429"}, + {file = "pydantic_core-2.10.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0c27f38dc4fbf07b358b2bc90edf35e82d1703e22ff2efa4af4ad5de1b3833e7"}, + {file = "pydantic_core-2.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f1365e032a477c1430cfe0cf2856679529a2331426f8081172c4a74186f1d595"}, + {file = "pydantic_core-2.10.1-cp39-none-win32.whl", hash = "sha256:a1c311fd06ab3b10805abb72109f01a134019739bd3286b8ae1bc2fc4e50c07a"}, + {file = "pydantic_core-2.10.1-cp39-none-win_amd64.whl", hash = "sha256:ae8a8843b11dc0b03b57b52793e391f0122e740de3df1474814c700d2622950a"}, + {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d43002441932f9a9ea5d6f9efaa2e21458221a3a4b417a14027a1d530201ef1b"}, + {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fcb83175cc4936a5425dde3356f079ae03c0802bbdf8ff82c035f8a54b333521"}, + {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:962ed72424bf1f72334e2f1e61b68f16c0e596f024ca7ac5daf229f7c26e4208"}, + {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2cf5bb4dd67f20f3bbc1209ef572a259027c49e5ff694fa56bed62959b41e1f9"}, + {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e544246b859f17373bed915182ab841b80849ed9cf23f1f07b73b7c58baee5fb"}, + {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c0877239307b7e69d025b73774e88e86ce82f6ba6adf98f41069d5b0b78bd1bf"}, + {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:53df009d1e1ba40f696f8995683e067e3967101d4bb4ea6f667931b7d4a01357"}, + {file = 
"pydantic_core-2.10.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a1254357f7e4c82e77c348dabf2d55f1d14d19d91ff025004775e70a6ef40ada"}, + {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:524ff0ca3baea164d6d93a32c58ac79eca9f6cf713586fdc0adb66a8cdeab96a"}, + {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f0ac9fb8608dbc6eaf17956bf623c9119b4db7dbb511650910a82e261e6600f"}, + {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:320f14bd4542a04ab23747ff2c8a778bde727158b606e2661349557f0770711e"}, + {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:63974d168b6233b4ed6a0046296803cb13c56637a7b8106564ab575926572a55"}, + {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:417243bf599ba1f1fef2bb8c543ceb918676954734e2dcb82bf162ae9d7bd514"}, + {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:dda81e5ec82485155a19d9624cfcca9be88a405e2857354e5b089c2a982144b2"}, + {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:14cfbb00959259e15d684505263d5a21732b31248a5dd4941f73a3be233865b9"}, + {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:631cb7415225954fdcc2a024119101946793e5923f6c4d73a5914d27eb3d3a05"}, + {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:bec7dd208a4182e99c5b6c501ce0b1f49de2802448d4056091f8e630b28e9a52"}, + {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:149b8a07712f45b332faee1a2258d8ef1fb4a36f88c0c17cb687f205c5dc6e7d"}, + {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d966c47f9dd73c2d32a809d2be529112d509321c5310ebf54076812e6ecd884"}, + {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7eb037106f5c6b3b0b864ad226b0b7ab58157124161d48e4b30c4a43fef8bc4b"}, + {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:154ea7c52e32dce13065dbb20a4a6f0cc012b4f667ac90d648d36b12007fa9f7"}, + {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e562617a45b5a9da5be4abe72b971d4f00bf8555eb29bb91ec2ef2be348cd132"}, + {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:f23b55eb5464468f9e0e9a9935ce3ed2a870608d5f534025cd5536bca25b1402"}, + {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:e9121b4009339b0f751955baf4543a0bfd6bc3f8188f8056b1a25a2d45099934"}, + {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:0523aeb76e03f753b58be33b26540880bac5aa54422e4462404c432230543f33"}, + {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e0e2959ef5d5b8dc9ef21e1a305a21a36e254e6a34432d00c72a92fdc5ecda5"}, + {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da01bec0a26befab4898ed83b362993c844b9a607a86add78604186297eb047e"}, + {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f2e9072d71c1f6cfc79a36d4484c82823c560e6f5599c43c1ca6b5cdbd54f881"}, + {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:f36a3489d9e28fe4b67be9992a23029c3cec0babc3bd9afb39f49844a8c721c5"}, + {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f64f82cc3443149292b32387086d02a6c7fb39b8781563e0ca7b8d7d9cf72bd7"}, + {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:b4a6db486ac8e99ae696e09efc8b2b9fea67b63c8f88ba7a1a16c24a057a0776"}, + {file = "pydantic_core-2.10.1.tar.gz", hash = "sha256:0f8682dbdd2f67f8e1edddcbffcc29f60a6182b4901c367fc8c1c40d30bb0a82"}, ] [package.dependencies] From 0a59372d9d57f92536d0962d0399b21356ede906 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 10:35:30 +0100 Subject: [PATCH 537/562] Bump types-netaddr from 0.8.0.9 to 0.9.0.1 (#16411) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 83e8a71ed1..f525bc874b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3070,13 +3070,13 @@ files = [ [[package]] name = "types-netaddr" -version = "0.8.0.9" +version = "0.9.0.1" description = "Typing stubs for netaddr" optional = false python-versions = "*" files = [ - {file = "types-netaddr-0.8.0.9.tar.gz", hash = "sha256:68900c267fd31627c1721c5c52b32a257657ac2777457dca49b6b096ba2faf74"}, - {file = "types_netaddr-0.8.0.9-py3-none-any.whl", hash = "sha256:63e871f064cd59473cec1177f372526f0fa3d565050247d5305bdc325be5c3f6"}, + {file = "types-netaddr-0.9.0.1.tar.gz", hash = "sha256:e04638435abad3e3b13a4a6b1b07f36619a47597fd5c10f330474196c058dfb3"}, + {file = "types_netaddr-0.9.0.1-py3-none-any.whl", hash = "sha256:81b98c959d14de96eb53507ac606e8876c91413d273554a59fd42b34e3811fe0"}, ] [[package]] From 891f42f8c84e1adbb51d15cb82673f7ff245bf38 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 10:35:45 +0100 Subject: [PATCH 538/562] Bump msgpack from 1.0.6 to 1.0.7 (#16412) --- poetry.lock | 114 ++++++++++++++++++++++++++-------------------------- 1 file changed, 57 insertions(+), 57 deletions(-) diff --git a/poetry.lock b/poetry.lock index f525bc874b..9de5c49bf7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1390,67 +1390,67 @@ files = [ [[package]] name = "msgpack" -version = "1.0.6" +version = "1.0.7" description = "MessagePack serializer" optional = false python-versions = ">=3.8" files = [ - {file = "msgpack-1.0.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f4321692e7f299277e55f322329b2c972d93bb612d85f3fda8741bec5c6285ce"}, - {file = "msgpack-1.0.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1f0e36a5fa7a182cde391a128a64f437657d2b9371dfa42eda3436245adccbf5"}, - {file = "msgpack-1.0.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b5c8dd9a386a66e50bd7fa22b7a49fb8ead2b3574d6bd69eb1caced6caea0803"}, - {file = "msgpack-1.0.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f85200ea102276afdd3749ca94747f057bbb868d1c52921ee2446730b508d0f"}, - {file = "msgpack-1.0.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a006c300e82402c0c8f1ded11352a3ba2a61b87e7abb3054c845af2ca8d553c"}, - {file = "msgpack-1.0.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:33bbf47ea5a6ff20c23426106e81863cdbb5402de1825493026ce615039cc99d"}, - {file = "msgpack-1.0.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:04450e4b5e1e662e7c86b6aafb7c230af9334fd0becf5e6b80459a507884241c"}, - {file = 
"msgpack-1.0.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b06a5095a79384760625b5de3f83f40b3053a385fb893be8a106fbbd84c14980"}, - {file = "msgpack-1.0.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3910211b0ab20be3a38e0bb944ed45bd4265d8d9f11a3d1674b95b298e08dd5c"}, - {file = "msgpack-1.0.6-cp310-cp310-win32.whl", hash = "sha256:1dc67b40fe81217b308ab12651adba05e7300b3a2ccf84d6b35a878e308dd8d4"}, - {file = "msgpack-1.0.6-cp310-cp310-win_amd64.whl", hash = "sha256:885de1ed5ea01c1bfe0a34c901152a264c3c1f8f1d382042b92ea354bd14bb0e"}, - {file = "msgpack-1.0.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:099c3d8a027367e1a6fc55d15336f04ff65c60c4f737b5739f7db4525c65fe9e"}, - {file = "msgpack-1.0.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9b88dc97ba86c96b964c3745a445d9a65f76fe21955a953064fe04adb63e9367"}, - {file = "msgpack-1.0.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:00ce5f827d4f26fc094043e6f08b6069c1b148efa2631c47615ae14fb6cafc89"}, - {file = "msgpack-1.0.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd6af61388be65a8701f5787362cb54adae20007e0cc67ca9221a4b95115583b"}, - {file = "msgpack-1.0.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:652e4b7497825b0af6259e2c54700e6dc33d2fc4ed92b8839435090d4c9cc911"}, - {file = "msgpack-1.0.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b08676a17e3f791daad34d5fcb18479e9c85e7200d5a17cbe8de798643a7e37"}, - {file = "msgpack-1.0.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:229ccb6713c8b941eaa5cf13dc7478eba117f21513b5893c35e44483e2f0c9c8"}, - {file = "msgpack-1.0.6-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:95ade0bd4cf69e04e8b8f8ec2d197d9c9c4a9b6902e048dc7456bf6d82e12a80"}, - {file = "msgpack-1.0.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b16344032a27b2ccfd341f89dadf3e4ef6407d91e4b93563c14644a8abb3ad7"}, - {file = "msgpack-1.0.6-cp311-cp311-win32.whl", hash = "sha256:55bb4a1bf94e39447bc08238a2fb8a767460388a8192f67c103442eb36920887"}, - {file = "msgpack-1.0.6-cp311-cp311-win_amd64.whl", hash = "sha256:ae97504958d0bc58c1152045c170815d5c4f8af906561ce044b6358b43d0c97e"}, - {file = "msgpack-1.0.6-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7ecf431786019a7bfedc28281531d706627f603e3691d64eccdbce3ecd353823"}, - {file = "msgpack-1.0.6-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a635aecf1047255576dbb0927cbf9a7aa4a68e9d54110cc3c926652d18f144e0"}, - {file = "msgpack-1.0.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:102cfb54eaefa73e8ca1e784b9352c623524185c98e057e519545131a56fb0af"}, - {file = "msgpack-1.0.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c5e05e4f5756758c58a8088aa10dc70d851c89f842b611fdccfc0581c1846bc"}, - {file = "msgpack-1.0.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68569509dd015fcdd1e6b2b3ccc8c51fd27d9a97f461ccc909270e220ee09685"}, - {file = "msgpack-1.0.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bf652839d16de91fe1cfb253e0a88db9a548796939533894e07f45d4bdf90a5f"}, - {file = "msgpack-1.0.6-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14db7e1b7a7ed362b2f94897bf2486c899c8bb50f6e34b2db92fe534cdab306f"}, - {file = "msgpack-1.0.6-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:159cfec18a6e125dd4723e2b1de6f202b34b87c850fb9d509acfd054c01135e9"}, - {file = 
"msgpack-1.0.6-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:6a01a072b2219b65a6ff74df208f20b2cac9401c60adb676ee34e53b4c651077"}, - {file = "msgpack-1.0.6-cp312-cp312-win32.whl", hash = "sha256:e36560d001d4ba469d469b02037f2dd404421fd72277d9474efe9f03f83fced5"}, - {file = "msgpack-1.0.6-cp312-cp312-win_amd64.whl", hash = "sha256:5e7fae9ca93258a956551708cf60dc6c8145574e32ce8c8c4d894e63bcb04341"}, - {file = "msgpack-1.0.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:40b801b768f5a765e33c68f30665d3c6ee1c8623a2d2bb78e6e59f2db4e4ceb7"}, - {file = "msgpack-1.0.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:da057d3652e698b00746e47f06dbb513314f847421e857e32e1dc61c46f6c052"}, - {file = "msgpack-1.0.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f75114c05ec56566da6b55122791cf5bb53d5aada96a98c016d6231e03132f76"}, - {file = "msgpack-1.0.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:61213482b5a387ead9e250e9e3cb290292feca39dc83b41c3b1b7b8ffc8d8ecb"}, - {file = "msgpack-1.0.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bae6c561f11b444b258b1b4be2bdd1e1cf93cd1d80766b7e869a79db4543a8a8"}, - {file = "msgpack-1.0.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:619a63753ba9e792fe3c6c0fc2b9ee2cfbd92153dd91bee029a89a71eb2942cd"}, - {file = "msgpack-1.0.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:70843788c85ca385846a2d2f836efebe7bb2687ca0734648bf5c9dc6c55602d2"}, - {file = "msgpack-1.0.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:fb4571efe86545b772a4630fee578c213c91cbcfd20347806e47fd4e782a18fe"}, - {file = "msgpack-1.0.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bbb4448a05d261fae423d5c0b0974ad899f60825bc77eabad5a0c518e78448c2"}, - {file = "msgpack-1.0.6-cp38-cp38-win32.whl", hash = "sha256:5cd67674db3c73026e0a2c729b909780e88bd9cbc8184256f9567640a5d299a8"}, - {file = "msgpack-1.0.6-cp38-cp38-win_amd64.whl", hash = "sha256:a1cf98afa7ad5e7012454ca3fde254499a13f9d92fd50cb46118118a249a1355"}, - {file = "msgpack-1.0.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d6d25b8a5c70e2334ed61a8da4c11cd9b97c6fbd980c406033f06e4463fda006"}, - {file = "msgpack-1.0.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:88cdb1da7fdb121dbb3116910722f5acab4d6e8bfcacab8fafe27e2e7744dc6a"}, - {file = "msgpack-1.0.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3b5658b1f9e486a2eec4c0c688f213a90085b9cf2fec76ef08f98fdf6c62f4b9"}, - {file = "msgpack-1.0.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76820f2ece3b0a7c948bbb6a599020e29574626d23a649476def023cbb026787"}, - {file = "msgpack-1.0.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c780d992f5d734432726b92a0c87bf1857c3d85082a8dea29cbf56e44a132b3"}, - {file = "msgpack-1.0.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0ed35d6d6122d0baa9a1b59ebca4ee302139f4cfb57dab85e4c73ab793ae7ed"}, - {file = "msgpack-1.0.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:32c0aff31f33033f4961abc01f78497e5e07bac02a508632aef394b384d27428"}, - {file = "msgpack-1.0.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:35ad5aed9b52217d4cea739d0ea3a492a18dd86fecb4b132668a69f27fb0363b"}, - {file = "msgpack-1.0.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47275ff73005a3e5e146e50baa2378e1730cba6e292f0222bc496a8e4c4adfc8"}, - {file = "msgpack-1.0.6-cp39-cp39-win32.whl", hash = 
"sha256:7baf16fd8908a025c4a8d7b699103e72d41f967e2aee5a2065432bcdbd9fd06e"}, - {file = "msgpack-1.0.6-cp39-cp39-win_amd64.whl", hash = "sha256:fc97aa4b4fb928ff4d3b74da7c30b360d0cb3ede49a5a6e1fd9705f49aea1deb"}, - {file = "msgpack-1.0.6.tar.gz", hash = "sha256:25d3746da40f3c8c59c3b1d001e49fd2aa17904438f980d9a391370366df001e"}, + {file = "msgpack-1.0.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:04ad6069c86e531682f9e1e71b71c1c3937d6014a7c3e9edd2aa81ad58842862"}, + {file = "msgpack-1.0.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cca1b62fe70d761a282496b96a5e51c44c213e410a964bdffe0928e611368329"}, + {file = "msgpack-1.0.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e50ebce52f41370707f1e21a59514e3375e3edd6e1832f5e5235237db933c98b"}, + {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7b4f35de6a304b5533c238bee86b670b75b03d31b7797929caa7a624b5dda6"}, + {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28efb066cde83c479dfe5a48141a53bc7e5f13f785b92ddde336c716663039ee"}, + {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4cb14ce54d9b857be9591ac364cb08dc2d6a5c4318c1182cb1d02274029d590d"}, + {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b573a43ef7c368ba4ea06050a957c2a7550f729c31f11dd616d2ac4aba99888d"}, + {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ccf9a39706b604d884d2cb1e27fe973bc55f2890c52f38df742bc1d79ab9f5e1"}, + {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cb70766519500281815dfd7a87d3a178acf7ce95390544b8c90587d76b227681"}, + {file = "msgpack-1.0.7-cp310-cp310-win32.whl", hash = "sha256:b610ff0f24e9f11c9ae653c67ff8cc03c075131401b3e5ef4b82570d1728f8a9"}, + {file = "msgpack-1.0.7-cp310-cp310-win_amd64.whl", hash = "sha256:a40821a89dc373d6427e2b44b572efc36a2778d3f543299e2f24eb1a5de65415"}, + {file = "msgpack-1.0.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:576eb384292b139821c41995523654ad82d1916da6a60cff129c715a6223ea84"}, + {file = "msgpack-1.0.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:730076207cb816138cf1af7f7237b208340a2c5e749707457d70705715c93b93"}, + {file = "msgpack-1.0.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:85765fdf4b27eb5086f05ac0491090fc76f4f2b28e09d9350c31aac25a5aaff8"}, + {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3476fae43db72bd11f29a5147ae2f3cb22e2f1a91d575ef130d2bf49afd21c46"}, + {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d4c80667de2e36970ebf74f42d1088cc9ee7ef5f4e8c35eee1b40eafd33ca5b"}, + {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b0bf0effb196ed76b7ad883848143427a73c355ae8e569fa538365064188b8e"}, + {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f9a7c509542db4eceed3dcf21ee5267ab565a83555c9b88a8109dcecc4709002"}, + {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:84b0daf226913133f899ea9b30618722d45feffa67e4fe867b0b5ae83a34060c"}, + {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ec79ff6159dffcc30853b2ad612ed572af86c92b5168aa3fc01a67b0fa40665e"}, + {file = "msgpack-1.0.7-cp311-cp311-win32.whl", hash = 
"sha256:3e7bf4442b310ff154b7bb9d81eb2c016b7d597e364f97d72b1acc3817a0fdc1"}, + {file = "msgpack-1.0.7-cp311-cp311-win_amd64.whl", hash = "sha256:3f0c8c6dfa6605ab8ff0611995ee30d4f9fcff89966cf562733b4008a3d60d82"}, + {file = "msgpack-1.0.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f0936e08e0003f66bfd97e74ee530427707297b0d0361247e9b4f59ab78ddc8b"}, + {file = "msgpack-1.0.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:98bbd754a422a0b123c66a4c341de0474cad4a5c10c164ceed6ea090f3563db4"}, + {file = "msgpack-1.0.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b291f0ee7961a597cbbcc77709374087fa2a9afe7bdb6a40dbbd9b127e79afee"}, + {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebbbba226f0a108a7366bf4b59bf0f30a12fd5e75100c630267d94d7f0ad20e5"}, + {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e2d69948e4132813b8d1131f29f9101bc2c915f26089a6d632001a5c1349672"}, + {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bdf38ba2d393c7911ae989c3bbba510ebbcdf4ecbdbfec36272abe350c454075"}, + {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:993584fc821c58d5993521bfdcd31a4adf025c7d745bbd4d12ccfecf695af5ba"}, + {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:52700dc63a4676669b341ba33520f4d6e43d3ca58d422e22ba66d1736b0a6e4c"}, + {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e45ae4927759289c30ccba8d9fdce62bb414977ba158286b5ddaf8df2cddb5c5"}, + {file = "msgpack-1.0.7-cp312-cp312-win32.whl", hash = "sha256:27dcd6f46a21c18fa5e5deed92a43d4554e3df8d8ca5a47bf0615d6a5f39dbc9"}, + {file = "msgpack-1.0.7-cp312-cp312-win_amd64.whl", hash = "sha256:7687e22a31e976a0e7fc99c2f4d11ca45eff652a81eb8c8085e9609298916dcf"}, + {file = "msgpack-1.0.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5b6ccc0c85916998d788b295765ea0e9cb9aac7e4a8ed71d12e7d8ac31c23c95"}, + {file = "msgpack-1.0.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:235a31ec7db685f5c82233bddf9858748b89b8119bf4538d514536c485c15fe0"}, + {file = "msgpack-1.0.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cab3db8bab4b7e635c1c97270d7a4b2a90c070b33cbc00c99ef3f9be03d3e1f7"}, + {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bfdd914e55e0d2c9e1526de210f6fe8ffe9705f2b1dfcc4aecc92a4cb4b533d"}, + {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36e17c4592231a7dbd2ed09027823ab295d2791b3b1efb2aee874b10548b7524"}, + {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38949d30b11ae5f95c3c91917ee7a6b239f5ec276f271f28638dec9156f82cfc"}, + {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ff1d0899f104f3921d94579a5638847f783c9b04f2d5f229392ca77fba5b82fc"}, + {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:dc43f1ec66eb8440567186ae2f8c447d91e0372d793dfe8c222aec857b81a8cf"}, + {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dd632777ff3beaaf629f1ab4396caf7ba0bdd075d948a69460d13d44357aca4c"}, + {file = "msgpack-1.0.7-cp38-cp38-win32.whl", hash = "sha256:4e71bc4416de195d6e9b4ee93ad3f2f6b2ce11d042b4d7a7ee00bbe0358bd0c2"}, + {file = "msgpack-1.0.7-cp38-cp38-win_amd64.whl", hash = 
"sha256:8f5b234f567cf76ee489502ceb7165c2a5cecec081db2b37e35332b537f8157c"}, + {file = "msgpack-1.0.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfef2bb6ef068827bbd021017a107194956918ab43ce4d6dc945ffa13efbc25f"}, + {file = "msgpack-1.0.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:484ae3240666ad34cfa31eea7b8c6cd2f1fdaae21d73ce2974211df099a95d81"}, + {file = "msgpack-1.0.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3967e4ad1aa9da62fd53e346ed17d7b2e922cba5ab93bdd46febcac39be636fc"}, + {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8dd178c4c80706546702c59529ffc005681bd6dc2ea234c450661b205445a34d"}, + {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6ffbc252eb0d229aeb2f9ad051200668fc3a9aaa8994e49f0cb2ffe2b7867e7"}, + {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:822ea70dc4018c7e6223f13affd1c5c30c0f5c12ac1f96cd8e9949acddb48a61"}, + {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:384d779f0d6f1b110eae74cb0659d9aa6ff35aaf547b3955abf2ab4c901c4819"}, + {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f64e376cd20d3f030190e8c32e1c64582eba56ac6dc7d5b0b49a9d44021b52fd"}, + {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5ed82f5a7af3697b1c4786053736f24a0efd0a1b8a130d4c7bfee4b9ded0f08f"}, + {file = "msgpack-1.0.7-cp39-cp39-win32.whl", hash = "sha256:f26a07a6e877c76a88e3cecac8531908d980d3d5067ff69213653649ec0f60ad"}, + {file = "msgpack-1.0.7-cp39-cp39-win_amd64.whl", hash = "sha256:1dc93e8e4653bdb5910aed79f11e165c85732067614f180f70534f056da97db3"}, + {file = "msgpack-1.0.7.tar.gz", hash = "sha256:572efc93db7a4d27e404501975ca6d2d9775705c2d922390d878fcf768d92c87"}, ] [[package]] From d40a939ff672cb9391c348c34af34b8a2252be67 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 10:35:57 +0100 Subject: [PATCH 539/562] Bump phonenumbers from 8.13.19 to 8.13.22 (#16413) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 9de5c49bf7..255396033c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1617,13 +1617,13 @@ files = [ [[package]] name = "phonenumbers" -version = "8.13.19" +version = "8.13.22" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.19-py2.py3-none-any.whl", hash = "sha256:ba542f20f6dc83be8f127f240f9b5b7e7c1dec42aceff1879400d4dc0c781d81"}, - {file = "phonenumbers-8.13.19.tar.gz", hash = "sha256:38180247697240ccedd74dec4bfbdbc22bb108b9c5f991f270ca3e41395e6f96"}, + {file = "phonenumbers-8.13.22-py2.py3-none-any.whl", hash = "sha256:85ceeba9e67984ba98182c77e8e4c70093d38c0c6a0cb2bd392e0694ddaeb1f6"}, + {file = "phonenumbers-8.13.22.tar.gz", hash = "sha256:001664c90f59b8954766c2db85adafc8dbc96177efeb49607ca4e64a7acaf569"}, ] [[package]] From 5725712d477e41761aa89a79edd77d613c36a30a Mon Sep 17 00:00:00 2001 From: MomentQYC <62551256+MomentQYC@users.noreply.github.com> Date: Mon, 2 Oct 2023 21:07:53 +0800 Subject: [PATCH 540/562] Remove Python version from `/_synapse/admin/v1/server_version` (#16380) There's no reason to expose the full Python version over what is frequently a public API. 
---
 changelog.d/16380.removal      |  1 +
 docs/admin_api/version_api.md  | 10 ++++++----
 synapse/rest/admin/__init__.py |  6 +-----
 tests/rest/admin/test_admin.py |  4 +---
 4 files changed, 9 insertions(+), 12 deletions(-)
 create mode 100644 changelog.d/16380.removal

diff --git a/changelog.d/16380.removal b/changelog.d/16380.removal
new file mode 100644
index 0000000000..6e9372134d
--- /dev/null
+++ b/changelog.d/16380.removal
@@ -0,0 +1 @@
+Remove Python version from `/_synapse/admin/v1/server_version`.
\ No newline at end of file
diff --git a/docs/admin_api/version_api.md b/docs/admin_api/version_api.md
index 27977de0d3..bdc37d9119 100644
--- a/docs/admin_api/version_api.md
+++ b/docs/admin_api/version_api.md
@@ -1,7 +1,7 @@
 # Version API
 
-This API returns the running Synapse version and the Python version
-on which Synapse is being run. This is useful when a Synapse instance
+This API returns the running Synapse version.
+This is useful when a Synapse instance
 is behind a proxy that does not forward the 'Server' header (which also
 contains Synapse version information).
 
@@ -15,7 +15,9 @@ It returns a JSON body like the following:
 
 ```json
 {
-    "server_version": "0.99.2rc1 (b=develop, abcdef123)",
-    "python_version": "3.7.8"
+    "server_version": "0.99.2rc1 (b=develop, abcdef123)"
 }
 ```
+
+*Changed in Synapse 1.94.0:* The `python_version` key was removed from the
+response body.
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 7d0b4b55a0..e42dade246 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -16,7 +16,6 @@
 # limitations under the License.
 
 import logging
-import platform
 from http import HTTPStatus
 from typing import TYPE_CHECKING, Optional, Tuple
 
@@ -107,10 +106,7 @@ class VersionServlet(RestServlet):
     PATTERNS = admin_patterns("/server_version$")
 
     def __init__(self, hs: "HomeServer"):
-        self.res = {
-            "server_version": SYNAPSE_VERSION,
-            "python_version": platform.python_version(),
-        }
+        self.res = {"server_version": SYNAPSE_VERSION}
 
     def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
         return HTTPStatus.OK, self.res
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 695e84357a..359d131b37 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -42,9 +42,7 @@ class VersionTestCase(unittest.HomeserverTestCase):
         channel = self.make_request("GET", self.url, shorthand=False)
 
         self.assertEqual(200, channel.code, msg=channel.json_body)
-        self.assertEqual(
-            {"server_version", "python_version"}, set(channel.json_body.keys())
-        )
+        self.assertEqual({"server_version"}, set(channel.json_body.keys()))
 
 
 class QuarantineMediaTestCase(unittest.HomeserverTestCase):

From 102677638002b3ef6ae956947333ddcde80680a7 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Mon, 2 Oct 2023 15:22:36 +0100
Subject: [PATCH 541/562] mypy plugin to check `@cached` return types (#14911)

Co-authored-by: David Robertson
Co-authored-by: Patrick Cloke
Co-authored-by: Erik Johnston

Assert that the return types of callables wrapped in @cached and
@cachedList are cachable (aka immutable).
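As an illustrative sketch (not code from this patch): with the plugin enabled, mypy rejects the first method below and accepts the second. The `DataStore` class, the method names, and their bodies are hypothetical.

```python
from typing import List, Tuple

from synapse.util.caches.descriptors import cached


class DataStore:  # hypothetical store, for illustration only
    @cached()
    async def get_latest_event_ids_bad(self, room_id: str) -> List[str]:
        # Rejected (error code synapse-@cached-mutable): a caller could
        # mutate the cached list in place and poison it for everyone else.
        return ["$event_a", "$event_b"]

    @cached()
    async def get_latest_event_ids_ok(self, room_id: str) -> Tuple[str, ...]:
        # Accepted: tuples are immutable, so sharing one cached value
        # between callers is safe.
        return ("$event_a", "$event_b")
```

Known-mutable return types can still be allowed case by case with `# type: ignore[synapse-@cached-mutable]`, as several of the storage methods in this patch do.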
--- changelog.d/14911.misc | 1 + scripts-dev/mypy_synapse_plugin.py | 287 +++++++++++++++--- synapse/handlers/room_list.py | 4 +- .../databases/main/event_push_actions.py | 7 +- synapse/storage/databases/main/relations.py | 12 +- synapse/storage/databases/main/roommember.py | 5 +- synapse/storage/roommember.py | 1 + synapse/util/caches/descriptors.py | 64 +++- 8 files changed, 323 insertions(+), 58 deletions(-) create mode 100644 changelog.d/14911.misc diff --git a/changelog.d/14911.misc b/changelog.d/14911.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/14911.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py index a0b3854f1b..6592a4a6b7 100644 --- a/scripts-dev/mypy_synapse_plugin.py +++ b/scripts-dev/mypy_synapse_plugin.py @@ -16,13 +16,24 @@ can crop up, e.g the cache descriptors. """ -from typing import Callable, Optional, Type +from typing import Callable, Optional, Tuple, Type, Union +import mypy.types from mypy.erasetype import remove_instance_last_known_values -from mypy.nodes import ARG_NAMED_OPT -from mypy.plugin import MethodSigContext, Plugin +from mypy.errorcodes import ErrorCode +from mypy.nodes import ARG_NAMED_OPT, TempNode, Var +from mypy.plugin import FunctionSigContext, MethodSigContext, Plugin from mypy.typeops import bind_self -from mypy.types import CallableType, Instance, NoneType, UnionType +from mypy.types import ( + AnyType, + CallableType, + Instance, + NoneType, + TupleType, + TypeAliasType, + UninhabitedType, + UnionType, +) class SynapsePlugin(Plugin): @@ -36,9 +47,37 @@ class SynapsePlugin(Plugin): ) ): return cached_function_method_signature + + if fullname in ( + "synapse.util.caches.descriptors._CachedFunctionDescriptor.__call__", + "synapse.util.caches.descriptors._CachedListFunctionDescriptor.__call__", + ): + return check_is_cacheable_wrapper + return None +def _get_true_return_type(signature: CallableType) -> mypy.types.Type: + """ + Get the "final" return type of a callable which might return an Awaitable/Deferred. + """ + if isinstance(signature.ret_type, Instance): + # If a coroutine, unwrap the coroutine's return type. + if signature.ret_type.type.fullname == "typing.Coroutine": + return signature.ret_type.args[2] + + # If an awaitable, unwrap the awaitable's final value. + elif signature.ret_type.type.fullname == "typing.Awaitable": + return signature.ret_type.args[0] + + # If a Deferred, unwrap the Deferred's final value. + elif signature.ret_type.type.fullname == "twisted.internet.defer.Deferred": + return signature.ret_type.args[0] + + # Otherwise, return the raw value of the function. + return signature.ret_type + + def cached_function_method_signature(ctx: MethodSigContext) -> CallableType: """Fixes the `CachedFunction.__call__` signature to be correct. @@ -47,16 +86,17 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType: 1. the `self` argument needs to be marked as "bound"; 2. any `cache_context` argument should be removed; 3. an optional keyword argument `on_invalidated` should be added. + 4. Wrap the return type to always be a Deferred. """ - # First we mark this as a bound function signature. - signature = bind_self(ctx.default_signature) + # 1. Mark this as a bound function signature. + signature: CallableType = bind_self(ctx.default_signature) - # Secondly, we remove any "cache_context" args. + # 2. Remove any "cache_context" args. 
# # Note: We should be only doing this if `cache_context=True` is set, but if # it isn't then the code will raise an exception when its called anyway, so - # its not the end of the world. + # it's not the end of the world. context_arg_index = None for idx, name in enumerate(signature.arg_names): if name == "cache_context": @@ -72,7 +112,7 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType: arg_names.pop(context_arg_index) arg_kinds.pop(context_arg_index) - # Third, we add an optional "on_invalidate" argument. + # 3. Add an optional "on_invalidate" argument. # # This is a either # - a callable which accepts no input and returns nothing, or @@ -94,35 +134,16 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType: arg_names.append("on_invalidate") arg_kinds.append(ARG_NAMED_OPT) # Arg is an optional kwarg. - # Finally we ensure the return type is a Deferred. - if ( - isinstance(signature.ret_type, Instance) - and signature.ret_type.type.fullname == "twisted.internet.defer.Deferred" - ): - # If it is already a Deferred, nothing to do. - ret_type = signature.ret_type - else: - ret_arg = None - if isinstance(signature.ret_type, Instance): - # If a coroutine, wrap the coroutine's return type in a Deferred. - if signature.ret_type.type.fullname == "typing.Coroutine": - ret_arg = signature.ret_type.args[2] + # 4. Ensure the return type is a Deferred. + ret_arg = _get_true_return_type(signature) - # If an awaitable, wrap the awaitable's final value in a Deferred. - elif signature.ret_type.type.fullname == "typing.Awaitable": - ret_arg = signature.ret_type.args[0] - - # Otherwise, wrap the return value in a Deferred. - if ret_arg is None: - ret_arg = signature.ret_type - - # This should be able to use ctx.api.named_generic_type, but that doesn't seem - # to find the correct symbol for anything more than 1 module deep. - # - # modules is not part of CheckerPluginInterface. The following is a combination - # of TypeChecker.named_generic_type and TypeChecker.lookup_typeinfo. - sym = ctx.api.modules["twisted.internet.defer"].names.get("Deferred") # type: ignore[attr-defined] - ret_type = Instance(sym.node, [remove_instance_last_known_values(ret_arg)]) + # This should be able to use ctx.api.named_generic_type, but that doesn't seem + # to find the correct symbol for anything more than 1 module deep. + # + # modules is not part of CheckerPluginInterface. The following is a combination + # of TypeChecker.named_generic_type and TypeChecker.lookup_typeinfo. + sym = ctx.api.modules["twisted.internet.defer"].names.get("Deferred") # type: ignore[attr-defined] + ret_type = Instance(sym.node, [remove_instance_last_known_values(ret_arg)]) signature = signature.copy_modified( arg_types=arg_types, @@ -134,6 +155,198 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType: return signature +def check_is_cacheable_wrapper(ctx: MethodSigContext) -> CallableType: + """Asserts that the signature of a method returns a value which can be cached. + + Makes no changes to the provided method signature. + """ + # The true signature, this isn't being modified so this is what will be returned. 
+ signature: CallableType = ctx.default_signature + + if not isinstance(ctx.args[0][0], TempNode): + ctx.api.note("Cached function is not a TempNode?!", ctx.context) # type: ignore[attr-defined] + return signature + + orig_sig = ctx.args[0][0].type + if not isinstance(orig_sig, CallableType): + ctx.api.fail("Cached 'function' is not a callable", ctx.context) + return signature + + check_is_cacheable(orig_sig, ctx) + + return signature + + +def check_is_cacheable( + signature: CallableType, + ctx: Union[MethodSigContext, FunctionSigContext], +) -> None: + """ + Check if a callable returns a type which can be cached. + + Args: + signature: The callable to check. + ctx: The signature context, used for error reporting. + """ + # Unwrap the true return type from the cached function. + return_type = _get_true_return_type(signature) + + verbose = ctx.api.options.verbosity >= 1 + # TODO Technically a cachedList only needs immutable values, but forcing them + # to return Mapping instead of Dict is fine. + ok, note = is_cacheable(return_type, signature, verbose) + + if ok: + message = f"function {signature.name} is @cached, returning {return_type}" + else: + message = f"function {signature.name} is @cached, but has mutable return value {return_type}" + + if note: + message += f" ({note})" + message = message.replace("builtins.", "").replace("typing.", "") + + if ok and note: + ctx.api.note(message, ctx.context) # type: ignore[attr-defined] + elif not ok: + ctx.api.fail(message, ctx.context, code=AT_CACHED_MUTABLE_RETURN) + + +# Immutable simple values. +IMMUTABLE_VALUE_TYPES = { + "builtins.bool", + "builtins.int", + "builtins.float", + "builtins.str", + "builtins.bytes", +} + +# Types defined in Synapse which are known to be immutable. +IMMUTABLE_CUSTOM_TYPES = { + "synapse.synapse_rust.acl.ServerAclEvaluator", + "synapse.synapse_rust.push.FilteredPushRules", + # This is technically not immutable, but close enough. + "signedjson.types.VerifyKey", +} + +# Immutable containers only if the values are also immutable. +IMMUTABLE_CONTAINER_TYPES_REQUIRING_IMMUTABLE_ELEMENTS = { + "builtins.frozenset", + "builtins.tuple", + "typing.AbstractSet", + "typing.Sequence", + "immutabledict.immutabledict", +} + +MUTABLE_CONTAINER_TYPES = { + "builtins.set", + "builtins.list", + "builtins.dict", +} + +AT_CACHED_MUTABLE_RETURN = ErrorCode( + "synapse-@cached-mutable", + "@cached() should have an immutable return type", + "General", +) + + +def is_cacheable( + rt: mypy.types.Type, signature: CallableType, verbose: bool +) -> Tuple[bool, Optional[str]]: + """ + Check if a particular type is cachable. + + A type is cachable if it is immutable; for complex types this recurses to + check each type parameter. + + Returns: a 2-tuple (cacheable, message). + - cachable: False means the type is definitely not cacheable; + true means anything else. + - Optional message. + """ + + # This should probably be done via a TypeVisitor. Apologies to the reader! + if isinstance(rt, AnyType): + return True, ("may be mutable" if verbose else None) + + elif isinstance(rt, Instance): + if ( + rt.type.fullname in IMMUTABLE_VALUE_TYPES + or rt.type.fullname in IMMUTABLE_CUSTOM_TYPES + ): + # "Simple" types are generally immutable. + return True, None + + elif rt.type.fullname == "typing.Mapping": + # Generally mapping keys are immutable, but they only *have* to be + # hashable, which doesn't imply immutability. E.g. Mapping[K, V] + # is cachable iff K and V are cachable. 
+ return is_cacheable(rt.args[0], signature, verbose) and is_cacheable( + rt.args[1], signature, verbose + ) + + elif rt.type.fullname in IMMUTABLE_CONTAINER_TYPES_REQUIRING_IMMUTABLE_ELEMENTS: + # E.g. Collection[T] is cachable iff T is cachable. + return is_cacheable(rt.args[0], signature, verbose) + + elif rt.type.fullname in MUTABLE_CONTAINER_TYPES: + # Mutable containers are mutable regardless of their underlying type. + return False, None + + elif "attrs" in rt.type.metadata: + # attrs classes are only cachable iff it is frozen (immutable itself) + # and all attributes are cachable. + frozen = rt.type.metadata["attrs"]["frozen"] + if frozen: + for attribute in rt.type.metadata["attrs"]["attributes"]: + attribute_name = attribute["name"] + symbol_node = rt.type.names[attribute_name].node + assert isinstance(symbol_node, Var) + assert symbol_node.type is not None + ok, note = is_cacheable(symbol_node.type, signature, verbose) + if not ok: + return False, f"non-frozen attrs property: {attribute_name}" + # All attributes were frozen. + return True, None + else: + return False, "non-frozen attrs class" + + else: + # Ensure we fail for unknown types, these generally means that the + # above code is not complete. + return ( + False, + f"Don't know how to handle {rt.type.fullname} return type instance", + ) + + elif isinstance(rt, NoneType): + # None is cachable. + return True, None + + elif isinstance(rt, (TupleType, UnionType)): + # Tuples and unions are cachable iff all their items are cachable. + for item in rt.items: + ok, note = is_cacheable(item, signature, verbose) + if not ok: + return False, note + # This discards notes but that's probably fine + return True, None + + elif isinstance(rt, TypeAliasType): + # For a type alias, check if the underlying real type is cachable. + return is_cacheable(mypy.types.get_proper_type(rt), signature, verbose) + + elif isinstance(rt, UninhabitedType) and rt.is_noreturn: + # There is no return value, just consider it cachable. This is only used + # in tests. + return True, None + + else: + # Ensure we fail for unknown types, these generally means that the + # above code is not complete. 
+ return False, f"Don't know how to handle {type(rt).__qualname__} return type" + + def plugin(version: str) -> Type[SynapsePlugin]: # This is the entry point of the plugin, and lets us deal with the fact # that the mypy plugin interface is *not* stable by looking at the version diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index bb0bdb8e6f..36e2db8975 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -33,7 +33,7 @@ from synapse.api.errors import ( RequestSendFailed, SynapseError, ) -from synapse.types import JsonDict, ThirdPartyInstanceID +from synapse.types import JsonDict, JsonMapping, ThirdPartyInstanceID from synapse.util.caches.descriptors import _CacheContext, cached from synapse.util.caches.response_cache import ResponseCache @@ -256,7 +256,7 @@ class RoomListHandler: cache_context: _CacheContext, with_alias: bool = True, allow_private: bool = False, - ) -> Optional[JsonDict]: + ) -> Optional[JsonMapping]: """Returns the entry for a room Args: diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index ba99e63d26..39556481ff 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -182,6 +182,7 @@ class UserPushAction(EmailPushAction): profile_tag: str +# TODO This is used as a cached value and is mutable. @attr.s(slots=True, auto_attribs=True) class NotifCounts: """ @@ -193,7 +194,7 @@ class NotifCounts: highlight_count: int = 0 -@attr.s(slots=True, auto_attribs=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class RoomNotifCounts: """ The per-user, per-room count of notifications. Used by sync and push. @@ -201,7 +202,7 @@ class RoomNotifCounts: main_timeline: NotifCounts # Map of thread ID to the notification counts. - threads: Dict[str, NotifCounts] + threads: Mapping[str, NotifCounts] @staticmethod def empty() -> "RoomNotifCounts": @@ -483,7 +484,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas return room_to_count - @cached(tree=True, max_entries=5000, iterable=True) + @cached(tree=True, max_entries=5000, iterable=True) # type: ignore[synapse-@cached-mutable] async def get_unread_event_push_actions_by_room_for_user( self, room_id: str, diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index b67f780c10..9246b418f5 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -458,7 +458,7 @@ class RelationsWorkerStore(SQLBaseStore): ) return result is not None - @cached() + @cached() # type: ignore[synapse-@cached-mutable] async def get_references_for_event(self, event_id: str) -> List[JsonDict]: raise NotImplementedError() @@ -512,11 +512,12 @@ class RelationsWorkerStore(SQLBaseStore): "_get_references_for_events_txn", _get_references_for_events_txn ) - @cached() + @cached() # type: ignore[synapse-@cached-mutable] def get_applicable_edit(self, event_id: str) -> Optional[EventBase]: raise NotImplementedError() - @cachedList(cached_method_name="get_applicable_edit", list_name="event_ids") + # TODO: This returns a mutable object, which is generally bad. 
+ @cachedList(cached_method_name="get_applicable_edit", list_name="event_ids") # type: ignore[synapse-@cached-mutable] async def get_applicable_edits( self, event_ids: Collection[str] ) -> Mapping[str, Optional[EventBase]]: @@ -598,11 +599,12 @@ class RelationsWorkerStore(SQLBaseStore): for original_event_id in event_ids } - @cached() + @cached() # type: ignore[synapse-@cached-mutable] def get_thread_summary(self, event_id: str) -> Optional[Tuple[int, EventBase]]: raise NotImplementedError() - @cachedList(cached_method_name="get_thread_summary", list_name="event_ids") + # TODO: This returns a mutable object, which is generally bad. + @cachedList(cached_method_name="get_thread_summary", list_name="event_ids") # type: ignore[synapse-@cached-mutable] async def get_thread_summaries( self, event_ids: Collection[str] ) -> Mapping[str, Optional[Tuple[int, EventBase]]]: diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 3755773faa..e93573f315 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -275,7 +275,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): _get_users_in_room_with_profiles, ) - @cached(max_entries=100000) + @cached(max_entries=100000) # type: ignore[synapse-@cached-mutable] async def get_room_summary(self, room_id: str) -> Mapping[str, MemberSummary]: """Get the details of a room roughly suitable for use by the room summary extension to /sync. Useful when lazy loading room members. @@ -1071,7 +1071,8 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ) return {row["event_id"]: row["membership"] for row in rows} - @cached(max_entries=10000) + # TODO This returns a mutable object, which is generally confusing when using a cache. + @cached(max_entries=10000) # type: ignore[synapse-@cached-mutable] def _get_joined_hosts_cache(self, room_id: str) -> "_JoinedHostsCache": return _JoinedHostsCache() diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 2500381b7b..cbfb32014c 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -45,6 +45,7 @@ class ProfileInfo: display_name: Optional[str] +# TODO This is used as a cached value and is mutable. 
 @attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True)
 class MemberSummary:
     # A truncated list of (user_id, event_id) tuples for users of a given
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 8514a75a1c..ce736fdf75 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -36,6 +36,8 @@ from typing import (
 )
 from weakref import WeakValueDictionary
 
+import attr
+
 from twisted.internet import defer
 from twisted.python.failure import Failure
 
@@ -466,6 +468,35 @@ class _CacheContext:
         )
 
 
+@attr.s(auto_attribs=True, slots=True, frozen=True)
+class _CachedFunctionDescriptor:
+    """Helper for `@cached`, we name it so that we can hook into it with mypy
+    plugin."""
+
+    max_entries: int
+    num_args: Optional[int]
+    uncached_args: Optional[Collection[str]]
+    tree: bool
+    cache_context: bool
+    iterable: bool
+    prune_unread_entries: bool
+    name: Optional[str]
+
+    def __call__(self, orig: F) -> CachedFunction[F]:
+        d = DeferredCacheDescriptor(
+            orig,
+            max_entries=self.max_entries,
+            num_args=self.num_args,
+            uncached_args=self.uncached_args,
+            tree=self.tree,
+            cache_context=self.cache_context,
+            iterable=self.iterable,
+            prune_unread_entries=self.prune_unread_entries,
+            name=self.name,
+        )
+        return cast(CachedFunction[F], d)
+
+
 def cached(
     *,
     max_entries: int = 1000,
@@ -476,9 +507,8 @@ def cached(
     iterable: bool = False,
     prune_unread_entries: bool = True,
    name: Optional[str] = None,
-) -> Callable[[F], CachedFunction[F]]:
-    func = lambda orig: DeferredCacheDescriptor(
-        orig,
+) -> _CachedFunctionDescriptor:
+    return _CachedFunctionDescriptor(
         max_entries=max_entries,
         num_args=num_args,
         uncached_args=uncached_args,
@@ -489,7 +519,26 @@ def cached(
         name=name,
     )
 
-    return cast(Callable[[F], CachedFunction[F]], func)
+
+@attr.s(auto_attribs=True, slots=True, frozen=True)
+class _CachedListFunctionDescriptor:
+    """Helper for `@cachedList`, we name it so that we can hook into it with mypy
+    plugin."""
+
+    cached_method_name: str
+    list_name: str
+    num_args: Optional[int] = None
+    name: Optional[str] = None
+
+    def __call__(self, orig: F) -> CachedFunction[F]:
+        d = DeferredCacheListDescriptor(
+            orig,
+            cached_method_name=self.cached_method_name,
+            list_name=self.list_name,
+            num_args=self.num_args,
+            name=self.name,
+        )
+        return cast(CachedFunction[F], d)
 
 
 def cachedList(
@@ -498,7 +547,7 @@ def cachedList(
     list_name: str,
     num_args: Optional[int] = None,
    name: Optional[str] = None,
-) -> Callable[[F], CachedFunction[F]]:
+) -> _CachedListFunctionDescriptor:
     """Creates a descriptor that wraps a function in a
     `DeferredCacheListDescriptor`.
 
     Used to do batch lookups for an already created cache. One of the arguments
@@ -527,16 +576,13 @@ def cachedList(
         def batch_do_something(self, first_arg, second_args):
             ...
     """
-    func = lambda orig: DeferredCacheListDescriptor(
-        orig,
+    return _CachedListFunctionDescriptor(
         cached_method_name=cached_method_name,
         list_name=list_name,
         num_args=num_args,
         name=name,
     )
 
-    return cast(Callable[[F], CachedFunction[F]], func)
-
 
 def _get_cache_key_builder(
     param_names: Sequence[str],

From 127b940dc0806b8d74456d34e3f636ef1f6f1c68 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Mon, 2 Oct 2023 11:05:29 -0400
Subject: [PATCH 542/562] Clean-up old release notes (#16418)

Fixes some broken formatting from the reStructuredText to Markdown
conversion and fixes some typos.
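To make the nature of the cleanup concrete, here is a rough sketch (an assumption about the mechanics, not necessarily the tooling used for this patch) of the main transformation: the conversion left identifiers such as `report\_event` with escaped underscores, and the fix renders them as Markdown code spans. The stray-quote fixes (e.g. \'Server Notices\') follow a similar pattern.

```python
import re

# Matches identifiers that still contain pandoc-escaped underscores,
# e.g. report\_event or user\_directory\_search\_all\_users.
ESCAPED_IDENTIFIER = re.compile(r"\b(\w+(?:\\_\w+)+)\b")


def unescape_identifiers(line: str) -> str:
    """Rewrite escaped identifiers on one changelog line as code spans."""
    return ESCAPED_IDENTIFIER.sub(
        lambda m: "`" + m.group(1).replace("\\_", "_") + "`", line
    )


print(unescape_identifiers(r"Enforce the specified API for report\_event."))
# -> Enforce the specified API for `report_event`.
```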
--- changelog.d/16418.doc | 1 + docs/changelogs/CHANGES-pre-1.0.md | 267 +++++++++++++++-------------- docs/user_directory.md | 2 +- 3 files changed, 136 insertions(+), 134 deletions(-) create mode 100644 changelog.d/16418.doc diff --git a/changelog.d/16418.doc b/changelog.d/16418.doc new file mode 100644 index 0000000000..4ec5dbb6b2 --- /dev/null +++ b/changelog.d/16418.doc @@ -0,0 +1 @@ +Improve legacy release notes. diff --git a/docs/changelogs/CHANGES-pre-1.0.md b/docs/changelogs/CHANGES-pre-1.0.md index e414dbb3b1..a08f867b67 100644 --- a/docs/changelogs/CHANGES-pre-1.0.md +++ b/docs/changelogs/CHANGES-pre-1.0.md @@ -1186,9 +1186,9 @@ Synapse 0.33.0rc1 (2018-07-18) Features -------- -- Enforce the specified API for report\_event. ([\#3316](https://github.com/matrix-org/synapse/issues/3316)) +- Enforce the specified API for `report_event`. ([\#3316](https://github.com/matrix-org/synapse/issues/3316)) - Include CPU time from database threads in request/block metrics. ([\#3496](https://github.com/matrix-org/synapse/issues/3496), [\#3501](https://github.com/matrix-org/synapse/issues/3501)) -- Add CPU metrics for \_fetch\_event\_list. ([\#3497](https://github.com/matrix-org/synapse/issues/3497)) +- Add CPU metrics for `_fetch_event_list`. ([\#3497](https://github.com/matrix-org/synapse/issues/3497)) - Optimisation to make handling incoming federation requests more efficient. ([\#3541](https://github.com/matrix-org/synapse/issues/3541)) Bugfixes @@ -1238,19 +1238,19 @@ Features - Add metrics to track appservice transactions ([\#3344](https://github.com/matrix-org/synapse/issues/3344)) - Try to log more helpful info when a sig verification fails ([\#3372](https://github.com/matrix-org/synapse/issues/3372)) - Synapse now uses the best performing JSON encoder/decoder according to your runtime (simplejson on CPython, stdlib json on PyPy). ([\#3462](https://github.com/matrix-org/synapse/issues/3462)) -- Add optional ip\_range\_whitelist param to AS registration files to lock AS IP access ([\#3465](https://github.com/matrix-org/synapse/issues/3465)) +- Add optional `ip_range_whitelist` param to AS registration files to lock AS IP access ([\#3465](https://github.com/matrix-org/synapse/issues/3465)) - Reject invalid server names in federation requests ([\#3480](https://github.com/matrix-org/synapse/issues/3480)) - Reject invalid server names in homeserver.yaml ([\#3483](https://github.com/matrix-org/synapse/issues/3483)) Bugfixes -------- -- Strip access\_token from outgoing requests ([\#3327](https://github.com/matrix-org/synapse/issues/3327)) +- Strip `access_token` from outgoing requests ([\#3327](https://github.com/matrix-org/synapse/issues/3327)) - Redact AS tokens in logs ([\#3349](https://github.com/matrix-org/synapse/issues/3349)) - Fix federation backfill from SQLite servers ([\#3355](https://github.com/matrix-org/synapse/issues/3355)) - Fix event-purge-by-ts admin API ([\#3363](https://github.com/matrix-org/synapse/issues/3363)) -- Fix event filtering in get\_missing\_events handler ([\#3371](https://github.com/matrix-org/synapse/issues/3371)) -- Synapse is now stricter regarding accepting events which it cannot retrieve the prev\_events for. ([\#3456](https://github.com/matrix-org/synapse/issues/3456)) +- Fix event filtering in `get_missing_events` handler ([\#3371](https://github.com/matrix-org/synapse/issues/3371)) +- Synapse is now stricter regarding accepting events which it cannot retrieve the `prev_events` for. 
([\#3456](https://github.com/matrix-org/synapse/issues/3456)) - Fix bug where synapse would explode when receiving unicode in HTTP User-Agent header ([\#3470](https://github.com/matrix-org/synapse/issues/3470)) - Invalidate cache on correct thread to avoid race ([\#3473](https://github.com/matrix-org/synapse/issues/3473)) @@ -1262,7 +1262,7 @@ Improved Documentation Deprecations and Removals ------------------------- -- Remove was\_forgotten\_at ([\#3324](https://github.com/matrix-org/synapse/issues/3324)) +- Remove `was_forgotten_at` ([\#3324](https://github.com/matrix-org/synapse/issues/3324)) Misc ---- @@ -1285,7 +1285,7 @@ We are not aware of it being actively exploited but please upgrade asap. Bug Fixes: -- Fix event filtering in get\_missing\_events handler (PR #3371) +- Fix event filtering in `get_missing_events` handler (PR #3371) Changes in synapse v0.31.0 (2018-06-06) ======================================= @@ -1309,7 +1309,7 @@ Features: Changes: - daily user type phone home stats (PR #3264) -- Use iter\* methods for \_filter\_events\_for\_server (PR #3267) +- Use `iter*` methods for `_filter_events_for_server` (PR #3267) - Docs on consent bits (PR #3268) - Remove users from user directory on deactivate (PR #3277) - Avoid sending consent notice to guest users (PR #3288) @@ -1323,10 +1323,10 @@ Changes, python 3 migration: - Replace some more comparisons with six (PR #3243) Thanks to @NotAFile! - replace some iteritems with six (PR #3244) Thanks to @NotAFile! -- Add batch\_iter to utils (PR #3245) Thanks to @NotAFile! +- Add `batch_iter` to utils (PR #3245) Thanks to @NotAFile! - use repr, not str (PR #3246) Thanks to @NotAFile! - Misc Python3 fixes (PR #3247) Thanks to @NotAFile! -- Py3 storage/\_base.py (PR #3278) Thanks to @NotAFile! +- Py3 `storage/_base.py` (PR #3278) Thanks to @NotAFile! - more six iteritems (PR #3279) Thanks to @NotAFile! - More Misc. py3 fixes (PR #3280) Thanks to @NotAFile! - remaining isintance fixes (PR #3281) Thanks to @NotAFile! @@ -1342,7 +1342,7 @@ Bugs: Changes in synapse v0.30.0 (2018-05-24) ======================================= -\'Server Notices\' are a new feature introduced in Synapse 0.30. They provide a channel whereby server administrators can send messages to users on the server. +"Server Notices" are a new feature introduced in Synapse 0.30. They provide a channel whereby server administrators can send messages to users on the server. They are used as part of communication of the server policies (see `docs/consent_tracking.md`), however the intention is that they may also find a use for features such as "Message of the day". 
@@ -1350,9 +1350,9 @@ This feature is specific to Synapse, but uses standard Matrix communication mech Further Server Notices/Consent Tracking Support: -- Allow overriding the server\_notices user's avatar (PR #3273) +- Allow overriding the `server_notices` user's avatar (PR #3273) - Use the localpart in the consent uri (PR #3272) -- Support for putting %(consent\_uri)s in messages (PR #3271) +- Support for putting `%(consent_uri)s` in messages (PR #3271) - Block attempts to send server notices to remote users (PR #3270) - Docs on consent bits (PR #3268) @@ -1366,7 +1366,7 @@ Server Notices/Consent Tracking Support: - Infrastructure for a server notices room (PR #3232) - Send users a server notice about consent (PR #3236) - Reject attempts to send event before privacy consent is given (PR #3257) -- Add a \'has\_consented\' template var to consent forms (PR #3262) +- Add a `has_consented` template var to consent forms (PR #3262) - Fix dependency on jinja2 (PR #3263) Features: @@ -1377,9 +1377,9 @@ Features: Changes: -- Remove unused update\_external\_syncs (PR #3233) +- Remove unused `update_external_syncs` (PR #3233) - Use stream rather depth ordering for push actions (PR #3212) -- Make purge\_history operate on tokens (PR #3221) +- Make `purge_history` operate on tokens (PR #3221) - Don't support limitless pagination (PR #3265) Bug Fixes: @@ -1421,29 +1421,29 @@ Changes - General: - nuke-room-from-db.sh: added postgresql option and help (PR #2337) Thanks to @rubo77! - Part user from rooms on account deactivate (PR #3201) -- Make \'unexpected logging context\' into warnings (PR #3007) +- Make "unexpected logging context" into warnings (PR #3007) - Set Server header in SynapseRequest (PR #3208) - remove duplicates from groups tables (PR #3129) - Improve exception handling for background processes (PR #3138) - Add missing consumeErrors to improve exception handling (PR #3139) - reraise exceptions more carefully (PR #3142) -- Remove redundant call to preserve\_fn (PR #3143) -- Trap exceptions thrown within run\_in\_background (PR #3144) +- Remove redundant call to `preserve_fn` (PR #3143) +- Trap exceptions thrown within `run_in_background` (PR #3144) Changes - Refactors: - Refactor /context to reuse pagination storage functions (PR #3193) - Refactor recent events func to use pagination func (PR #3195) - Refactor pagination DB API to return concrete type (PR #3196) -- Refactor get\_recent\_events\_for\_room return type (PR #3198) +- Refactor `get_recent_events_for_room` return type (PR #3198) - Refactor sync APIs to reuse pagination API (PR #3199) - Remove unused code path from member change DB func (PR #3200) - Refactor request handling wrappers (PR #3203) -- transaction\_id, destination defined twice (PR #3209) Thanks to @damir-manapov! +- `transaction_id`, destination defined twice (PR #3209) Thanks to @damir-manapov! 
- Refactor event storage to prepare for changes in state calculations (PR #3141) - Set Server header in SynapseRequest (PR #3208) -- Use deferred.addTimeout instead of time\_bound\_deferred (PR #3127, #3178) -- Use run\_in\_background in preference to preserve\_fn (PR #3140) +- Use deferred.addTimeout instead of `time_bound_deferred` (PR #3127, #3178) +- Use `run_in_background` in preference to `preserve_fn` (PR #3140) Changes - Python 3 migration: @@ -1463,29 +1463,29 @@ Changes - Python 3 migration: Bug Fixes: -- synapse fails to start under Twisted \>= 18.4 (PR #3157) +- synapse fails to start under Twisted >= 18.4 (PR #3157) - Fix a class of logcontext leaks (PR #3170) - Fix a couple of logcontext leaks in unit tests (PR #3172) - Fix logcontext leak in media repo (PR #3174) - Escape label values in prometheus metrics (PR #3175, #3186) -- Fix \'Unhandled Error\' logs with Twisted 18.4 (PR #3182) Thanks to @Half-Shot! +- Fix "Unhandled Error" logs with Twisted 18.4 (PR #3182) Thanks to @Half-Shot! - Fix logcontext leaks in rate limiter (PR #3183) -- notifications: Convert next\_token to string according to the spec (PR #3190) Thanks to @mujx! +- notifications: Convert `next_token` to string according to the spec (PR #3190) Thanks to @mujx! - nuke-room-from-db.sh: fix deletion from search table (PR #3194) Thanks to @rubo77! -- add guard for None on purge\_history api (PR #3160) Thanks to @krombel! +- add guard for None on `purge_history` api (PR #3160) Thanks to @krombel! Changes in synapse v0.28.1 (2018-05-01) ======================================= SECURITY UPDATE -- Clamp the allowed values of event depth received over federation to be \[0, 2\^63 - 1\]. This mitigates an attack where malicious events injected with depth = 2\^63 - 1 render rooms unusable. Depth is used to determine the cosmetic ordering of events within a room, and so the ordering of events in such a room will default to using stream\_ordering rather than depth (topological\_ordering). +- Clamp the allowed values of event depth received over federation to be `[0, 2^63 - 1]`. This mitigates an attack where malicious events injected with `depth = 2^63 - 1` render rooms unusable. Depth is used to determine the cosmetic ordering of events within a room, and so the ordering of events in such a room will default to using `stream_ordering` rather than `depth` (topological ordering). This is a temporary solution to mitigate abuse in the wild, whilst a long term solution is being implemented to improve how the depth parameter is used. Full details at -- Pin Twisted to \<18.4 until we stop using the private \_OpenSSLECCurve API. +- Pin Twisted to <18.4 until we stop using the private `_OpenSSLECCurve` API. Changes in synapse v0.28.0 (2018-04-26) ======================================= @@ -1510,7 +1510,7 @@ Features: Changes: - Synapse on PyPy (PR #2760) Thanks to @Valodim! -- move handling of auto\_join\_rooms to RegisterHandler (PR #2996) Thanks to @krombel! +- move handling of `auto_join_rooms` to RegisterHandler (PR #2996) Thanks to @krombel! - Improve handling of SRV records for federation connections (PR #3016) Thanks to @silkeh! - Document the behaviour of ResponseCache (PR #3059) - Preparation for py3 (PR #3061, #3073, #3074, #3075, #3103, #3104, #3106, #3107, #3109, #3110) Thanks to @NotAFile! @@ -1524,15 +1524,15 @@ Changes: - Clarify that SRV may not point to a CNAME (PR #3100) Thanks to @silkeh! - Use str(e) instead of e.message (PR #3103) Thanks to @NotAFile! 
- Use six.itervalues in some places (PR #3106) Thanks to @NotAFile! -- Refactor store.have\_events (PR #3117) +- Refactor `store.have_events` (PR #3117) Bug Fixes: -- Return 401 for invalid access\_token on logout (PR #2938) Thanks to @dklug! +- Return 401 for invalid `access_token` on logout (PR #2938) Thanks to @dklug! - Return a 404 rather than a 500 on rejoining empty rooms (PR #3080) -- fix federation\_domain\_whitelist (PR #3099) -- Avoid creating events with huge numbers of prev\_events (PR #3113) -- Reject events which have lots of prev\_events (PR #3118) +- fix `federation_domain_whitelist` (PR #3099) +- Avoid creating events with huge numbers of `prev_events` (PR #3113) +- Reject events which have lots of `prev_events` (PR #3118) Changes in synapse v0.27.4 (2018-04-13) ======================================= @@ -1556,12 +1556,13 @@ v0.27.3-rc1 used a stale version of the develop branch so the changelog overstat Changes in synapse v0.27.3-rc1 (2018-04-09) =========================================== -Notable changes include API support for joinability of groups. Also new metrics and phone home stats. Phone home stats include better visibility of system usage so we can tweak synpase to work better for all users rather than our own experience with matrix.org. Also, recording \'r30\' stat which is the measure we use to track overall growth of the Matrix ecosystem. It is defined as:- +Notable changes include API support for joinability of groups. Also new metrics and phone home stats. Phone home stats include better visibility of system usage so we can tweak synpase to work better for all users rather than our own experience with matrix.org. Also, recording "r30" stat which is the measure we use to track overall growth of the Matrix ecosystem. It is defined as:- -Counts the number of native 30 day retained users, defined as:- \* Users who have created their accounts more than 30 days +Counts the number of native 30 day retained users, defined as: -: - Where last seen at most 30 days ago - - Where account creation and last\_seen are \> 30 days\" +- Users who have created their accounts more than 30 days +- Where last seen at most 30 days ago +- Where account creation and `last_seen` are > 30 days Features: @@ -1577,9 +1578,9 @@ Features: Changes: - Add a blurb explaining the main synapse worker (PR #2886) Thanks to @turt2live! -- Replace old style error catching with \'as\' keyword (PR #3000) Thanks to @NotAFile! -- Use .iter\* to avoid copies in StateHandler (PR #3006) -- Linearize calls to \_generate\_user\_id (PR #3029) +- Replace old style error catching with `as` keyword (PR #3000) Thanks to @NotAFile! +- Use `.iter*` to avoid copies in StateHandler (PR #3006) +- Linearize calls to `_generate_user_id` (PR #3029) - Remove last usage of ujson (PR #3030) - Use simplejson throughout (PR #3048) - Use static JSONEncoders (PR #3049) @@ -1588,13 +1589,13 @@ Changes: Bug fixes: -- Add room\_id to the response of rooms/{roomId}/join (PR #2986) Thanks to @jplatte! +- Add `room_id` to the response of rooms/{roomId}/join (PR #2986) Thanks to @jplatte! - Fix replication after switch to simplejson (PR #3015) - 404 correctly on missing paths via NoResource (PR #3022) - Fix error when claiming e2e keys from offline servers (PR #3034) -- fix tests/storage/test\_user\_directory.py (PR #3042) -- use PUT instead of POST for federating groups/m.join\_policy (PR #3070) Thanks to @krombel! 
-- postgres port script: fix state\_groups\_pkey error (PR #3072) +- fix `tests/storage/test_user_directory.py` (PR #3042) +- use `PUT` instead of `POST` for federating `groups`/`m.join_policy` (PR #3070) Thanks to @krombel! +- postgres port script: fix `state_groups_pkey` error (PR #3072) Changes in synapse v0.27.2 (2018-03-26) ======================================= @@ -1640,7 +1641,7 @@ Features: - Add ability for ASes to override message send time (PR #2754) - Add support for custom storage providers for media repository (PR #2867, #2777, #2783, #2789, #2791, #2804, #2812, #2814, #2857, #2868, #2767) -- Add purge API features, see [docs/admin\_api/purge\_history\_api.rst](docs/admin_api/purge_history_api.rst) for full details (PR #2858, #2867, #2882, #2946, #2962, #2943) +- Add purge API features, see [docs/admin_api/purge_history_api.rst](docs/admin_api/purge_history_api.rst) for full details (PR #2858, #2867, #2882, #2946, #2962, #2943) - Add support for whitelisting 3PIDs that users can register. (PR #2813) - Add `/room/{id}/event/{id}` API (PR #2766) - Add an admin API to get all the media in a room (PR #2818) Thanks to @turt2live! @@ -1669,8 +1670,8 @@ Bug fixes: - Fix publicised groups GET API (singular) over federation (PR #2772) - Fix user directory when using `user_directory_search_all_users` config option (PR #2803, #2831) - Fix error on `/publicRooms` when no rooms exist (PR #2827) -- Fix bug in quarantine\_media (PR #2837) -- Fix url\_previews when no Content-Type is returned from URL (PR #2845) +- Fix bug in `quarantine_media` (PR #2837) +- Fix `url_previews` when no `Content-Type` is returned from URL (PR #2845) - Fix rare race in sync API when joining room (PR #2944) - Fix slow event search, switch back from GIST to GIN indexes (PR #2769, #2848) @@ -1685,27 +1686,27 @@ Changes in synapse v0.26.0-rc1 (2017-12-13) Features: - Add ability for ASes to publicise groups for their users (PR #2686) -- Add all local users to the user\_directory and optionally search them (PR #2723) +- Add all local users to the `user_directory` and optionally search them (PR #2723) - Add support for custom login types for validating users (PR #2729) Changes: - Update example Prometheus config to new format (PR #2648) Thanks to @krombel! -- Rename redact\_content option to include\_content in Push API (PR #2650) +- Rename `redact_content` option to `include_content` in Push API (PR #2650) - Declare support for r0.3.0 (PR #2677) - Improve upserts (PR #2684, #2688, #2689, #2713) - Improve documentation of workers (PR #2700) - Improve tracebacks on exceptions (PR #2705) - Allow guest access to group APIs for reading (PR #2715) -- Support for posting content in federation\_client script (PR #2716) +- Support for posting content in `federation_client` script (PR #2716) - Delete devices and pushers on logouts etc (PR #2722) Bug fixes: - Fix database port script (PR #2673) -- Fix internal server error on login with ldap\_auth\_provider (PR #2678) Thanks to @jkolo! +- Fix internal server error on login with `ldap_auth_provider` (PR #2678) Thanks to @jkolo! 
- Fix error on sqlite 3.7 (PR #2697) -- Fix OPTIONS on preview\_url (PR #2707) +- Fix `OPTIONS` on `preview_url` (PR #2707) - Fix error handling on dns lookup (PR #2711) - Fix wrong avatars when inviting multiple users when creating room (PR #2717) - Fix 500 when joining matrix-dev (PR #2719) @@ -1729,7 +1730,7 @@ Changes in synapse v0.25.0-rc1 (2017-11-14) Features: -- Add is\_public to groups table to allow for private groups (PR #2582) +- Add `is_public` to groups table to allow for private groups (PR #2582) - Add a route for determining who you are (PR #2668) Thanks to @turt2live! - Add more features to the password providers (PR #2608, #2610, #2620, #2622, #2623, #2624, #2626, #2628, #2629) - Add a hook for custom rest endpoints (PR #2627) @@ -1737,7 +1738,7 @@ Features: Changes: -- Ignore \ tags when generating URL preview descriptions (PR #2576) Thanks to @maximevaillancourt! +- Ignore `` tags when generating URL preview descriptions (PR #2576) Thanks to @maximevaillancourt! - Register some /unstable endpoints in /r0 as well (PR #2579) Thanks to @krombel! - Support /keys/upload on /r0 as well as /unstable (PR #2585) - Front-end proxy: pass through auth header (PR #2586) @@ -1745,9 +1746,9 @@ Changes: - Remove refresh tokens (PR #2613) - Automatically set default displayname on register (PR #2617) - Log login requests (PR #2618) -- Always return is\_public in the /groups/:group\_id/rooms API (PR #2630) +- Always return `is_public` in the `/groups/:group_id/rooms` API (PR #2630) - Avoid no-op media deletes (PR #2637) Thanks to @spantaleev! -- Fix various embarrassing typos around user\_directory and add some doc. (PR #2643) +- Fix various embarrassing typos around `user_directory` and add some doc. (PR #2643) - Return whether a user is an admin within a group (PR #2647) - Namespace visibility options for groups (PR #2657) - Downcase UserIDs on registration (PR #2662) @@ -1760,7 +1761,7 @@ Bug fixes: - Fix UI auth when deleting devices (PR #2591) - Fix typo when checking if user is invited to group (PR #2599) - Fix the port script to drop NUL values in all tables (PR #2611) -- Fix appservices being backlogged and not receiving new events due to a bug in notify\_interested\_services (PR #2631) Thanks to @xyzz! +- Fix appservices being backlogged and not receiving new events due to a bug in `notify_interested_services` (PR #2631) Thanks to @xyzz! - Fix updating rooms avatar/display name when modified by admin (PR #2636) Thanks to @farialima! - Fix bug in state group storage (PR #2649) - Fix 500 on invalid utf-8 in request (PR #2663) @@ -1794,7 +1795,7 @@ Changes: - Ignore incoming events for rooms that we have left (PR #2490) - Allow spam checker to reject invites too (PR #2492) - Add room creation checks to spam checker (PR #2495) -- Spam checking: add the invitee to user\_may\_invite (PR #2502) +- Spam checking: add the invitee to `user_may_invite` (PR #2502) - Process events from federation for different rooms in parallel (PR #2520) - Allow error strings from spam checker (PR #2531) - Improve error handling for missing files in config (PR #2551) @@ -1805,7 +1806,7 @@ Bug fixes: - Fix incompatibility with newer versions of ujson (PR #2483) Thanks to @jeremycline! 
- Fix notification keywords that start/end with non-word chars (PR #2500) - Fix stack overflow and logcontexts from linearizer (PR #2532) -- Fix 500 error when fields missing from power\_levels event (PR #2552) +- Fix 500 error when fields missing from `power_levels` event (PR #2552) - Fix 500 error when we get an error handling a PDU (PR #2553) Changes in synapse v0.23.1 (2017-10-02) @@ -1813,7 +1814,7 @@ Changes in synapse v0.23.1 (2017-10-02) Changes: -- Make \'affinity\' package optional, as it is not supported on some platforms +- Make `affinity` package optional, as it is not supported on some platforms Changes in synapse v0.23.0 (2017-10-02) ======================================= @@ -1833,7 +1834,7 @@ Changes in synapse v0.23.0-rc1 (2017-09-25) Features: - Add a frontend proxy worker (PR #2344) -- Add support for event\_id\_only push format (PR #2450) +- Add support for `event_id_only` push format (PR #2450) - Add a PoC for filtering spammy events (PR #2456) - Add a config option to block all room invites (PR #2457) @@ -1897,12 +1898,12 @@ Changes: - Deduplicate sync filters (PR #2219) Thanks to @krombel! - Correct a typo in UPGRADE.rst (PR #2231) Thanks to @aaronraimist! - Add count of one time keys to sync stream (PR #2237) -- Only store event\_auth for state events (PR #2247) +- Only store `event_auth` for state events (PR #2247) - Store URL cache preview downloads separately (PR #2299) Bug fixes: -- Fix users not getting notifications when AS listened to that user\_id (PR #2216) Thanks to @slipeer! +- Fix users not getting notifications when AS listened to that `user_id` (PR #2216) Thanks to @slipeer! - Fix users without push set up not getting notifications after joining rooms (PR #2236) - Fix preview url API to trim long descriptions (PR #2243) - Fix bug where we used cached but unpersisted state group as prev group, resulting in broken state of restart (PR #2263) @@ -1935,7 +1936,7 @@ Changes: - Update username availability checker API (PR #2209, #2213) - When purging, Don't de-delta state groups we're about to delete (PR #2214) - Documentation to check synapse version (PR #2215) Thanks to @hamber-dick! -- Add an index to event\_search to speed up purge history API (PR #2218) +- Add an index to `event_search` to speed up purge history API (PR #2218) Bug fixes: @@ -2004,7 +2005,7 @@ Changes in synapse v0.20.0-rc1 (2017-03-30) Features: -- Add delete\_devices API (PR #1993) +- Add `delete_devices` API (PR #1993) - Add phone number registration/login support (PR #1994, #2055) Changes: @@ -2024,12 +2025,12 @@ Changes: Bug fixes: -- Fix bug where current\_state\_events renamed to current\_state\_ids (PR #1849) +- Fix bug where `current_state_events` renamed to `current_state_ids` (PR #1849) - Fix routing loop when fetching remote media (PR #1992) -- Fix current\_state\_events table to not lie (PR #1996) +- Fix `current_state_events` table to not lie (PR #1996) - Fix CAS login to handle PartialDownloadError (PR #1997) - Fix assertion to stop transaction queue getting wedged (PR #2010) -- Fix presence to fallback to last\_active\_ts if it beats the last sync time. Thanks @Half-Shot! (PR #2014) +- Fix presence to fallback to `last_active_ts` if it beats the last sync time. Thanks @Half-Shot! (PR #2014) - Fix bug when federation received a PDU while a room join is in progress (PR #2016) - Fix resetting state on rejected events (PR #2025) - Fix installation issues in readme. Thanks @ricco386 (PR #2037) @@ -2064,7 +2065,7 @@ Changes: Bug fixes: -- Fix synapse\_port\_db failure. 
Thanks to Pneumaticat! (PR #1904) +- Fix synapse_port_db failure. Thanks to Pneumaticat! (PR #1904) - Fix caching to not cache error responses (PR #1913) - Fix APIs to make kick & ban reasons work (PR #1917) - Fix bugs in the /keys/changes api (PR #1921) @@ -2099,7 +2100,7 @@ Changes in synapse v0.19.0-rc3 (2017-02-02) =========================================== - Fix email push in pusher worker (PR #1875) -- Make presence.get\_new\_events a bit faster (PR #1876) +- Make `presence.get_new_events` a bit faster (PR #1876) - Make /keys/changes a bit more performant (PR #1877) Changes in synapse v0.19.0-rc2 (2017-02-02) @@ -2122,14 +2123,14 @@ Features: Changes: - Improve IPv6 support (PR #1696). Thanks to @kyrias and @glyph! -- Log which files we saved attachments to in the media\_repository (PR #1791) +- Log which files we saved attachments to in the `media_repository` (PR #1791) - Linearize updates to membership via PUT /state/ to better handle multiple joins (PR #1787) - Limit number of entries to prefill from cache on startup (PR #1792) -- Remove full\_twisted\_stacktraces option (PR #1802) +- Remove `full_twisted_stacktraces` option (PR #1802) - Measure size of some caches by sum of the size of cached values (PR #1815) -- Measure metrics of string\_cache (PR #1821) +- Measure metrics of `string_cache` (PR #1821) - Reduce logging verbosity (PR #1822, #1823, #1824) -- Don't clobber a displayname or avatar\_url if provided by an m.room.member event (PR #1852) +- Don't clobber a displayname or `avatar_url` if provided by an m.room.member event (PR #1852) - Better handle 401/404 response for federation /send/ (PR #1866, #1871) Fixes: @@ -2142,7 +2143,7 @@ Fixes: Performance: - Don't block messages sending on bumping presence (PR #1789) -- Change device\_inbox stream index to include user (PR #1793) +- Change `device_inbox` stream index to include user (PR #1793) - Optimise state resolution (PR #1818) - Use DB cache of joined users for presence (PR #1862) - Add an index to make membership queries faster (PR #1867) @@ -2225,7 +2226,7 @@ Changes: - Enable guest access for private rooms by default (PR #653) - Limit the number of events that can be created on a given room concurrently (PR #1620) - Log the args that we have on UI auth completion (PR #1649) -- Stop generating refresh\_tokens (PR #1654) +- Stop generating `refresh_tokens` (PR #1654) - Stop putting a time caveat on access tokens (PR #1656) - Remove unspecced GET endpoints for e2e keys (PR #1694) @@ -2250,7 +2251,7 @@ Changes in synapse v0.18.5-rc1 (2016-11-24) Features: -- Implement \"event\_fields\" in filters (PR #1638) +- Implement `event_fields` in filters (PR #1638) Changes: @@ -2279,7 +2280,7 @@ Bug fixes: - Fix media repo to set CORs headers on responses (PR #1190) - Fix registration to not error on non-ascii passwords (PR #1191) -- Fix create event code to limit the number of prev\_events (PR #1615) +- Fix create event code to limit the number of `prev_events` (PR #1615) - Fix bug in transaction ID deduplication (PR #1624) Changes in synapse v0.18.3 (2016-11-08) @@ -2338,10 +2339,10 @@ Changes in synapse v0.18.2-rc1 (2016-10-17) Changes: -- Remove redundant event\_auth index (PR #1113) +- Remove redundant `event_auth` index (PR #1113) - Reduce DB hits for replication (PR #1141) - Implement pluggable password auth (PR #1155) -- Remove rate limiting from app service senders and fix get\_or\_create\_user requester, thanks to Patrik Oldsberg (PR #1157) +- Remove rate limiting from app service senders and fix `get_or_create_user` 
requester, thanks to Patrik Oldsberg (PR #1157) - window.postmessage for Interactive Auth fallback (PR #1159) - Use sys.executable instead of hardcoded python, thanks to Pedro Larroy (PR #1162) - Add config option for adding additional TLS fingerprints (PR #1167) @@ -2349,7 +2350,7 @@ Changes: Bug fixes: -- Fix not being allowed to set your own state\_key, thanks to Patrik Oldsberg (PR #1150) +- Fix not being allowed to set your own `state_key`, thanks to Patrik Oldsberg (PR #1150) - Fix interactive auth to return 401 from for incorrect password (PR #1160, #1166) - Fix email push notifs being dropped (PR #1169) @@ -2363,7 +2364,7 @@ Changes in synapse v0.18.1-rc1 (2016-09-30) Features: -- Add total\_room\_count\_estimate to `/publicRooms` (PR #1133) +- Add `total_room_count_estimate` to `/publicRooms` (PR #1133) Changes: @@ -2398,17 +2399,17 @@ Features: - Add `only=highlight` on `/notifications` (PR #1081) - Add server param to /publicRooms (PR #1082) - Allow clients to ask for the whole of a single state event (PR #1094) -- Add is\_direct param to /createRoom (PR #1108) +- Add `is_direct` param to /createRoom (PR #1108) - Add pagination support to publicRooms (PR #1121) - Add very basic filter API to /publicRooms (PR #1126) - Add basic direct to device messaging support for E2E (PR #1074, #1084, #1104, #1111) Changes: -- Move to storing state\_groups\_state as deltas, greatly reducing DB size (PR #1065) +- Move to storing `state_groups_state` as deltas, greatly reducing DB size (PR #1065) - Reduce amount of state pulled out of the DB during common requests (PR #1069) - Allow PDF to be rendered from media repo (PR #1071) -- Reindex state\_groups\_state after pruning (PR #1085) +- Reindex `state_groups_state` after pruning (PR #1085) - Clobber EDUs in send queue (PR #1095) - Conform better to the CAS protocol specification (PR #1100) - Limit how often we ask for keys from dead servers (PR #1114) @@ -2442,22 +2443,22 @@ Changes: - Avoid pulling the full state of a room out so often (PR #1047, #1049, #1063, #1068) - Don't notify for online to online presence transitions. (PR #1054) - Occasionally persist unpersisted presence updates (PR #1055) -- Allow application services to have an optional \'url\' (PR #1056) +- Allow application services to have an optional `url` (PR #1056) - Clean up old sent transactions from DB (PR #1059) Bug fixes: - Fix None check in backfill (PR #1043) - Fix membership changes to be idempotent (PR #1067) -- Fix bug in get\_pdu where it would sometimes return events with incorrect signature +- Fix bug in `get_pdu` where it would sometimes return events with incorrect signature Changes in synapse v0.17.1 (2016-08-24) ======================================= Changes: -- Delete old received\_transactions rows (PR #1038) -- Pass through user-supplied content in /join/\$room\_id (PR #1039) +- Delete old `received_transactions` rows (PR #1038) +- Pass through user-supplied content in `/join/$room_id` (PR #1039) Bug fixes: @@ -2478,15 +2479,15 @@ Changes: - Move default display name push rule (PR #1011, #1023) - Fix up preview URL API. Add tests. 
(PR #1015)
 - Set `Content-Security-Policy` on media repo (PR #1021)
-- Make notify\_interested\_services faster (PR #1022)
+- Make `notify_interested_services` faster (PR #1022)
 - Add usage stats to prometheus monitoring (PR #1037)
 
 Bug fixes:
 
 - Fix token login (PR #993)
 - Fix CAS login (PR #994, #995)
-- Fix /sync to not clobber status\_msg (PR #997)
-- Fix redacted state events to include prev\_content (PR #1003)
+- Fix /sync to not clobber `status_msg` (PR #997)
+- Fix redacted state events to include `prev_content` (PR #1003)
 - Fix some bugs in the auth/ldap handler (PR #1007)
 - Fix backfill request to limit URI length, so that remotes Don't reject the requests due to path length limits (PR #1012)
 - Fix AS push code to not send duplicate events (PR #1025)
@@ -2527,7 +2528,7 @@ Changes in synapse v0.17.0-rc3 (2016-08-02)
 
 Changes:
 
-- Forbid non-ASes from registering users whose names begin with \'\_\' (PR #958)
+- Forbid non-ASes from registering users whose names begin with `_` (PR #958)
 - Add some basic admin API docs (PR #963)
 
 Bug fixes:
@@ -2549,16 +2550,16 @@ This release changes the LDAP configuration format in a backwards incompatible w
 
 Features:
 
-- Add purge\_media\_cache admin API (PR #902)
+- Add `purge_media_cache` admin API (PR #902)
 - Add deactivate account admin API (PR #903)
 - Add optional pepper to password hashing (PR #907, #910 by KentShikama)
 - Add an admin option to shared secret registration (breaks backwards compat) (PR #909)
 - Add purge local room history API (PR #911, #923, #924)
 - Add requestToken endpoints (PR #915)
 - Add an /account/deactivate endpoint (PR #921)
-- Add filter param to /messages. Add \'contains\_url\' to filter. (PR #922)
-- Add device\_id support to /login (PR #929)
-- Add device\_id support to /v2/register flow. (PR #937, #942)
+- Add filter param to /messages. Add `contains_url` to filter. (PR #922)
+- Add `device_id` support to /login (PR #929)
+- Add `device_id` support to /v2/register flow. (PR #937, #942)
 - Add GET /devices endpoint (PR #939, #944)
 - Add GET /device/{deviceId} (PR #943)
 - Add update and delete APIs for devices (PR #949)
@@ -2566,14 +2567,14 @@ Features:
 Changes:
 
 - Rewrite LDAP Authentication against ldap3 (PR #843 by mweinelt)
-- Linearize some federation endpoints based on (origin, room\_id) (PR #879)
+- Linearize some federation endpoints based on `(origin, room_id)` (PR #879)
 - Remove the legacy v0 content upload API. (PR #888)
 - Use similar naming we use in email notifs for push (PR #894)
 - Optionally include password hash in createUser endpoint (PR #905 by KentShikama)
-- Use a query that postgresql optimises better for get\_events\_around (PR #906)
-- Fall back to \'username\' if \'user\' is not given for appservice registration. (PR #927 by Half-Shot)
+- Use a query that postgresql optimises better for `get_events_around` (PR #906)
+- Fall back to `username` if `user` is not given for appservice registration. 
(PR #927 by Half-Shot) - Add metrics for psutil derived memory usage (PR #936) -- Record device\_id in client\_ips (PR #938) +- Record `device_id` in `client_ips` (PR #938) - Send the correct host header when fetching keys (PR #941) - Log the hostname the reCAPTCHA was completed on (PR #946) - Make the device id on e2e key upload optional (PR #956) @@ -2586,8 +2587,8 @@ Bug fixes: - Put most recent 20 messages in email notif (PR #892) - Ensure that the guest user is in the database when upgrading accounts (PR #914) - Fix various edge cases in auth handling (PR #919) -- Fix 500 ISE when sending alias event without a state\_key (PR #925) -- Fix bug where we stored rejections in the state\_group, persist all rejections (PR #948) +- Fix 500 ISE when sending alias event without a `state_key` (PR #925) +- Fix bug where we stored rejections in the `state_group`, persist all rejections (PR #948) - Fix lack of check of if the user is banned when handling 3pid invites (PR #952) - Fix a couple of bugs in the transaction and keyring code (PR #954, #955) @@ -2656,7 +2657,7 @@ Changes: Bug fixes: -- Fix \'From\' header in email notifications (PR #843) +- Fix `From` header in email notifications (PR #843) - Fix presence where timeouts were not being fired for the first 8h after restarts (PR #842) - Fix bug where synapse sent malformed transactions to AS's when retrying transactions (Commits 310197b, 8437906) @@ -2677,22 +2678,22 @@ Features: - Add a `url_preview_ip_range_whitelist` config param (PR #760) - Add /report endpoint (PR #762) - Add basic ignore user API (PR #763) -- Add an openidish mechanism for proving that you own a given user\_id (PR #765) -- Allow clients to specify a server\_name to avoid \'No known servers\' (PR #794) -- Add secondary\_directory\_servers option to fetch room list from other servers (PR #808, #813) +- Add an openidish mechanism for proving that you own a given `user_id` (PR #765) +- Allow clients to specify a `server_name` to avoid "No known servers" (PR #794) +- Add `secondary_directory_servers` option to fetch room list from other servers (PR #808, #813) Changes: -- Report per request metrics for all of the things using request\_handler (PR #756) +- Report per request metrics for all of the things using `request_handler` (PR #756) - Correctly handle `NULL` password hashes from the database (PR #775) - Allow receipts for events we haven't seen in the db (PR #784) - Make synctl read a cache factor from config file (PR #785) - Increment badge count per missed convo, not per msg (PR #793) -- Special case m.room.third\_party\_invite event auth to match invites (PR #814) +- Special case `m.room.third_party_invite` event auth to match invites (PR #814) Bug fixes: -- Fix typo in event\_auth servlet path (PR #757) +- Fix typo in `event_auth` servlet path (PR #757) - Fix password reset (PR #758) Performance improvements: @@ -2708,7 +2709,7 @@ Performance improvements: - Add `get_users_with_read_receipts_in_room` cache (PR #809) - Use state to calculate `get_users_in_room` (PR #811) - Load push rules in storage layer so that they get cached (PR #825) -- Make `get_joined_hosts_for_room` use get\_users\_in\_room (PR #828) +- Make `get_joined_hosts_for_room` use `get_users_in_room` (PR #828) - Poke notifier on next reactor tick (PR #829) - Change CacheMetrics to be quicker (PR #830) @@ -2772,19 +2773,19 @@ Changes in synapse v0.14.0-rc1 (2016-03-14) Features: -- Add event\_id to response to state event PUT (PR #581) +- Add `event_id` to response to state event PUT (PR #581) - Allow 
guest users access to messages in rooms they have joined (PR #587) - Add config for what state is included in a room invite (PR #598) - Send the inviter's member event in room invite state (PR #607) - Add error codes for malformed/bad JSON in /login (PR #608) - Add support for changing the actions for default rules (PR #609) -- Add environment variable SYNAPSE\_CACHE\_FACTOR, default it to 0.1 (PR #612) +- Add environment variable `SYNAPSE_CACHE_FACTOR`, default it to 0.1 (PR #612) - Add ability for alias creators to delete aliases (PR #614) - Add profile information to invites (PR #624) Changes: -- Enforce user\_id exclusivity for AS registrations (PR #572) +- Enforce `user_id` exclusivity for AS registrations (PR #572) - Make adding push rules idempotent (PR #587) - Improve presence performance (PR #582, #586) - Change presence semantics for `last_active_ago` (PR #582, #586) @@ -2792,7 +2793,7 @@ Changes: - Add 800x600 to default list of valid thumbnail sizes (PR #616) - Always include kicks and bans in full /sync (PR #625) - Send history visibility on boundary changes (PR #626) -- Register endpoint now returns a refresh\_token (PR #637) +- Register endpoint now returns a `refresh_token` (PR #637) Bug fixes: @@ -2963,7 +2964,7 @@ Changes in synapse v0.11.0-rc1 (2015-11-11) =========================================== - Add Search API (PR #307, #324, #327, #336, #350, #359) -- Add \'archived\' state to v2 /sync API (PR #316) +- Add `archived` state to v2 /sync API (PR #316) - Add ability to reject invites (PR #317) - Add config option to disable password login (PR #322) - Add the login fallback API (PR #330) @@ -3028,7 +3029,7 @@ Changes in synapse v0.10.0-rc3 (2015-08-25) =========================================== - Add `--keys-directory` config option to specify where files such as certs and signing keys should be stored in, when using `--generate-config` or `--generate-keys`. (PR #250) -- Allow `--config-path` to specify a directory, causing synapse to use all \*.yaml files in the directory as config files. (PR #249) +- Allow `--config-path` to specify a directory, causing synapse to use all `*.yaml` files in the directory as config files. (PR #249) - Add `web_client_location` config option to specify static files to be hosted by synapse under `/_matrix/client`. (PR #245) - Add helper utility to synapse to read and parse the config files and extract the value of a given key. For example: @@ -3060,7 +3061,7 @@ General: - Batch various storage request (PR #226, #228) - Fix bug where we didn't correctly log the entity that triggered the request if the request came in via an application service (PR #230) - Fix bug where we needlessly regenerated the full list of rooms an AS is interested in. (PR #232) -- Add support for AS's to use v2\_alpha registration API (PR #210) +- Add support for AS's to use `v2_alpha` registration API (PR #210) Configuration: @@ -3207,7 +3208,7 @@ Configuration: Application services: - Reliably retry sending of events from Synapse to application services, as per [Application Services](https://github.com/matrix-org/matrix-doc/blob/0c6bd9/specification/25_application_service_api.rst#home-server---application-service-api) spec. -- Application services can no longer register via the `/register` API, instead their configuration should be saved to a file and listed in the synapse `app_service_config_files` config option. The AS configuration file has the same format as the old `/register` request. 
See [docs/application\_services.rst](docs/application_services.rst) for more information. +- Application services can no longer register via the `/register` API, instead their configuration should be saved to a file and listed in the synapse `app_service_config_files` config option. The AS configuration file has the same format as the old `/register` request. See [docs/application_services.rst](docs/application_services.rst) for more information. Changes in synapse v0.8.1 (2015-03-18) ====================================== @@ -3289,7 +3290,7 @@ Changes in synapse 0.6.0 (2014-12-16) ===================================== - Add new API for media upload and download that supports thumbnailing. -- Replicate media uploads over multiple homeservers so media is always served to clients from their local homeserver. This obsoletes the \--content-addr parameter and confusion over accessing content directly from remote homeservers. +- Replicate media uploads over multiple homeservers so media is always served to clients from their local homeserver. This obsoletes the `--content-addr` parameter and confusion over accessing content directly from remote homeservers. - Implement exponential backoff when retrying federation requests when sending to remote homeservers which are offline. - Implement typing notifications. - Fix bugs where we sent events with invalid signatures due to bugs where we incorrectly persisted events. @@ -3304,13 +3305,13 @@ Changes in synapse 0.5.4 (2014-12-03) ===================================== - Fix presence bug where some rooms did not display presence updates for remote users. -- Do not log SQL timing log lines when started with \"-v\" +- Do not log SQL timing log lines when started with `-v` - Fix potential memory leak. Changes in synapse 0.5.3c (2014-12-02) ====================================== -- Change the default value for the content\_addr option to use the HTTP listener, as by default the HTTPS listener will be using a self-signed certificate. +- Change the default value for the `content_addr` option to use the HTTP listener, as by default the HTTPS listener will be using a self-signed certificate. Changes in synapse 0.5.3 (2014-11-27) ===================================== @@ -3391,7 +3392,7 @@ You will also need an updated syutil and config. See UPGRADES.rst. Homeserver: - Sign federation transactions to assert strong identity over federation. -- Rename timestamp keys in PDUs and events from \'ts\' and \'hsob\_ts\' to \'origin\_server\_ts\'. +- Rename timestamp keys in PDUs and events from `ts` and `hsob_ts` to `origin_server_ts`. Changes in synapse 0.3.4 (2014-09-25) ===================================== @@ -3461,9 +3462,9 @@ See UPGRADE for information about changes to the client server API, including br Homeserver: - When a user changes their displayname or avatar the server will now update all their join states to reflect this. -- The server now adds \"age\" key to events to indicate how old they are. This is clock independent, so at no point does any server or webclient have to assume their clock is in sync with everyone else. +- The server now adds `age` key to events to indicate how old they are. This is clock independent, so at no point does any server or webclient have to assume their clock is in sync with everyone else. - Fix bug where we didn't correctly pull in missing PDUs. -- Fix bug where prev\_content key wasn't always returned. +- Fix bug where `prev_content` key wasn't always returned. - Add support for password resets. 
Webclient: @@ -3481,9 +3482,9 @@ Webclient: Registration API: -- The registration API has been overhauled to function like the login API. In practice, this means registration requests must now include the following: \'type\':\'m.login.password\'. See UPGRADE for more information on this. -- The \'user\_id\' key has been renamed to \'user\' to better match the login API. -- There is an additional login type: \'m.login.email.identity\'. +- The registration API has been overhauled to function like the login API. In practice, this means registration requests must now include the following: `type`:`m.login.password`. See UPGRADE for more information on this. +- The `user_id` key has been renamed to `user` to better match the login API. +- There is an additional login type: `m.login.email.identity`. - The command client and web client have been updated to reflect these changes. Changes in synapse 0.2.3 (2014-09-12) @@ -3516,7 +3517,7 @@ Homeserver: - When the server returns state events it now also includes the previous content. - Add support for inviting people when creating a new room. - Make the homeserver inform the room via m.room.aliases when a new alias is added for a room. -- Validate m.room.power\_level events. +- Validate `m.room.power_level` events. Webclient: @@ -3559,7 +3560,7 @@ Homeserver: - Add support for kicking/banning and power levels. - Allow setting of room names and topics on creation. - Change presence to include last seen time of the user. -- Change url path prefix to /\_matrix/\... +- Change url path prefix to `/_matrix/...` - Bug fixes to presence. Webclient: diff --git a/docs/user_directory.md b/docs/user_directory.md index b33fd2bc2a..1271cfb862 100644 --- a/docs/user_directory.md +++ b/docs/user_directory.md @@ -83,7 +83,7 @@ The search term is then split into words: available, then the system's [default locale](https://unicode-org.github.io/icu/userguide/locale/#default-locales) will be used to break the search term into words. (See the [installation instructions](setup/installation.md) for how to install ICU.) -* If unavailable, then runs of ASCII characters, numbers, underscores, and hypens +* If unavailable, then runs of ASCII characters, numbers, underscores, and hyphens are considered words. The queries for PostgreSQL and SQLite are detailed below, by their overall goal From 1e67191a79a06fb4031b17c24b4621c066345182 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 3 Oct 2023 11:55:29 +0300 Subject: [PATCH 543/562] Add note to 'federation_domain_whitelist' option (#16416) --- changelog.d/16416.doc | 1 + docs/usage/configuration/config_documentation.md | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 changelog.d/16416.doc diff --git a/changelog.d/16416.doc b/changelog.d/16416.doc new file mode 100644 index 0000000000..be2b7d2805 --- /dev/null +++ b/changelog.d/16416.doc @@ -0,0 +1 @@ +Add note to `federation_domain_whitelist` config option to clarify its usage. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 502cd9f823..92e00c1380 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1190,6 +1190,11 @@ inbound federation traffic as early as possible, rather than relying purely on this application-layer restriction. If not specified, the default is to whitelist everything. +Note: this does not stop a server from joining rooms that servers not on the +whitelist are in. 
As such, this option is really only useful to establish a +"private federation", where a group of servers all whitelist each other and have +the same whitelist. + Example configuration: ```yaml federation_domain_whitelist: From cce94844523c614ad0b5c30c101618bd5d8f8a66 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 10:57:11 +0100 Subject: [PATCH 544/562] Bump urllib3 from 1.26.15 to 1.26.17 (#16422) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 255396033c..13884e6698 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3197,17 +3197,17 @@ files = [ [[package]] name = "urllib3" -version = "1.26.15" +version = "1.26.17" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ - {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"}, - {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"}, + {file = "urllib3-1.26.17-py2.py3-none-any.whl", hash = "sha256:94a757d178c9be92ef5539b8840d48dc9cf1b2709c9d6b588232a055c524458b"}, + {file = "urllib3-1.26.17.tar.gz", hash = "sha256:24d6a242c28d29af46c3fae832c36db3bbebcc533dd1bb549172cd739c82df21"}, ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] From 8b50a9d01da2c84bb9838287519fa3e0a4e955ce Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 3 Oct 2023 11:50:57 +0100 Subject: [PATCH 545/562] 1.94.0rc1 --- CHANGES.md | 52 +++++++++++++++++++++++++++++++++++++++ changelog.d/14745.misc | 1 - changelog.d/14911.misc | 1 - changelog.d/15691.doc | 1 - changelog.d/15988.feature | 1 - changelog.d/16320.doc | 1 - changelog.d/16332.misc | 1 - changelog.d/16348.misc | 1 - changelog.d/16350.misc | 1 - changelog.d/16355.doc | 1 - changelog.d/16356.misc | 1 - changelog.d/16359.misc | 1 - changelog.d/16360.misc | 1 - changelog.d/16361.feature | 1 - changelog.d/16380.removal | 1 - changelog.d/16381.misc | 1 - changelog.d/16382.doc | 1 - changelog.d/16383.misc | 1 - changelog.d/16385.misc | 1 - changelog.d/16387.misc | 1 - changelog.d/16394.misc | 1 - changelog.d/16395.misc | 1 - changelog.d/16401.misc | 1 - changelog.d/16416.doc | 1 - changelog.d/16418.doc | 1 - debian/changelog | 6 +++++ pyproject.toml | 2 +- 27 files changed, 59 insertions(+), 25 deletions(-) delete mode 100644 changelog.d/14745.misc delete mode 100644 changelog.d/14911.misc delete mode 100644 changelog.d/15691.doc delete mode 100644 changelog.d/15988.feature delete mode 100644 changelog.d/16320.doc delete mode 100644 changelog.d/16332.misc delete mode 100644 changelog.d/16348.misc delete mode 100644 changelog.d/16350.misc delete mode 100644 changelog.d/16355.doc delete mode 100644 changelog.d/16356.misc delete mode 100644 changelog.d/16359.misc delete mode 100644 changelog.d/16360.misc delete mode 100644 changelog.d/16361.feature delete mode 100644 changelog.d/16380.removal delete mode 100644 
changelog.d/16381.misc delete mode 100644 changelog.d/16382.doc delete mode 100644 changelog.d/16383.misc delete mode 100644 changelog.d/16385.misc delete mode 100644 changelog.d/16387.misc delete mode 100644 changelog.d/16394.misc delete mode 100644 changelog.d/16395.misc delete mode 100644 changelog.d/16401.misc delete mode 100644 changelog.d/16416.doc delete mode 100644 changelog.d/16418.doc diff --git a/CHANGES.md b/CHANGES.md index c1ea40de20..9e73868788 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,55 @@ +# Synapse 1.94.0rc1 (2023-10-03) + +### Features + +- Render plain, CSS, CSV, JSON and common image formats media content in the browser (inline) when requested through the /download endpoint. ([\#15988](https://github.com/matrix-org/synapse/issues/15988)) +- Experimental support for [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028) to push all encrypted events to clients. ([\#16361](https://github.com/matrix-org/synapse/issues/16361)) +- Minor performance improvement when sending presence to federated servers. ([\#16385](https://github.com/matrix-org/synapse/issues/16385)) +- Minor performance improvement by caching server ACL checking. ([\#16360](https://github.com/matrix-org/synapse/issues/16360)) + +### Improved Documentation + +- Add developer documentation concerning gradual schema migrations with column alterations. ([\#15691](https://github.com/matrix-org/synapse/issues/15691)) +- Improve documentation of the user directory search algorithm. ([\#16320](https://github.com/matrix-org/synapse/issues/16320)) +- Fix rendering of user admin API documentation around deactivation. This was broken in Synapse 1.91.0. ([\#16355](https://github.com/matrix-org/synapse/issues/16355)) +- Update documentation around message retention policies. ([\#16382](https://github.com/matrix-org/synapse/issues/16382)) +- Add note to `federation_domain_whitelist` config option to clarify its usage. ([\#16416](https://github.com/matrix-org/synapse/issues/16416)) +- Improve legacy release notes. ([\#16418](https://github.com/matrix-org/synapse/issues/16418)) + +### Deprecations and Removals + +- Remove Python version from `/_synapse/admin/v1/server_version`. ([\#16380](https://github.com/matrix-org/synapse/issues/16380)) + +### Internal Changes + +- Avoid running CI steps when the files they check have not been changed. ([\#14745](https://github.com/matrix-org/synapse/issues/14745), [\#16387](https://github.com/matrix-org/synapse/issues/16387)) +- Improve type hints. ([\#14911](https://github.com/matrix-org/synapse/issues/14911), [\#16350](https://github.com/matrix-org/synapse/issues/16350), [\#16356](https://github.com/matrix-org/synapse/issues/16356), [\#16395](https://github.com/matrix-org/synapse/issues/16395)) +- Added support for pydantic v2 in addition to pydantic v1. Contributed by Maxwell G (@gotmax23). ([\#16332](https://github.com/matrix-org/synapse/issues/16332)) +- Get CI to check PRs have been signed-off. ([\#16348](https://github.com/matrix-org/synapse/issues/16348)) +- Add missing licence header. ([\#16359](https://github.com/matrix-org/synapse/issues/16359)) +- Improve type hints, and bump types-psycopg2 from 2.9.21.11 to 2.9.21.14. ([\#16381](https://github.com/matrix-org/synapse/issues/16381)) +- Improve comments in `StateGroupBackgroundUpdateStore`. ([\#16383](https://github.com/matrix-org/synapse/issues/16383)) +- Update maturin configuration. 
([\#16394](https://github.com/matrix-org/synapse/issues/16394)) +- Downgrade replication stream time out error log lines to warning. ([\#16401](https://github.com/matrix-org/synapse/issues/16401)) + +### Updates to locked dependencies + +* Bump actions/checkout from 3 to 4. ([\#16250](https://github.com/matrix-org/synapse/issues/16250)) +* Bump cryptography from 41.0.3 to 41.0.4. ([\#16362](https://github.com/matrix-org/synapse/issues/16362)) +* Bump dawidd6/action-download-artifact from 2.27.0 to 2.28.0. ([\#16374](https://github.com/matrix-org/synapse/issues/16374)) +* Bump docker/setup-buildx-action from 2 to 3. ([\#16375](https://github.com/matrix-org/synapse/issues/16375)) +* Bump gitpython from 3.1.35 to 3.1.37. ([\#16376](https://github.com/matrix-org/synapse/issues/16376)) +* Bump msgpack from 1.0.5 to 1.0.6. ([\#16377](https://github.com/matrix-org/synapse/issues/16377)) +* Bump msgpack from 1.0.6 to 1.0.7. ([\#16412](https://github.com/matrix-org/synapse/issues/16412)) +* Bump phonenumbers from 8.13.19 to 8.13.22. ([\#16413](https://github.com/matrix-org/synapse/issues/16413)) +* Bump psycopg2 from 2.9.7 to 2.9.8. ([\#16409](https://github.com/matrix-org/synapse/issues/16409)) +* Bump pydantic from 2.3.0 to 2.4.2. ([\#16410](https://github.com/matrix-org/synapse/issues/16410)) +* Bump regex from 1.9.5 to 1.9.6. ([\#16408](https://github.com/matrix-org/synapse/issues/16408)) +* Bump sentry-sdk from 1.30.0 to 1.31.0. ([\#16378](https://github.com/matrix-org/synapse/issues/16378)) +* Bump types-netaddr from 0.8.0.9 to 0.9.0.1. ([\#16411](https://github.com/matrix-org/synapse/issues/16411)) +* Bump types-psycopg2 from 2.9.21.11 to 2.9.21.14. ([\#16381](https://github.com/matrix-org/synapse/issues/16381)) +* Bump urllib3 from 1.26.15 to 1.26.17. ([\#16422](https://github.com/matrix-org/synapse/issues/16422)) + # Synapse 1.93.0 (2023-09-26) No significant changes since 1.93.0rc1. diff --git a/changelog.d/14745.misc b/changelog.d/14745.misc deleted file mode 100644 index eae0501d6b..0000000000 --- a/changelog.d/14745.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid running CI steps when the files they check have not been changed. \ No newline at end of file diff --git a/changelog.d/14911.misc b/changelog.d/14911.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/14911.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15691.doc b/changelog.d/15691.doc deleted file mode 100644 index fe649e1027..0000000000 --- a/changelog.d/15691.doc +++ /dev/null @@ -1 +0,0 @@ -Add developer documentation concerning gradual schema migrations with column alterations. \ No newline at end of file diff --git a/changelog.d/15988.feature b/changelog.d/15988.feature deleted file mode 100644 index dee8fa597f..0000000000 --- a/changelog.d/15988.feature +++ /dev/null @@ -1 +0,0 @@ -Render plain, CSS, CSV, JSON and common image formats media content in the browser (inline) when requested through the /download endpoint. \ No newline at end of file diff --git a/changelog.d/16320.doc b/changelog.d/16320.doc deleted file mode 100644 index 53e42df56f..0000000000 --- a/changelog.d/16320.doc +++ /dev/null @@ -1 +0,0 @@ -Improve documentation of the user directory search algorithm. diff --git a/changelog.d/16332.misc b/changelog.d/16332.misc deleted file mode 100644 index 862d547d60..0000000000 --- a/changelog.d/16332.misc +++ /dev/null @@ -1 +0,0 @@ -Added support for pydantic v2 in addition to pydantic v1. Contributed by Maxwell G (@gotmax23). 
diff --git a/changelog.d/16348.misc b/changelog.d/16348.misc deleted file mode 100644 index 846bb048c8..0000000000 --- a/changelog.d/16348.misc +++ /dev/null @@ -1 +0,0 @@ -Get CI to check PRs have been signed-off. diff --git a/changelog.d/16350.misc b/changelog.d/16350.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16350.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/16355.doc b/changelog.d/16355.doc deleted file mode 100644 index 73d29c7889..0000000000 --- a/changelog.d/16355.doc +++ /dev/null @@ -1 +0,0 @@ -Fix rendering of user admin API documentation around deactivation. This was broken in Synapse 1.91.0. diff --git a/changelog.d/16356.misc b/changelog.d/16356.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16356.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/16359.misc b/changelog.d/16359.misc deleted file mode 100644 index 8752085fc6..0000000000 --- a/changelog.d/16359.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing licence header. diff --git a/changelog.d/16360.misc b/changelog.d/16360.misc deleted file mode 100644 index b32d7b521e..0000000000 --- a/changelog.d/16360.misc +++ /dev/null @@ -1 +0,0 @@ -Cache server ACL checking. diff --git a/changelog.d/16361.feature b/changelog.d/16361.feature deleted file mode 100644 index 632fff789b..0000000000 --- a/changelog.d/16361.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental support for [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028) to push all encrypted events to clients. diff --git a/changelog.d/16380.removal b/changelog.d/16380.removal deleted file mode 100644 index 6e9372134d..0000000000 --- a/changelog.d/16380.removal +++ /dev/null @@ -1 +0,0 @@ -Remove Python version from `/_synapse/admin/v1/server_version`. \ No newline at end of file diff --git a/changelog.d/16381.misc b/changelog.d/16381.misc deleted file mode 100644 index a454651952..0000000000 --- a/changelog.d/16381.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints, and bump types-psycopg2 from 2.9.21.11 to 2.9.21.14. diff --git a/changelog.d/16382.doc b/changelog.d/16382.doc deleted file mode 100644 index 2549586310..0000000000 --- a/changelog.d/16382.doc +++ /dev/null @@ -1 +0,0 @@ -Update documentation around message retention policies. diff --git a/changelog.d/16383.misc b/changelog.d/16383.misc deleted file mode 100644 index d8d84cc184..0000000000 --- a/changelog.d/16383.misc +++ /dev/null @@ -1 +0,0 @@ -Improve comments in `StateGroupBackgroundUpdateStore`. diff --git a/changelog.d/16385.misc b/changelog.d/16385.misc deleted file mode 100644 index d439a931d6..0000000000 --- a/changelog.d/16385.misc +++ /dev/null @@ -1 +0,0 @@ -Minor performance improvement when sending presence to federated servers. diff --git a/changelog.d/16387.misc b/changelog.d/16387.misc deleted file mode 100644 index eae0501d6b..0000000000 --- a/changelog.d/16387.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid running CI steps when the files they check have not been changed. \ No newline at end of file diff --git a/changelog.d/16394.misc b/changelog.d/16394.misc deleted file mode 100644 index ee08c3e024..0000000000 --- a/changelog.d/16394.misc +++ /dev/null @@ -1 +0,0 @@ -Update maturin configuration. diff --git a/changelog.d/16395.misc b/changelog.d/16395.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16395.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. 
diff --git a/changelog.d/16401.misc b/changelog.d/16401.misc deleted file mode 100644 index 86d2749a08..0000000000 --- a/changelog.d/16401.misc +++ /dev/null @@ -1 +0,0 @@ -Downgrade replication stream time out error log lines to warning. diff --git a/changelog.d/16416.doc b/changelog.d/16416.doc deleted file mode 100644 index be2b7d2805..0000000000 --- a/changelog.d/16416.doc +++ /dev/null @@ -1 +0,0 @@ -Add note to `federation_domain_whitelist` config option to clarify its usage. diff --git a/changelog.d/16418.doc b/changelog.d/16418.doc deleted file mode 100644 index 4ec5dbb6b2..0000000000 --- a/changelog.d/16418.doc +++ /dev/null @@ -1 +0,0 @@ -Improve legacy release notes. diff --git a/debian/changelog b/debian/changelog index 7be71019b4..78da69ebb0 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.94.0~rc1) stable; urgency=medium + + * New Synapse release 1.94.0rc1. + + -- Synapse Packaging team Tue, 03 Oct 2023 11:48:18 +0100 + matrix-synapse-py3 (1.93.0) stable; urgency=medium * New Synapse release 1.93.0. diff --git a/pyproject.toml b/pyproject.toml index 5fb64479a1..b22172291a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.93.0" +version = "1.94.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From a01ee24734242cf95a29a3631d7f1192cc8bd2af Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 3 Oct 2023 13:21:45 +0100 Subject: [PATCH 546/562] Update changelog --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 9e73868788..6c30c40858 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -2,8 +2,8 @@ ### Features -- Render plain, CSS, CSV, JSON and common image formats media content in the browser (inline) when requested through the /download endpoint. ([\#15988](https://github.com/matrix-org/synapse/issues/15988)) -- Experimental support for [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028) to push all encrypted events to clients. ([\#16361](https://github.com/matrix-org/synapse/issues/16361)) +- Render plain, CSS, CSV, JSON and common image formats in the browser (inline) when requested through the /download endpoint. ([\#15988](https://github.com/matrix-org/synapse/issues/15988)) +- Add experimental support for [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028) to push all encrypted events to clients. ([\#16361](https://github.com/matrix-org/synapse/issues/16361)) - Minor performance improvement when sending presence to federated servers. ([\#16385](https://github.com/matrix-org/synapse/issues/16385)) - Minor performance improvement by caching server ACL checking. 
([\#16360](https://github.com/matrix-org/synapse/issues/16360)) From 80ec81dcc54bdb823b95c2f870a919868de9a481 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 4 Oct 2023 18:28:40 +0300 Subject: [PATCH 547/562] Some refactors around receipts stream (#16426) --- changelog.d/16426.misc | 1 + synapse/handlers/appservice.py | 4 +- synapse/handlers/push_rules.py | 6 +- synapse/handlers/receipts.py | 25 ++++---- synapse/notifier.py | 17 +++--- synapse/push/__init__.py | 2 +- synapse/push/emailpusher.py | 2 +- synapse/push/httppusher.py | 2 +- synapse/push/pusherpool.py | 12 +--- synapse/replication/tcp/client.py | 4 +- .../storage/databases/main/e2e_room_keys.py | 2 +- synapse/storage/databases/main/receipts.py | 6 +- synapse/streams/events.py | 15 +++-- synapse/types/__init__.py | 59 ++++++++++++++----- tests/handlers/test_appservice.py | 8 +-- tests/handlers/test_typing.py | 26 +++++--- 16 files changed, 111 insertions(+), 80 deletions(-) create mode 100644 changelog.d/16426.misc diff --git a/changelog.d/16426.misc b/changelog.d/16426.misc new file mode 100644 index 0000000000..208a007171 --- /dev/null +++ b/changelog.d/16426.misc @@ -0,0 +1 @@ +Refactor some code to simplify and better type receipts stream adjacent code. diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 7de7bd3289..c200a45f3a 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -216,7 +216,7 @@ class ApplicationServicesHandler: def notify_interested_services_ephemeral( self, - stream_key: str, + stream_key: StreamKeyType, new_token: Union[int, RoomStreamToken], users: Collection[Union[str, UserID]], ) -> None: @@ -326,7 +326,7 @@ class ApplicationServicesHandler: async def _notify_interested_services_ephemeral( self, services: List[ApplicationService], - stream_key: str, + stream_key: StreamKeyType, new_token: int, users: Collection[Union[str, UserID]], ) -> None: diff --git a/synapse/handlers/push_rules.py b/synapse/handlers/push_rules.py index 7ed88a3611..87b428ab1c 100644 --- a/synapse/handlers/push_rules.py +++ b/synapse/handlers/push_rules.py @@ -19,7 +19,7 @@ from synapse.api.errors import SynapseError, UnrecognizedRequestError from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.push_rule import RuleNotFoundException from synapse.synapse_rust.push import get_base_rule_ids -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, StreamKeyType, UserID if TYPE_CHECKING: from synapse.server import HomeServer @@ -114,7 +114,9 @@ class PushRulesHandler: user_id: the user ID the change is for. 
""" stream_id = self._main_store.get_max_push_rules_stream_id() - self._notifier.on_new_event("push_rules_key", stream_id, users=[user_id]) + self._notifier.on_new_event( + StreamKeyType.PUSH_RULES, stream_id, users=[user_id] + ) async def push_rules_for_user( self, user: UserID diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index a7a29b758b..69ac468f75 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -130,11 +130,10 @@ class ReceiptsHandler: async def _handle_new_receipts(self, receipts: List[ReadReceipt]) -> bool: """Takes a list of receipts, stores them and informs the notifier.""" - min_batch_id: Optional[int] = None - max_batch_id: Optional[int] = None + receipts_persisted: List[ReadReceipt] = [] for receipt in receipts: - res = await self.store.insert_receipt( + stream_id = await self.store.insert_receipt( receipt.room_id, receipt.receipt_type, receipt.user_id, @@ -143,30 +142,26 @@ class ReceiptsHandler: receipt.data, ) - if not res: - # res will be None if this receipt is 'old' + if stream_id is None: + # stream_id will be None if this receipt is 'old' continue - stream_id, max_persisted_id = res + receipts_persisted.append(receipt) - if min_batch_id is None or stream_id < min_batch_id: - min_batch_id = stream_id - if max_batch_id is None or max_persisted_id > max_batch_id: - max_batch_id = max_persisted_id - - # Either both of these should be None or neither. - if min_batch_id is None or max_batch_id is None: + if not receipts_persisted: # no new receipts return False - affected_room_ids = list({r.room_id for r in receipts}) + max_batch_id = self.store.get_max_receipt_stream_id() + + affected_room_ids = list({r.room_id for r in receipts_persisted}) self.notifier.on_new_event( StreamKeyType.RECEIPT, max_batch_id, rooms=affected_room_ids ) # Note that the min here shouldn't be relied upon to be accurate. 
await self.hs.get_pusherpool().on_new_receipts( - min_batch_id, max_batch_id, affected_room_ids + {r.user_id for r in receipts_persisted} ) return True diff --git a/synapse/notifier.py b/synapse/notifier.py index fc39e5c963..99e7715896 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -126,7 +126,7 @@ class _NotifierUserStream: def notify( self, - stream_key: str, + stream_key: StreamKeyType, stream_id: Union[int, RoomStreamToken], time_now_ms: int, ) -> None: @@ -454,7 +454,7 @@ class Notifier: def on_new_event( self, - stream_key: str, + stream_key: StreamKeyType, new_token: Union[int, RoomStreamToken], users: Optional[Collection[Union[str, UserID]]] = None, rooms: Optional[StrCollection] = None, @@ -655,30 +655,29 @@ class Notifier: events: List[Union[JsonDict, EventBase]] = [] end_token = from_token - for name, source in self.event_sources.sources.get_sources(): - keyname = "%s_key" % name - before_id = getattr(before_token, keyname) - after_id = getattr(after_token, keyname) + for keyname, source in self.event_sources.sources.get_sources(): + before_id = before_token.get_field(keyname) + after_id = after_token.get_field(keyname) if before_id == after_id: continue new_events, new_key = await source.get_new_events( user=user, - from_key=getattr(from_token, keyname), + from_key=from_token.get_field(keyname), limit=limit, is_guest=is_peeking, room_ids=room_ids, explicit_room_id=explicit_room_id, ) - if name == "room": + if keyname == StreamKeyType.ROOM: new_events = await filter_events_for_client( self._storage_controllers, user.to_string(), new_events, is_peeking=is_peeking, ) - elif name == "presence": + elif keyname == StreamKeyType.PRESENCE: now = self.clock.time_msec() new_events[:] = [ { diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 9e3a98741a..9e5eb2a445 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -182,7 +182,7 @@ class Pusher(metaclass=abc.ABCMeta): raise NotImplementedError() @abc.abstractmethod - def on_new_receipts(self, min_stream_id: int, max_stream_id: int) -> None: + def on_new_receipts(self) -> None: raise NotImplementedError() @abc.abstractmethod diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 1710dd51b9..cf45fd09a8 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -99,7 +99,7 @@ class EmailPusher(Pusher): pass self.timed_call = None - def on_new_receipts(self, min_stream_id: int, max_stream_id: int) -> None: + def on_new_receipts(self) -> None: # We could wake up and cancel the timer but there tend to be quite a # lot of read receipts so it's probably less work to just let the # timer fire diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 50027680cb..725910a659 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -160,7 +160,7 @@ class HttpPusher(Pusher): if should_check_for_notifs: self._start_processing() - def on_new_receipts(self, min_stream_id: int, max_stream_id: int) -> None: + def on_new_receipts(self) -> None: # Note that the min here shouldn't be relied upon to be accurate. 
# We could check the receipts are actually m.read receipts here, diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 6517e3566f..15a2cc932f 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -292,20 +292,12 @@ class PusherPool: except Exception: logger.exception("Exception in pusher on_new_notifications") - async def on_new_receipts( - self, min_stream_id: int, max_stream_id: int, affected_room_ids: Iterable[str] - ) -> None: + async def on_new_receipts(self, users_affected: StrCollection) -> None: if not self.pushers: # nothing to do here. return try: - # Need to subtract 1 from the minimum because the lower bound here - # is not inclusive - users_affected = await self.store.get_users_sent_receipts_between( - min_stream_id - 1, max_stream_id - ) - for u in users_affected: # Don't push if the user account has expired expired = await self._account_validity_handler.is_user_expired(u) @@ -314,7 +306,7 @@ class PusherPool: if u in self.pushers: for p in self.pushers[u].values(): - p.on_new_receipts(min_stream_id, max_stream_id) + p.on_new_receipts() except Exception: logger.exception("Exception in pusher on_new_receipts") diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index f4f2b29e96..d5337fe588 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -129,9 +129,7 @@ class ReplicationDataHandler: self.notifier.on_new_event( StreamKeyType.RECEIPT, token, rooms=[row.room_id for row in rows] ) - await self._pusher_pool.on_new_receipts( - token, token, {row.room_id for row in rows} - ) + await self._pusher_pool.on_new_receipts({row.user_id for row in rows}) elif stream_name == ToDeviceStream.NAME: entities = [row.entity for row in rows if row.entity.startswith("@")] if entities: diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py index d01f28cc80..bc7c6a6346 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py @@ -208,7 +208,7 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): "message": "Set room key", "room_id": room_id, "session_id": session_id, - StreamKeyType.ROOM: room_key, + StreamKeyType.ROOM.value: room_key, } ) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 0231f9407b..3bab1024ea 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -742,7 +742,7 @@ class ReceiptsWorkerStore(SQLBaseStore): event_ids: List[str], thread_id: Optional[str], data: dict, - ) -> Optional[Tuple[int, int]]: + ) -> Optional[int]: """Insert a receipt, either from local client or remote server. Automatically does conversion between linearized and graph @@ -804,9 +804,7 @@ class ReceiptsWorkerStore(SQLBaseStore): data, ) - max_persisted_id = self._receipts_id_gen.get_current_token() - - return stream_id, max_persisted_id + return stream_id async def _insert_graph_receipt( self, diff --git a/synapse/streams/events.py b/synapse/streams/events.py index d7084d2358..609a0978a9 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import TYPE_CHECKING, Iterator, Tuple +from typing import TYPE_CHECKING, Sequence, Tuple import attr @@ -23,7 +23,7 @@ from synapse.handlers.room import RoomEventSource from synapse.handlers.typing import TypingNotificationEventSource from synapse.logging.opentracing import trace from synapse.streams import EventSource -from synapse.types import StreamToken +from synapse.types import StreamKeyType, StreamToken if TYPE_CHECKING: from synapse.server import HomeServer @@ -37,9 +37,14 @@ class _EventSourcesInner: receipt: ReceiptEventSource account_data: AccountDataEventSource - def get_sources(self) -> Iterator[Tuple[str, EventSource]]: - for attribute in attr.fields(_EventSourcesInner): - yield attribute.name, getattr(self, attribute.name) + def get_sources(self) -> Sequence[Tuple[StreamKeyType, EventSource]]: + return [ + (StreamKeyType.ROOM, self.room), + (StreamKeyType.PRESENCE, self.presence), + (StreamKeyType.TYPING, self.typing), + (StreamKeyType.RECEIPT, self.receipt), + (StreamKeyType.ACCOUNT_DATA, self.account_data), + ] class EventSources: diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 76b0e3e694..406d5b1611 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -22,8 +22,8 @@ from typing import ( Any, ClassVar, Dict, - Final, List, + Literal, Mapping, Match, MutableMapping, @@ -34,6 +34,7 @@ from typing import ( Type, TypeVar, Union, + overload, ) import attr @@ -649,20 +650,20 @@ class RoomStreamToken: return "s%d" % (self.stream,) -class StreamKeyType: +class StreamKeyType(Enum): """Known stream types. A stream is a list of entities ordered by an incrementing "stream token". """ - ROOM: Final = "room_key" - PRESENCE: Final = "presence_key" - TYPING: Final = "typing_key" - RECEIPT: Final = "receipt_key" - ACCOUNT_DATA: Final = "account_data_key" - PUSH_RULES: Final = "push_rules_key" - TO_DEVICE: Final = "to_device_key" - DEVICE_LIST: Final = "device_list_key" + ROOM = "room_key" + PRESENCE = "presence_key" + TYPING = "typing_key" + RECEIPT = "receipt_key" + ACCOUNT_DATA = "account_data_key" + PUSH_RULES = "push_rules_key" + TO_DEVICE = "to_device_key" + DEVICE_LIST = "device_list_key" UN_PARTIAL_STATED_ROOMS = "un_partial_stated_rooms_key" @@ -784,7 +785,7 @@ class StreamToken: def room_stream_id(self) -> int: return self.room_key.stream - def copy_and_advance(self, key: str, new_value: Any) -> "StreamToken": + def copy_and_advance(self, key: StreamKeyType, new_value: Any) -> "StreamToken": """Advance the given key in the token to a new value if and only if the new value is after the old value. @@ -797,16 +798,44 @@ class StreamToken: return new_token new_token = self.copy_and_replace(key, new_value) - new_id = int(getattr(new_token, key)) - old_id = int(getattr(self, key)) + new_id = new_token.get_field(key) + old_id = self.get_field(key) if old_id < new_id: return new_token else: return self - def copy_and_replace(self, key: str, new_value: Any) -> "StreamToken": - return attr.evolve(self, **{key: new_value}) + def copy_and_replace(self, key: StreamKeyType, new_value: Any) -> "StreamToken": + return attr.evolve(self, **{key.value: new_value}) + + @overload + def get_field(self, key: Literal[StreamKeyType.ROOM]) -> RoomStreamToken: + ... 
+ + @overload + def get_field( + self, + key: Literal[ + StreamKeyType.ACCOUNT_DATA, + StreamKeyType.DEVICE_LIST, + StreamKeyType.PRESENCE, + StreamKeyType.PUSH_RULES, + StreamKeyType.RECEIPT, + StreamKeyType.TO_DEVICE, + StreamKeyType.TYPING, + StreamKeyType.UN_PARTIAL_STATED_ROOMS, + ], + ) -> int: + ... + + @overload + def get_field(self, key: StreamKeyType) -> Union[int, RoomStreamToken]: + ... + + def get_field(self, key: StreamKeyType) -> Union[int, RoomStreamToken]: + """Returns the stream ID for the given key.""" + return getattr(self, key.value) StreamToken.START = StreamToken(RoomStreamToken(None, 0), 0, 0, 0, 0, 0, 0, 0, 0, 0) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index a7e6cdd66a..8ce6ccf529 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -31,7 +31,7 @@ from synapse.appservice import ( from synapse.handlers.appservice import ApplicationServicesHandler from synapse.rest.client import login, receipts, register, room, sendtodevice from synapse.server import HomeServer -from synapse.types import JsonDict, RoomStreamToken +from synapse.types import JsonDict, RoomStreamToken, StreamKeyType from synapse.util import Clock from synapse.util.stringutils import random_string @@ -304,7 +304,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): ) self.handler.notify_interested_services_ephemeral( - "receipt_key", 580, ["@fakerecipient:example.com"] + StreamKeyType.RECEIPT, 580, ["@fakerecipient:example.com"] ) self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( interested_service, ephemeral=[event] @@ -332,7 +332,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): ) self.handler.notify_interested_services_ephemeral( - "receipt_key", 580, ["@fakerecipient:example.com"] + StreamKeyType.RECEIPT, 580, ["@fakerecipient:example.com"] ) # This method will be called, but with an empty list of events self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( @@ -634,7 +634,7 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase): self.get_success( self.hs.get_application_service_handler()._notify_interested_services_ephemeral( services=[interested_appservice], - stream_key="receipt_key", + stream_key=StreamKeyType.RECEIPT, new_token=stream_token, users=[self.exclusive_as_user], ) diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 95106ec8f3..3060bc9744 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -28,7 +28,7 @@ from synapse.federation.transport.server import TransportLayerServer from synapse.handlers.typing import TypingWriterHandler from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent from synapse.server import HomeServer -from synapse.types import JsonDict, Requester, UserID, create_requester +from synapse.types import JsonDict, Requester, StreamKeyType, UserID, create_requester from synapse.util import Clock from tests import unittest @@ -203,7 +203,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ) ) - self.on_new_event.assert_has_calls([call("typing_key", 1, rooms=[ROOM_ID])]) + self.on_new_event.assert_has_calls( + [call(StreamKeyType.TYPING, 1, rooms=[ROOM_ID])] + ) self.assertEqual(self.event_source.get_current_key(), 1) events = self.get_success( @@ -273,7 +275,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ) self.assertEqual(channel.code, 200) - self.on_new_event.assert_has_calls([call("typing_key", 1, 
rooms=[ROOM_ID])]) + self.on_new_event.assert_has_calls( + [call(StreamKeyType.TYPING, 1, rooms=[ROOM_ID])] + ) self.assertEqual(self.event_source.get_current_key(), 1) events = self.get_success( @@ -349,7 +353,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ) ) - self.on_new_event.assert_has_calls([call("typing_key", 1, rooms=[ROOM_ID])]) + self.on_new_event.assert_has_calls( + [call(StreamKeyType.TYPING, 1, rooms=[ROOM_ID])] + ) self.mock_federation_client.put_json.assert_called_once_with( "farm", @@ -399,7 +405,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ) ) - self.on_new_event.assert_has_calls([call("typing_key", 1, rooms=[ROOM_ID])]) + self.on_new_event.assert_has_calls( + [call(StreamKeyType.TYPING, 1, rooms=[ROOM_ID])] + ) self.on_new_event.reset_mock() self.assertEqual(self.event_source.get_current_key(), 1) @@ -425,7 +433,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.reactor.pump([16]) - self.on_new_event.assert_has_calls([call("typing_key", 2, rooms=[ROOM_ID])]) + self.on_new_event.assert_has_calls( + [call(StreamKeyType.TYPING, 2, rooms=[ROOM_ID])] + ) self.assertEqual(self.event_source.get_current_key(), 2) events = self.get_success( @@ -459,7 +469,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ) ) - self.on_new_event.assert_has_calls([call("typing_key", 3, rooms=[ROOM_ID])]) + self.on_new_event.assert_has_calls( + [call(StreamKeyType.TYPING, 3, rooms=[ROOM_ID])] + ) self.on_new_event.reset_mock() self.assertEqual(self.event_source.get_current_key(), 3) From ab9c1e8f3951dcdb9d628b7ed155de543c046c44 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 4 Oct 2023 13:53:04 -0400 Subject: [PATCH 548/562] Add type hints to synmark. (#16421) --- changelog.d/16421.misc | 1 + mypy.ini | 4 ++ synmark/__init__.py | 9 +++-- synmark/__main__.py | 48 ++++++++++++++-------- synmark/suites/logging.py | 68 ++++++++++++++++++++------------ synmark/suites/lrucache.py | 5 ++- synmark/suites/lrucache_evict.py | 5 ++- 7 files changed, 91 insertions(+), 49 deletions(-) create mode 100644 changelog.d/16421.misc diff --git a/changelog.d/16421.misc b/changelog.d/16421.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16421.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index 88aea301b9..fdfe9432fc 100644 --- a/mypy.ini +++ b/mypy.ini @@ -32,6 +32,7 @@ files = docker/, scripts-dev/, synapse/, + synmark/, tests/, build_rust.py @@ -80,6 +81,9 @@ ignore_missing_imports = True [mypy-pympler.*] ignore_missing_imports = True +[mypy-pyperf.*] +ignore_missing_imports = True + [mypy-rust_python_jaeger_reporter.*] ignore_missing_imports = True diff --git a/synmark/__init__.py b/synmark/__init__.py index 2cc00b0f03..f213319542 100644 --- a/synmark/__init__.py +++ b/synmark/__init__.py @@ -13,15 +13,18 @@ # limitations under the License. import sys +from typing import cast + +from synapse.types import ISynapseReactor try: from twisted.internet.epollreactor import EPollReactor as Reactor except ImportError: - from twisted.internet.pollreactor import PollReactor as Reactor + from twisted.internet.pollreactor import PollReactor as Reactor # type: ignore[assignment] from twisted.internet.main import installReactor -def make_reactor(): +def make_reactor() -> ISynapseReactor: """ Instantiate and install a Twisted reactor suitable for testing (i.e. not the default global one). 
@@ -32,4 +35,4 @@ def make_reactor(): del sys.modules["twisted.internet.reactor"] installReactor(reactor) - return reactor + return cast(ISynapseReactor, reactor) diff --git a/synmark/__main__.py b/synmark/__main__.py index 19de639187..397dd86576 100644 --- a/synmark/__main__.py +++ b/synmark/__main__.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. import sys -from argparse import REMAINDER +from argparse import REMAINDER, Namespace from contextlib import redirect_stderr from io import StringIO +from typing import Any, Callable, Coroutine, List, TypeVar import pyperf @@ -22,44 +23,50 @@ from twisted.internet.defer import Deferred, ensureDeferred from twisted.logger import globalLogBeginner, textFileLogObserver from twisted.python.failure import Failure +from synapse.types import ISynapseReactor from synmark import make_reactor from synmark.suites import SUITES from tests.utils import setupdb +T = TypeVar("T") -def make_test(main): + +def make_test( + main: Callable[[ISynapseReactor, int], Coroutine[Any, Any, float]] +) -> Callable[[int], float]: """ Take a benchmark function and wrap it in a reactor start and stop. """ - def _main(loops): + def _main(loops: int) -> float: reactor = make_reactor() file_out = StringIO() with redirect_stderr(file_out): - d = Deferred() + d: "Deferred[float]" = Deferred() d.addCallback(lambda _: ensureDeferred(main(reactor, loops))) - def on_done(_): - if isinstance(_, Failure): - _.printTraceback() + def on_done(res: T) -> T: + if isinstance(res, Failure): + res.printTraceback() print(file_out.getvalue()) reactor.stop() - return _ + return res d.addBoth(on_done) reactor.callWhenRunning(lambda: d.callback(True)) reactor.run() - return d.result + # mypy thinks this is an object for some reason. + return d.result # type: ignore[return-value] return _main if __name__ == "__main__": - def add_cmdline_args(cmd, args): + def add_cmdline_args(cmd: List[str], args: Namespace) -> None: if args.log: cmd.extend(["--log"]) cmd.extend(args.tests) @@ -82,17 +89,26 @@ if __name__ == "__main__": setupdb() if runner.args.tests: - SUITES = list( - filter(lambda x: x[0].__name__.split(".")[-1] in runner.args.tests, SUITES) - ) + existing_suites = {s.__name__.split(".")[-1] for s, _ in SUITES} + for test in runner.args.tests: + if test not in existing_suites: + print(f"Test suite {test} does not exist.") + exit(-1) - for suite, loops in SUITES: + suites = list( + filter(lambda t: t[0].__name__.split(".")[-1] in runner.args.tests, SUITES) + ) + else: + suites = SUITES + + for suite, loops in suites: if loops: runner.args.loops = loops + loops_desc = str(loops) else: runner.args.loops = orig_loops - loops = "auto" + loops_desc = "auto" runner.bench_time_func( - suite.__name__ + "_" + str(loops), + suite.__name__ + "_" + loops_desc, make_test(suite.main), ) diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py index 04e5b29dc9..e160443643 100644 --- a/synmark/suites/logging.py +++ b/synmark/suites/logging.py @@ -11,14 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- import logging +import logging.config import warnings from io import StringIO +from typing import Optional from unittest.mock import Mock from pyperf import perf_counter +from twisted.internet.address import IPv4Address, IPv6Address from twisted.internet.defer import Deferred from twisted.internet.protocol import ServerFactory from twisted.logger import LogBeginner, LogPublisher @@ -26,45 +28,53 @@ from twisted.protocols.basic import LineOnlyReceiver from synapse.config.logger import _setup_stdlib_logging from synapse.logging import RemoteHandler +from synapse.synapse_rust import reset_logging_config +from synapse.types import ISynapseReactor from synapse.util import Clock class LineCounter(LineOnlyReceiver): delimiter = b"\n" + count = 0 - def __init__(self, *args, **kwargs): - self.count = 0 - super().__init__(*args, **kwargs) - - def lineReceived(self, line): + def lineReceived(self, line: bytes) -> None: self.count += 1 + assert isinstance(self.factory, Factory) + if self.count >= self.factory.wait_for and self.factory.on_done: on_done = self.factory.on_done self.factory.on_done = None on_done.callback(True) -async def main(reactor, loops): +class Factory(ServerFactory): + protocol = LineCounter + wait_for: int + on_done: Optional[Deferred] + + +async def main(reactor: ISynapseReactor, loops: int) -> float: """ Benchmark how long it takes to send `loops` messages. """ - servers = [] - def protocol(): - p = LineCounter() - servers.append(p) - return p - - logger_factory = ServerFactory.forProtocol(protocol) + logger_factory = Factory() logger_factory.wait_for = loops logger_factory.on_done = Deferred() - port = reactor.listenTCP(0, logger_factory, interface="127.0.0.1") + port = reactor.listenTCP(0, logger_factory, backlog=50, interface="127.0.0.1") # A fake homeserver config. class Config: - server_name = "synmark-" + str(loops) - no_redirect_stdio = True + class server: + server_name = "synmark-" + str(loops) + + # This odd construct is to avoid mypy thinking that logging escapes the + # scope of Config. + class _logging: + no_redirect_stdio = True + + logging = _logging hs_config = Config() @@ -78,28 +88,34 @@ async def main(reactor, loops): publisher, errors, mock_sys, warnings, initialBufferSize=loops ) + address = port.getHost() + assert isinstance(address, (IPv4Address, IPv6Address)) log_config = { "version": 1, - "loggers": {"synapse": {"level": "DEBUG", "handlers": ["tersejson"]}}, + "loggers": {"synapse": {"level": "DEBUG", "handlers": ["remote"]}}, "formatters": {"tersejson": {"class": "synapse.logging.TerseJsonFormatter"}}, "handlers": { - "tersejson": { + "remote": { "class": "synapse.logging.RemoteHandler", - "host": "127.0.0.1", - "port": port.getHost().port, + "formatter": "tersejson", + "host": address.host, + "port": address.port, "maximum_buffer": 100, - "_reactor": reactor, } }, } - logger = logging.getLogger("synapse.logging.test_terse_json") + logger = logging.getLogger("synapse") _setup_stdlib_logging( - hs_config, - log_config, + hs_config, # type: ignore[arg-type] + None, logBeginner=beginner, ) + # Force a new logging config without having to load it from a file. + logging.config.dictConfig(log_config) + reset_logging_config() + # Wait for it to connect... 
for handler in logging.getLogger("synapse").handlers: if isinstance(handler, RemoteHandler): @@ -107,7 +123,7 @@ async def main(reactor, loops): else: raise RuntimeError("Improperly configured: no RemoteHandler found.") - await handler._service.whenConnected() + await handler._service.whenConnected(failAfterFailures=10) start = perf_counter() diff --git a/synmark/suites/lrucache.py b/synmark/suites/lrucache.py index 9b4a424149..cfa0163c62 100644 --- a/synmark/suites/lrucache.py +++ b/synmark/suites/lrucache.py @@ -14,14 +14,15 @@ from pyperf import perf_counter +from synapse.types import ISynapseReactor from synapse.util.caches.lrucache import LruCache -async def main(reactor, loops): +async def main(reactor: ISynapseReactor, loops: int) -> float: """ Benchmark `loops` number of insertions into LruCache without eviction. """ - cache = LruCache(loops) + cache: LruCache[int, bool] = LruCache(loops) start = perf_counter() diff --git a/synmark/suites/lrucache_evict.py b/synmark/suites/lrucache_evict.py index 0ee202ed36..02238c2627 100644 --- a/synmark/suites/lrucache_evict.py +++ b/synmark/suites/lrucache_evict.py @@ -14,15 +14,16 @@ from pyperf import perf_counter +from synapse.types import ISynapseReactor from synapse.util.caches.lrucache import LruCache -async def main(reactor, loops): +async def main(reactor: ISynapseReactor, loops: int) -> float: """ Benchmark `loops` number of insertions into LruCache where half of them are evicted. """ - cache = LruCache(loops // 2) + cache: LruCache[int, bool] = LruCache(loops // 2) start = perf_counter() From 009b47badfed7593cff5f8acbd61e8fddb3ca788 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 5 Oct 2023 12:46:28 +0300 Subject: [PATCH 549/562] Factor out `MultiWriter` token from `RoomStreamToken` (#16427) --- changelog.d/16427.misc | 1 + synapse/handlers/admin.py | 4 +- synapse/handlers/initial_sync.py | 3 +- synapse/handlers/room.py | 2 +- synapse/handlers/sync.py | 2 +- synapse/rest/admin/__init__.py | 2 +- synapse/storage/databases/main/stream.py | 22 ++-- synapse/types/__init__.py | 132 ++++++++++++++++------- tests/handlers/test_appservice.py | 8 +- 9 files changed, 115 insertions(+), 61 deletions(-) create mode 100644 changelog.d/16427.misc diff --git a/changelog.d/16427.misc b/changelog.d/16427.misc new file mode 100644 index 0000000000..44f0e0595e --- /dev/null +++ b/changelog.d/16427.misc @@ -0,0 +1 @@ +Factor out `MultiWriter` token from `RoomStreamToken`. 
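To summarise the semantics being factored out before the code: a multi-writer
token records a position `stream` that every writer is known to have reached,
plus an `instance_map` naming only the writers that are ahead of it. A
self-contained sketch with plain dataclasses (the real implementation below
uses attrs and immutabledict):

    from dataclasses import dataclass, field
    from typing import Dict

    @dataclass(frozen=True)
    class MultiWriterToken:
        stream: int
        instance_map: Dict[str, int] = field(default_factory=dict)

        def get_stream_pos_for_instance(self, instance_name: str) -> int:
            # Writers without an explicit entry are assumed to be at `stream`.
            return self.instance_map.get(instance_name, self.stream)

        def copy_and_advance(self, other: "MultiWriterToken") -> "MultiWriterToken":
            # An event after both input tokens is after the merged token too.
            max_stream = max(self.stream, other.stream)
            merged = {
                name: max(
                    self.instance_map.get(name, self.stream),
                    other.instance_map.get(name, other.stream),
                )
                for name in set(self.instance_map) | set(other.instance_map)
            }
            return MultiWriterToken(max_stream, merged)

    # e.g. "worker2" has persisted up to 9 while the shared position is 7:
    token = MultiWriterToken(stream=7, instance_map={"worker2": 9})
    assert token.get_stream_pos_for_instance("worker1") == 7
    assert token.get_stream_pos_for_instance("worker2") == 9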
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index ba9704a065..97fd1fd427 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -171,8 +171,8 @@ class AdminHandler: else: stream_ordering = room.stream_ordering - from_key = RoomStreamToken(0, 0) - to_key = RoomStreamToken(None, stream_ordering) + from_key = RoomStreamToken(topological=0, stream=0) + to_key = RoomStreamToken(stream=stream_ordering) # Events that we've processed in this room written_events: Set[str] = set() diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 5737f8014d..c34bd7db95 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -192,8 +192,7 @@ class InitialSyncHandler: ) elif event.membership == Membership.LEAVE: room_end_token = RoomStreamToken( - None, - event.stream_ordering, + stream=event.stream_ordering, ) deferred_room_state = run_in_background( self._state_storage_controller.get_state_for_events, diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index a0c3b16819..4cdf0a8502 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1708,7 +1708,7 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]): if from_key.topological: logger.warning("Stream has topological part!!!! %r", from_key) - from_key = RoomStreamToken(None, from_key.stream) + from_key = RoomStreamToken(stream=from_key.stream) app_service = self.store.get_app_service_by_user_id(user.to_string()) if app_service: diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 7bd42f635f..744e080309 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -2333,7 +2333,7 @@ class SyncHandler: continue leave_token = now_token.copy_and_replace( - StreamKeyType.ROOM, RoomStreamToken(None, event.stream_ordering) + StreamKeyType.ROOM, RoomStreamToken(stream=event.stream_ordering) ) room_entries.append( RoomSyncResultBuilder( diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index e42dade246..9bd0d764f8 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -146,7 +146,7 @@ class PurgeHistoryRestServlet(RestServlet): # RoomStreamToken expects [int] not Optional[int] assert event.internal_metadata.stream_ordering is not None room_token = RoomStreamToken( - event.depth, event.internal_metadata.stream_ordering + topological=event.depth, stream=event.internal_metadata.stream_ordering ) token = await room_token.to_string(self.store) diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 5a3611c415..ea06e4eee0 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -266,7 +266,7 @@ def generate_next_token( # when we are going backwards so we subtract one from the # stream part. 
last_stream_ordering -= 1 - return RoomStreamToken(last_topo_ordering, last_stream_ordering) + return RoomStreamToken(topological=last_topo_ordering, stream=last_stream_ordering) def _make_generic_sql_bound( @@ -558,7 +558,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if p > min_pos } - return RoomStreamToken(None, min_pos, immutabledict(positions)) + return RoomStreamToken(stream=min_pos, instance_map=immutabledict(positions)) async def get_room_events_stream_for_rooms( self, @@ -708,7 +708,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ret.reverse() if rows: - key = RoomStreamToken(None, min(r.stream_ordering for r in rows)) + key = RoomStreamToken(stream=min(r.stream_ordering for r in rows)) else: # Assume we didn't get anything because there was nothing to # get. @@ -969,7 +969,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): topo = await self.db_pool.runInteraction( "_get_max_topological_txn", self._get_max_topological_txn, room_id ) - return RoomStreamToken(topo, stream_ordering) + return RoomStreamToken(topological=topo, stream=stream_ordering) @overload def get_stream_id_for_event_txn( @@ -1033,7 +1033,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): retcols=("stream_ordering", "topological_ordering"), desc="get_topological_token_for_event", ) - return RoomStreamToken(row["topological_ordering"], row["stream_ordering"]) + return RoomStreamToken( + topological=row["topological_ordering"], stream=row["stream_ordering"] + ) async def get_current_topological_token(self, room_id: str, stream_key: int) -> int: """Gets the topological token in a room after or at the given stream @@ -1114,8 +1116,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): else: topo = None internal = event.internal_metadata - internal.before = RoomStreamToken(topo, stream - 1) - internal.after = RoomStreamToken(topo, stream) + internal.before = RoomStreamToken(topological=topo, stream=stream - 1) + internal.after = RoomStreamToken(topological=topo, stream=stream) internal.order = (int(topo) if topo else 0, int(stream)) async def get_events_around( @@ -1191,11 +1193,13 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # Paginating backwards includes the event at the token, but paginating # forward doesn't. 
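        # (Hence the `- 1` on the topological part of `before_token` just below.)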
before_token = RoomStreamToken(
-            results["topological_ordering"] - 1, results["stream_ordering"]
+            topological=results["topological_ordering"] - 1,
+            stream=results["stream_ordering"],
         )
 
         after_token = RoomStreamToken(
-            results["topological_ordering"], results["stream_ordering"]
+            topological=results["topological_ordering"],
+            stream=results["stream_ordering"],
         )
 
         rows, start_token = self._paginate_room_events_txn(
diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py
index 406d5b1611..09a88c86a7 100644
--- a/synapse/types/__init__.py
+++ b/synapse/types/__init__.py
@@ -61,6 +61,8 @@ from synapse.util.cancellation import cancellable
 from synapse.util.stringutils import parse_and_validate_server_name
 
 if TYPE_CHECKING:
+    from typing_extensions import Self
+
     from synapse.appservice.api import ApplicationService
     from synapse.storage.databases.main import DataStore, PurgeEventsStore
     from synapse.storage.databases.main.appservice import ApplicationServiceWorkerStore
@@ -437,7 +439,78 @@ def map_username_to_mxid_localpart(
 
 
 @attr.s(frozen=True, slots=True, order=False)
-class RoomStreamToken:
+class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta):
+    """An abstract stream token class for streams that support multiple
+    writers.
+
+    This works by keeping track of the stream position of each writer,
+    represented by a default `stream` attribute and a map of instance name to
+    stream position of any writers that are ahead of the default stream
+    position.
+    """
+
+    stream: int = attr.ib(validator=attr.validators.instance_of(int), kw_only=True)
+
+    instance_map: "immutabledict[str, int]" = attr.ib(
+        factory=immutabledict,
+        validator=attr.validators.deep_mapping(
+            key_validator=attr.validators.instance_of(str),
+            value_validator=attr.validators.instance_of(int),
+            mapping_validator=attr.validators.instance_of(immutabledict),
+        ),
+        kw_only=True,
+    )
+
+    @classmethod
+    @abc.abstractmethod
+    async def parse(cls, store: "DataStore", string: str) -> "Self":
+        """Parse the string representation of the token."""
+        ...
+
+    @abc.abstractmethod
+    async def to_string(self, store: "DataStore") -> str:
+        """Serialize the token into its string representation."""
+        ...
+
+    def copy_and_advance(self, other: "Self") -> "Self":
+        """Return a new token such that if an event is after both this token and
+        the other token, then it's after the returned token too.
+        """
+
+        max_stream = max(self.stream, other.stream)
+
+        instance_map = {
+            instance: max(
+                self.instance_map.get(instance, self.stream),
+                other.instance_map.get(instance, other.stream),
+            )
+            for instance in set(self.instance_map).union(other.instance_map)
+        }
+
+        return attr.evolve(
+            self, stream=max_stream, instance_map=immutabledict(instance_map)
+        )
+
+    def get_max_stream_pos(self) -> int:
+        """Get the maximum stream position referenced in this token.
+
+        The corresponding "min" position is, by definition, just `self.stream`.
+
+        This is used to handle tokens that have non-empty `instance_map`, and so
+        reference stream positions after the `self.stream` position.
+        """
+        return max(self.instance_map.values(), default=self.stream)
+
+    def get_stream_pos_for_instance(self, instance_name: str) -> int:
+        """Get the stream position that the given writer was at, at this token."""
+
+        # If we don't have an entry for the instance we can assume that it was
+        # at `self.stream`.
+ return self.instance_map.get(instance_name, self.stream) + + +@attr.s(frozen=True, slots=True, order=False) +class RoomStreamToken(AbstractMultiWriterStreamToken): """Tokens are positions between events. The token "s1" comes after event 1. s0 s1 @@ -514,16 +587,8 @@ class RoomStreamToken: topological: Optional[int] = attr.ib( validator=attr.validators.optional(attr.validators.instance_of(int)), - ) - stream: int = attr.ib(validator=attr.validators.instance_of(int)) - - instance_map: "immutabledict[str, int]" = attr.ib( - factory=immutabledict, - validator=attr.validators.deep_mapping( - key_validator=attr.validators.instance_of(str), - value_validator=attr.validators.instance_of(int), - mapping_validator=attr.validators.instance_of(immutabledict), - ), + kw_only=True, + default=None, ) def __attrs_post_init__(self) -> None: @@ -583,17 +648,7 @@ class RoomStreamToken: if self.topological or other.topological: raise Exception("Can't advance topological tokens") - max_stream = max(self.stream, other.stream) - - instance_map = { - instance: max( - self.instance_map.get(instance, self.stream), - other.instance_map.get(instance, other.stream), - ) - for instance in set(self.instance_map).union(other.instance_map) - } - - return RoomStreamToken(None, max_stream, immutabledict(instance_map)) + return super().copy_and_advance(other) def as_historical_tuple(self) -> Tuple[int, int]: """Returns a tuple of `(topological, stream)` for historical tokens. @@ -619,16 +674,6 @@ class RoomStreamToken: # at `self.stream`. return self.instance_map.get(instance_name, self.stream) - def get_max_stream_pos(self) -> int: - """Get the maximum stream position referenced in this token. - - The corresponding "min" position is, by definition just `self.stream`. - - This is used to handle tokens that have non-empty `instance_map`, and so - reference stream positions after the `self.stream` position. - """ - return max(self.instance_map.values(), default=self.stream) - async def to_string(self, store: "DataStore") -> str: if self.topological is not None: return "t%d-%d" % (self.topological, self.stream) @@ -838,23 +883,28 @@ class StreamToken: return getattr(self, key.value) -StreamToken.START = StreamToken(RoomStreamToken(None, 0), 0, 0, 0, 0, 0, 0, 0, 0, 0) +StreamToken.START = StreamToken(RoomStreamToken(stream=0), 0, 0, 0, 0, 0, 0, 0, 0, 0) @attr.s(slots=True, frozen=True, auto_attribs=True) -class PersistedEventPosition: +class PersistedPosition: + """Position of a newly persisted row with instance that persisted it.""" + + instance_name: str + stream: int + + def persisted_after(self, token: AbstractMultiWriterStreamToken) -> bool: + return token.get_stream_pos_for_instance(self.instance_name) < self.stream + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class PersistedEventPosition(PersistedPosition): """Position of a newly persisted event with instance that persisted it. This can be used to test whether the event is persisted before or after a RoomStreamToken. """ - instance_name: str - stream: int - - def persisted_after(self, token: RoomStreamToken) -> bool: - return token.get_stream_pos_for_instance(self.instance_name) < self.stream - def to_room_stream_token(self) -> RoomStreamToken: """Converts the position to a room stream token such that events persisted in the same room after this position will be after the @@ -865,7 +915,7 @@ class PersistedEventPosition: """ # Doing the naive thing satisfies the desired properties described in # the docstring. 
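        # (Any event persisted with a higher stream ordering sorts after
        # RoomStreamToken(stream=self.stream), which is the property we need.)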
- return RoomStreamToken(None, self.stream) + return RoomStreamToken(stream=self.stream) @attr.s(slots=True, frozen=True, auto_attribs=True) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 8ce6ccf529..867dbd6001 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -86,7 +86,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): [event], ] ) - self.handler.notify_interested_services(RoomStreamToken(None, 1)) + self.handler.notify_interested_services(RoomStreamToken(stream=1)) self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( interested_service, events=[event] @@ -107,7 +107,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): ] ) self.mock_store.get_events_as_list = AsyncMock(side_effect=[[event]]) - self.handler.notify_interested_services(RoomStreamToken(None, 0)) + self.handler.notify_interested_services(RoomStreamToken(stream=0)) self.mock_as_api.query_user.assert_called_once_with(services[0], user_id) @@ -126,7 +126,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): ] ) - self.handler.notify_interested_services(RoomStreamToken(None, 0)) + self.handler.notify_interested_services(RoomStreamToken(stream=0)) self.assertFalse( self.mock_as_api.query_user.called, @@ -441,7 +441,7 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase): self.get_success( self.hs.get_application_service_handler()._notify_interested_services( RoomStreamToken( - None, self.hs.get_application_service_handler().current_max + stream=self.hs.get_application_service_handler().current_max ) ) ) From 4e302b30b6f29bd6f1edf7e7dfb835a959fc66e4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 5 Oct 2023 07:38:55 -0400 Subject: [PATCH 550/562] Add __slots__ to replication commands. (#16429) To slightly reduce the amount of memory each command takes. --- changelog.d/16429.misc | 1 + synapse/replication/tcp/commands.py | 27 ++++++++++++++++++++++++++- 2 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16429.misc diff --git a/changelog.d/16429.misc b/changelog.d/16429.misc new file mode 100644 index 0000000000..4241e143be --- /dev/null +++ b/changelog.d/16429.misc @@ -0,0 +1 @@ +Reduce the size of each replication command instance. diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index e616b5e1c8..1b92302fd3 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -18,7 +18,7 @@ allowed to be sent by which side. """ import abc import logging -from typing import Optional, Tuple, Type, TypeVar +from typing import List, Optional, Tuple, Type, TypeVar from synapse.replication.tcp.streams._base import StreamRow from synapse.util import json_decoder, json_encoder @@ -74,6 +74,8 @@ SC = TypeVar("SC", bound="_SimpleCommand") class _SimpleCommand(Command): """An implementation of Command whose argument is just a 'data' string.""" + __slots__ = ["data"] + def __init__(self, data: str): self.data = data @@ -122,6 +124,8 @@ class RdataCommand(Command): RDATA presence master 59 ["@baz:example.com", "online", ...] """ + __slots__ = ["stream_name", "instance_name", "token", "row"] + NAME = "RDATA" def __init__( @@ -179,6 +183,8 @@ class PositionCommand(Command): of the stream. 
""" + __slots__ = ["stream_name", "instance_name", "prev_token", "new_token"] + NAME = "POSITION" def __init__( @@ -235,6 +241,8 @@ class ReplicateCommand(Command): REPLICATE """ + __slots__: List[str] = [] + NAME = "REPLICATE" def __init__(self) -> None: @@ -264,6 +272,8 @@ class UserSyncCommand(Command): Where is either "start" or "end" """ + __slots__ = ["instance_id", "user_id", "device_id", "is_syncing", "last_sync_ms"] + NAME = "USER_SYNC" def __init__( @@ -316,6 +326,8 @@ class ClearUserSyncsCommand(Command): CLEAR_USER_SYNC """ + __slots__ = ["instance_id"] + NAME = "CLEAR_USER_SYNC" def __init__(self, instance_id: str): @@ -343,6 +355,8 @@ class FederationAckCommand(Command): FEDERATION_ACK """ + __slots__ = ["instance_name", "token"] + NAME = "FEDERATION_ACK" def __init__(self, instance_name: str, token: int): @@ -368,6 +382,15 @@ class UserIpCommand(Command): USER_IP , , , , , """ + __slots__ = [ + "user_id", + "access_token", + "ip", + "user_agent", + "device_id", + "last_seen", + ] + NAME = "USER_IP" def __init__( @@ -441,6 +464,8 @@ class LockReleasedCommand(Command): LOCK_RELEASED ["", "", ""] """ + __slots__ = ["instance_name", "lock_name", "lock_key"] + NAME = "LOCK_RELEASED" def __init__( From fa907025f4b263d27c2b338fb0fe86d257d74fa8 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 5 Oct 2023 11:07:38 -0400 Subject: [PATCH 551/562] Remove manys calls to cursor_to_dict (#16431) This avoids calling cursor_to_dict and then immediately unpacking the values in the dict for other users. By not creating the intermediate dictionary we can avoid allocating the dictionary and strings for the keys, which should generally be more performant. Additionally this improves type hints by avoid Dict[str, Any] dictionaries coming out of the database layer. --- changelog.d/16429.misc | 2 +- changelog.d/16431.misc | 1 + synapse/push/__init__.py | 2 +- .../storage/databases/main/account_data.py | 11 +- synapse/storage/databases/main/appservice.py | 29 +--- synapse/storage/databases/main/devices.py | 12 +- .../storage/databases/main/end_to_end_keys.py | 22 +-- synapse/storage/databases/main/events.py | 9 +- synapse/storage/databases/main/presence.py | 18 ++- synapse/storage/databases/main/pusher.py | 121 ++++++++++++---- synapse/storage/databases/main/receipts.py | 72 +++++----- .../storage/databases/main/registration.py | 131 ++++++++++-------- synapse/storage/databases/main/room.py | 42 +++--- synapse/storage/databases/main/roommember.py | 10 +- synapse/storage/databases/main/search.py | 20 +-- .../storage/databases/main/task_scheduler.py | 44 ++++-- 16 files changed, 319 insertions(+), 227 deletions(-) create mode 100644 changelog.d/16431.misc diff --git a/changelog.d/16429.misc b/changelog.d/16429.misc index 4241e143be..bd7cdd42af 100644 --- a/changelog.d/16429.misc +++ b/changelog.d/16429.misc @@ -1 +1 @@ -Reduce the size of each replication command instance. +Reduce memory allocations. diff --git a/changelog.d/16431.misc b/changelog.d/16431.misc new file mode 100644 index 0000000000..bd7cdd42af --- /dev/null +++ b/changelog.d/16431.misc @@ -0,0 +1 @@ +Reduce memory allocations. 
diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 9e5eb2a445..4d405f2a0c 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -101,7 +101,7 @@ if TYPE_CHECKING: class PusherConfig: """Parameters necessary to configure a pusher.""" - id: Optional[str] + id: Optional[int] user_name: str profile_tag: str diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 80f146dd53..16c284807a 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -151,10 +151,10 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) sql += " AND content != '{}'" txn.execute(sql, (user_id,)) - rows = self.db_pool.cursor_to_dict(txn) return { - row["account_data_type"]: db_to_json(row["content"]) for row in rows + account_data_type: db_to_json(content) + for account_data_type, content in txn } return await self.db_pool.runInteraction( @@ -196,13 +196,12 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) sql += " AND content != '{}'" txn.execute(sql, (user_id,)) - rows = self.db_pool.cursor_to_dict(txn) by_room: Dict[str, Dict[str, JsonDict]] = {} - for row in rows: - room_data = by_room.setdefault(row["room_id"], {}) + for room_id, account_data_type, content in txn: + room_data = by_room.setdefault(room_id, {}) - room_data[row["account_data_type"]] = db_to_json(row["content"]) + room_data[account_data_type] = db_to_json(content) return by_room diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 0553a0621a..073a99cd84 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -14,17 +14,7 @@ # limitations under the License. import logging import re -from typing import ( - TYPE_CHECKING, - Any, - Dict, - List, - Optional, - Pattern, - Sequence, - Tuple, - cast, -) +from typing import TYPE_CHECKING, List, Optional, Pattern, Sequence, Tuple, cast from synapse.appservice import ( ApplicationService, @@ -353,21 +343,15 @@ class ApplicationServiceTransactionWorkerStore( def _get_oldest_unsent_txn( txn: LoggingTransaction, - ) -> Optional[Dict[str, Any]]: + ) -> Optional[Tuple[int, str]]: # Monotonically increasing txn ids, so just select the smallest # one in the txns table (we delete them when they are sent) txn.execute( - "SELECT * FROM application_services_txns WHERE as_id=?" + "SELECT txn_id, event_ids FROM application_services_txns WHERE as_id=?" " ORDER BY txn_id ASC LIMIT 1", (service.id,), ) - rows = self.db_pool.cursor_to_dict(txn) - if not rows: - return None - - entry = rows[0] - - return entry + return cast(Optional[Tuple[int, str]], txn.fetchone()) entry = await self.db_pool.runInteraction( "get_oldest_unsent_appservice_txn", _get_oldest_unsent_txn @@ -376,8 +360,9 @@ class ApplicationServiceTransactionWorkerStore( if not entry: return None - event_ids = db_to_json(entry["event_ids"]) + txn_id, event_ids_str = entry + event_ids = db_to_json(event_ids_str) events = await self.get_events_as_list(event_ids) # TODO: to-device messages, one-time key counts, device list summaries and unused @@ -385,7 +370,7 @@ class ApplicationServiceTransactionWorkerStore( # We likely want to populate those for reliability. 
return AppServiceTransaction( service=service, - id=entry["txn_id"], + id=txn_id, events=events, ephemeral=[], to_device_messages=[], diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index df596f35f9..9f3804a504 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1413,13 +1413,13 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): def get_devices_not_accessed_since_txn( txn: LoggingTransaction, - ) -> List[Dict[str, str]]: + ) -> List[Tuple[str, str]]: sql = """ SELECT user_id, device_id FROM devices WHERE last_seen < ? AND hidden = FALSE """ txn.execute(sql, (since_ms,)) - return self.db_pool.cursor_to_dict(txn) + return cast(List[Tuple[str, str]], txn.fetchall()) rows = await self.db_pool.runInteraction( "get_devices_not_accessed_since", @@ -1427,11 +1427,11 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ) devices: Dict[str, List[str]] = {} - for row in rows: + for user_id, device_id in rows: # Remote devices are never stale from our point of view. - if self.hs.is_mine_id(row["user_id"]): - user_devices = devices.setdefault(row["user_id"], []) - user_devices.append(row["device_id"]) + if self.hs.is_mine_id(user_id): + user_devices = devices.setdefault(user_id, []) + user_devices.append(device_id) return devices diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 89fac23f93..749ae54e20 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -921,14 +921,10 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker } txn.execute(sql, params) - rows = self.db_pool.cursor_to_dict(txn) - for row in rows: - user_id = row["user_id"] - key_type = row["keytype"] - key = db_to_json(row["keydata"]) + for user_id, key_type, key_data, _ in txn: user_keys = result.setdefault(user_id, {}) - user_keys[key_type] = key + user_keys[key_type] = db_to_json(key_data) return result @@ -988,13 +984,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker query_params.extend(item) txn.execute(sql, query_params) - rows = self.db_pool.cursor_to_dict(txn) # and add the signatures to the appropriate keys - for row in rows: - key_id: str = row["key_id"] - target_user_id: str = row["target_user_id"] - target_device_id: str = row["target_device_id"] + for target_user_id, target_device_id, key_id, signature in txn: key_type = devices[(target_user_id, target_device_id)] # We need to copy everything, because the result may have come # from the cache. 
dict.copy only does a shallow copy, so we @@ -1012,13 +1004,11 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker ].copy() if from_user_id in signatures: user_sigs = signatures[from_user_id] = signatures[from_user_id] - user_sigs[key_id] = row["signature"] + user_sigs[key_id] = signature else: - signatures[from_user_id] = {key_id: row["signature"]} + signatures[from_user_id] = {key_id: signature} else: - target_user_key["signatures"] = { - from_user_id: {key_id: row["signature"]} - } + target_user_key["signatures"] = {from_user_id: {key_id: signature}} return keys diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 790d058c43..d4dcdb898c 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1654,8 +1654,6 @@ class PersistEventsStore: ) -> None: to_prefill = [] - rows = [] - ev_map = {e.event_id: e for e, _ in events_and_contexts} if not ev_map: return @@ -1676,10 +1674,9 @@ class PersistEventsStore: ) txn.execute(sql + clause, args) - rows = self.db_pool.cursor_to_dict(txn) - for row in rows: - event = ev_map[row["event_id"]] - if not row["rejects"] and not row["redacts"]: + for event_id, redacts, rejects in txn: + event = ev_map[event_id] + if not rejects and not redacts: to_prefill.append(EventCacheEntry(event=event, redacted_event=None)) async def external_prefill() -> None: diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index 194b4e031f..805c23f89f 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -434,13 +434,21 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) txn = db_conn.cursor() txn.execute(sql, (PresenceState.OFFLINE,)) - rows = self.db_pool.cursor_to_dict(txn) + rows = txn.fetchall() txn.close() - for row in rows: - row["currently_active"] = bool(row["currently_active"]) - - return [UserPresenceState(**row) for row in rows] + return [ + UserPresenceState( + user_id=user_id, + state=state, + last_active_ts=last_active_ts, + last_federation_update_ts=last_federation_update_ts, + last_user_sync_ts=last_user_sync_ts, + status_msg=status_msg, + currently_active=bool(currently_active), + ) + for user_id, state, last_active_ts, last_federation_update_ts, last_user_sync_ts, status_msg, currently_active in rows + ] def take_presence_startup_info(self) -> List[UserPresenceState]: active_on_startup = self._presence_on_startup diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index 87e28e22d3..c7eb7fc478 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -47,6 +47,27 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +# The type of a row in the pushers table. 
+PusherRow = Tuple[ + int, # id + str, # user_name + Optional[int], # access_token + str, # profile_tag + str, # kind + str, # app_id + str, # app_display_name + str, # device_display_name + str, # pushkey + int, # ts + str, # lang + str, # data + int, # last_stream_ordering + int, # last_success + int, # failing_since + bool, # enabled + str, # device_id +] + class PusherWorkerStore(SQLBaseStore): def __init__( @@ -83,30 +104,66 @@ class PusherWorkerStore(SQLBaseStore): self._remove_deleted_email_pushers, ) - def _decode_pushers_rows(self, rows: Iterable[dict]) -> Iterator[PusherConfig]: + def _decode_pushers_rows( + self, + rows: Iterable[PusherRow], + ) -> Iterator[PusherConfig]: """JSON-decode the data in the rows returned from the `pushers` table Drops any rows whose data cannot be decoded """ - for r in rows: - data_json = r["data"] + for ( + id, + user_name, + access_token, + profile_tag, + kind, + app_id, + app_display_name, + device_display_name, + pushkey, + ts, + lang, + data, + last_stream_ordering, + last_success, + failing_since, + enabled, + device_id, + ) in rows: try: - r["data"] = db_to_json(data_json) + data_json = db_to_json(data) except Exception as e: logger.warning( "Invalid JSON in data for pusher %d: %s, %s", - r["id"], - data_json, + id, + data, e.args[0], ) continue - # If we're using SQLite, then boolean values are integers. This is - # troublesome since some code using the return value of this method might - # expect it to be a boolean, or will expose it to clients (in responses). - r["enabled"] = bool(r["enabled"]) - - yield PusherConfig(**r) + yield PusherConfig( + id=id, + user_name=user_name, + profile_tag=profile_tag, + kind=kind, + app_id=app_id, + app_display_name=app_display_name, + device_display_name=device_display_name, + pushkey=pushkey, + ts=ts, + lang=lang, + data=data_json, + last_stream_ordering=last_stream_ordering, + last_success=last_success, + failing_since=failing_since, + # If we're using SQLite, then boolean values are integers. This is + # troublesome since some code using the return value of this method might + # expect it to be a boolean, or will expose it to clients (in responses). + enabled=bool(enabled), + device_id=device_id, + access_token=access_token, + ) def get_pushers_stream_token(self) -> int: return self._pushers_id_gen.get_current_token() @@ -136,7 +193,7 @@ class PusherWorkerStore(SQLBaseStore): The pushers for which the given columns have the given values. """ - def get_pushers_by_txn(txn: LoggingTransaction) -> List[Dict[str, Any]]: + def get_pushers_by_txn(txn: LoggingTransaction) -> List[PusherRow]: # We could technically use simple_select_list here, but we need to call # COALESCE on the 'enabled' column. While it is technically possible to give # simple_select_list the whole `COALESCE(...) 
AS ...` as a column name, it @@ -154,7 +211,7 @@ class PusherWorkerStore(SQLBaseStore): txn.execute(sql, list(keyvalues.values())) - return self.db_pool.cursor_to_dict(txn) + return cast(List[PusherRow], txn.fetchall()) ret = await self.db_pool.runInteraction( desc="get_pushers_by", @@ -164,14 +221,22 @@ class PusherWorkerStore(SQLBaseStore): return self._decode_pushers_rows(ret) async def get_enabled_pushers(self) -> Iterator[PusherConfig]: - def get_enabled_pushers_txn(txn: LoggingTransaction) -> Iterator[PusherConfig]: - txn.execute("SELECT * FROM pushers WHERE COALESCE(enabled, TRUE)") - rows = self.db_pool.cursor_to_dict(txn) + def get_enabled_pushers_txn(txn: LoggingTransaction) -> List[PusherRow]: + txn.execute( + """ + SELECT id, user_name, access_token, profile_tag, kind, app_id, + app_display_name, device_display_name, pushkey, ts, lang, data, + last_stream_ordering, last_success, failing_since, + enabled, device_id + FROM pushers WHERE COALESCE(enabled, TRUE) + """ + ) + return cast(List[PusherRow], txn.fetchall()) - return self._decode_pushers_rows(rows) - - return await self.db_pool.runInteraction( - "get_enabled_pushers", get_enabled_pushers_txn + return self._decode_pushers_rows( + await self.db_pool.runInteraction( + "get_enabled_pushers", get_enabled_pushers_txn + ) ) async def get_all_updated_pushers_rows( @@ -304,7 +369,7 @@ class PusherWorkerStore(SQLBaseStore): ) async def get_throttle_params_by_room( - self, pusher_id: str + self, pusher_id: int ) -> Dict[str, ThrottleParams]: res = await self.db_pool.simple_select_list( "pusher_throttle", @@ -323,7 +388,7 @@ class PusherWorkerStore(SQLBaseStore): return params_by_room async def set_throttle_params( - self, pusher_id: str, room_id: str, params: ThrottleParams + self, pusher_id: int, room_id: str, params: ThrottleParams ) -> None: await self.db_pool.simple_upsert( "pusher_throttle", @@ -534,7 +599,7 @@ class PusherBackgroundUpdatesStore(SQLBaseStore): (last_pusher_id, batch_size), ) - rows = self.db_pool.cursor_to_dict(txn) + rows = txn.fetchall() if len(rows) == 0: return 0 @@ -550,19 +615,19 @@ class PusherBackgroundUpdatesStore(SQLBaseStore): txn=txn, table="pushers", key_names=("id",), - key_values=[(row["pusher_id"],) for row in rows], + key_values=[row[0] for row in rows], value_names=("device_id", "access_token"), # If there was already a device_id on the pusher, we only want to clear # the access_token column, so we keep the existing device_id. Otherwise, # we set the device_id we got from joining the access_tokens table. 
value_values=[ - (row["pusher_device_id"] or row["token_device_id"], None) - for row in rows + (pusher_device_id or token_device_id, None) + for _, pusher_device_id, token_device_id in rows ], ) self.db_pool.updates._background_update_progress_txn( - txn, "set_device_id_for_pushers", {"pusher_id": rows[-1]["pusher_id"]} + txn, "set_device_id_for_pushers", {"pusher_id": rows[-1][0]} ) return len(rows) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 3bab1024ea..b2645ab43c 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -313,25 +313,25 @@ class ReceiptsWorkerStore(SQLBaseStore): ) -> Sequence[JsonMapping]: """See get_linearized_receipts_for_room""" - def f(txn: LoggingTransaction) -> List[Dict[str, Any]]: + def f(txn: LoggingTransaction) -> List[Tuple[str, str, str, str]]: if from_key: sql = ( - "SELECT * FROM receipts_linearized WHERE" + "SELECT receipt_type, user_id, event_id, data" + " FROM receipts_linearized WHERE" " room_id = ? AND stream_id > ? AND stream_id <= ?" ) txn.execute(sql, (room_id, from_key, to_key)) else: sql = ( - "SELECT * FROM receipts_linearized WHERE" + "SELECT receipt_type, user_id, event_id, data" + " FROM receipts_linearized WHERE" " room_id = ? AND stream_id <= ?" ) txn.execute(sql, (room_id, to_key)) - rows = self.db_pool.cursor_to_dict(txn) - - return rows + return cast(List[Tuple[str, str, str, str]], txn.fetchall()) rows = await self.db_pool.runInteraction("get_linearized_receipts_for_room", f) @@ -339,10 +339,10 @@ class ReceiptsWorkerStore(SQLBaseStore): return [] content: JsonDict = {} - for row in rows: - content.setdefault(row["event_id"], {}).setdefault(row["receipt_type"], {})[ - row["user_id"] - ] = db_to_json(row["data"]) + for receipt_type, user_id, event_id, data in rows: + content.setdefault(event_id, {}).setdefault(receipt_type, {})[ + user_id + ] = db_to_json(data) return [{"type": EduTypes.RECEIPT, "room_id": room_id, "content": content}] @@ -357,10 +357,13 @@ class ReceiptsWorkerStore(SQLBaseStore): if not room_ids: return {} - def f(txn: LoggingTransaction) -> List[Dict[str, Any]]: + def f( + txn: LoggingTransaction, + ) -> List[Tuple[str, str, str, str, Optional[str], str]]: if from_key: sql = """ - SELECT * FROM receipts_linearized WHERE + SELECT room_id, receipt_type, user_id, event_id, thread_id, data + FROM receipts_linearized WHERE stream_id > ? AND stream_id <= ? AND """ clause, args = make_in_list_sql_clause( @@ -370,7 +373,8 @@ class ReceiptsWorkerStore(SQLBaseStore): txn.execute(sql + clause, [from_key, to_key] + list(args)) else: sql = """ - SELECT * FROM receipts_linearized WHERE + SELECT room_id, receipt_type, user_id, event_id, thread_id, data + FROM receipts_linearized WHERE stream_id <= ? AND """ @@ -380,29 +384,31 @@ class ReceiptsWorkerStore(SQLBaseStore): txn.execute(sql + clause, [to_key] + list(args)) - return self.db_pool.cursor_to_dict(txn) + return cast( + List[Tuple[str, str, str, str, Optional[str], str]], txn.fetchall() + ) txn_results = await self.db_pool.runInteraction( "_get_linearized_receipts_for_rooms", f ) results: JsonDict = {} - for row in txn_results: + for room_id, receipt_type, user_id, event_id, thread_id, data in txn_results: # We want a single event per room, since we want to batch the # receipts by room, event and type. 
room_event = results.setdefault( - row["room_id"], - {"type": EduTypes.RECEIPT, "room_id": row["room_id"], "content": {}}, + room_id, + {"type": EduTypes.RECEIPT, "room_id": room_id, "content": {}}, ) # The content is of the form: # {"$foo:bar": { "read": { "@user:host": }, .. }, .. } - event_entry = room_event["content"].setdefault(row["event_id"], {}) - receipt_type = event_entry.setdefault(row["receipt_type"], {}) + event_entry = room_event["content"].setdefault(event_id, {}) + receipt_type_dict = event_entry.setdefault(receipt_type, {}) - receipt_type[row["user_id"]] = db_to_json(row["data"]) - if row["thread_id"]: - receipt_type[row["user_id"]]["thread_id"] = row["thread_id"] + receipt_type_dict[user_id] = db_to_json(data) + if thread_id: + receipt_type_dict[user_id]["thread_id"] = thread_id results = { room_id: [results[room_id]] if room_id in results else [] @@ -428,10 +434,11 @@ class ReceiptsWorkerStore(SQLBaseStore): A dictionary of roomids to a list of receipts. """ - def f(txn: LoggingTransaction) -> List[Dict[str, Any]]: + def f(txn: LoggingTransaction) -> List[Tuple[str, str, str, str, str]]: if from_key: sql = """ - SELECT * FROM receipts_linearized WHERE + SELECT room_id, receipt_type, user_id, event_id, data + FROM receipts_linearized WHERE stream_id > ? AND stream_id <= ? ORDER BY stream_id DESC LIMIT 100 @@ -439,7 +446,8 @@ class ReceiptsWorkerStore(SQLBaseStore): txn.execute(sql, [from_key, to_key]) else: sql = """ - SELECT * FROM receipts_linearized WHERE + SELECT room_id, receipt_type, user_id, event_id, data + FROM receipts_linearized WHERE stream_id <= ? ORDER BY stream_id DESC LIMIT 100 @@ -447,27 +455,27 @@ class ReceiptsWorkerStore(SQLBaseStore): txn.execute(sql, [to_key]) - return self.db_pool.cursor_to_dict(txn) + return cast(List[Tuple[str, str, str, str, str]], txn.fetchall()) txn_results = await self.db_pool.runInteraction( "get_linearized_receipts_for_all_rooms", f ) results: JsonDict = {} - for row in txn_results: + for room_id, receipt_type, user_id, event_id, data in txn_results: # We want a single event per room, since we want to batch the # receipts by room, event and type. room_event = results.setdefault( - row["room_id"], - {"type": EduTypes.RECEIPT, "room_id": row["room_id"], "content": {}}, + room_id, + {"type": EduTypes.RECEIPT, "room_id": room_id, "content": {}}, ) # The content is of the form: # {"$foo:bar": { "read": { "@user:host": }, .. }, .. 
} - event_entry = room_event["content"].setdefault(row["event_id"], {}) - receipt_type = event_entry.setdefault(row["receipt_type"], {}) + event_entry = room_event["content"].setdefault(event_id, {}) + receipt_type_dict = event_entry.setdefault(receipt_type, {}) - receipt_type[row["user_id"]] = db_to_json(row["data"]) + receipt_type_dict[user_id] = db_to_json(data) return results diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index cc964604e2..64a2c31a5d 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -195,7 +195,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): async def get_user_by_id(self, user_id: str) -> Optional[UserInfo]: """Returns info about the user account, if it exists.""" - def get_user_by_id_txn(txn: LoggingTransaction) -> Optional[Dict[str, Any]]: + def get_user_by_id_txn(txn: LoggingTransaction) -> Optional[UserInfo]: # We could technically use simple_select_one here, but it would not perform # the COALESCEs (unless hacked into the column names), which could yield # confusing results. @@ -213,35 +213,46 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): (user_id,), ) - rows = self.db_pool.cursor_to_dict(txn) - - if len(rows) == 0: + row = txn.fetchone() + if not row: return None - return rows[0] + ( + name, + is_guest, + admin, + consent_version, + consent_ts, + consent_server_notice_sent, + appservice_id, + creation_ts, + user_type, + deactivated, + shadow_banned, + approved, + locked, + ) = row - row = await self.db_pool.runInteraction( + return UserInfo( + appservice_id=appservice_id, + consent_server_notice_sent=consent_server_notice_sent, + consent_version=consent_version, + consent_ts=consent_ts, + creation_ts=creation_ts, + is_admin=bool(admin), + is_deactivated=bool(deactivated), + is_guest=bool(is_guest), + is_shadow_banned=bool(shadow_banned), + user_id=UserID.from_string(name), + user_type=user_type, + approved=bool(approved), + locked=bool(locked), + ) + + return await self.db_pool.runInteraction( desc="get_user_by_id", func=get_user_by_id_txn, ) - if row is None: - return None - - return UserInfo( - appservice_id=row["appservice_id"], - consent_server_notice_sent=row["consent_server_notice_sent"], - consent_version=row["consent_version"], - consent_ts=row["consent_ts"], - creation_ts=row["creation_ts"], - is_admin=bool(row["admin"]), - is_deactivated=bool(row["deactivated"]), - is_guest=bool(row["is_guest"]), - is_shadow_banned=bool(row["shadow_banned"]), - user_id=UserID.from_string(row["name"]), - user_type=row["user_type"], - approved=bool(row["approved"]), - locked=bool(row["locked"]), - ) async def is_trial_user(self, user_id: str) -> bool: """Checks if user is in the "trial" period, i.e. 
within the first @@ -579,16 +590,31 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): """ txn.execute(sql, (token,)) - rows = self.db_pool.cursor_to_dict(txn) + row = txn.fetchone() - if rows: - row = rows[0] + if row: + ( + user_id, + is_guest, + shadow_banned, + token_id, + device_id, + valid_until_ms, + token_owner, + token_used, + ) = row - # This field is nullable, ensure it comes out as a boolean - if row["token_used"] is None: - row["token_used"] = False - - return TokenLookupResult(**row) + return TokenLookupResult( + user_id=user_id, + is_guest=is_guest, + shadow_banned=shadow_banned, + token_id=token_id, + device_id=device_id, + valid_until_ms=valid_until_ms, + token_owner=token_owner, + # This field is nullable, ensure it comes out as a boolean + token_used=bool(token_used), + ) return None @@ -833,11 +859,10 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): """Counts all users registered on the homeserver.""" def _count_users(txn: LoggingTransaction) -> int: - txn.execute("SELECT COUNT(*) AS users FROM users") - rows = self.db_pool.cursor_to_dict(txn) - if rows: - return rows[0]["users"] - return 0 + txn.execute("SELECT COUNT(*) FROM users") + row = txn.fetchone() + assert row is not None + return row[0] return await self.db_pool.runInteraction("count_users", _count_users) @@ -891,11 +916,10 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): """Counts all users without a special user_type registered on the homeserver.""" def _count_users(txn: LoggingTransaction) -> int: - txn.execute("SELECT COUNT(*) AS users FROM users where user_type is null") - rows = self.db_pool.cursor_to_dict(txn) - if rows: - return rows[0]["users"] - return 0 + txn.execute("SELECT COUNT(*) FROM users where user_type is null") + row = txn.fetchone() + assert row is not None + return row[0] return await self.db_pool.runInteraction("count_real_users", _count_users) @@ -1252,12 +1276,8 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): ) txn.execute(sql, []) - res = self.db_pool.cursor_to_dict(txn) - if res: - for user in res: - self.set_expiration_date_for_user_txn( - txn, user["name"], use_delta=True - ) + for (name,) in txn.fetchall(): + self.set_expiration_date_for_user_txn(txn, name, use_delta=True) await self.db_pool.runInteraction( "get_users_with_no_expiration_date", @@ -1963,11 +1983,12 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): (user_id,), ) - rows = self.db_pool.cursor_to_dict(txn) + row = txn.fetchone() + assert row is not None # We cast to bool because the value returned by the database engine might # be an integer if we're using SQLite. 
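            # (SQLite has no native boolean column type: the sqlite3 driver
            # hands back 0/1 integers, while PostgreSQL returns real booleans.)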
- return bool(rows[0]["approved"]) + return bool(row[0]) return await self.db_pool.runInteraction( desc="is_user_pending_approval", @@ -2045,22 +2066,22 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): (last_user, batch_size), ) - rows = self.db_pool.cursor_to_dict(txn) + rows = txn.fetchall() if not rows: return True, 0 rows_processed_nb = 0 - for user in rows: - if not user["count_tokens"] and not user["count_threepids"]: - self.set_user_deactivated_status_txn(txn, user["name"], True) + for name, count_tokens, count_threepids in rows: + if not count_tokens and not count_threepids: + self.set_user_deactivated_status_txn(txn, name, True) rows_processed_nb += 1 logger.info("Marked %d rows as deactivated", rows_processed_nb) self.db_pool.updates._background_update_progress_txn( - txn, "users_set_deactivated_flag", {"user_id": rows[-1]["name"]} + txn, "users_set_deactivated_flag", {"user_id": rows[-1][0]} ) if batch_size > len(rows): diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 719e11aea6..1d4d99932b 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -831,7 +831,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def get_retention_policy_for_room_txn( txn: LoggingTransaction, - ) -> List[Dict[str, Optional[int]]]: + ) -> Optional[Tuple[Optional[int], Optional[int]]]: txn.execute( """ SELECT min_lifetime, max_lifetime FROM room_retention @@ -841,7 +841,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): (room_id,), ) - return self.db_pool.cursor_to_dict(txn) + return cast(Optional[Tuple[Optional[int], Optional[int]]], txn.fetchone()) ret = await self.db_pool.runInteraction( "get_retention_policy_for_room", @@ -856,8 +856,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): max_lifetime=self.config.retention.retention_default_max_lifetime, ) - min_lifetime = ret[0]["min_lifetime"] - max_lifetime = ret[0]["max_lifetime"] + min_lifetime, max_lifetime = ret # If one of the room's policy's attributes isn't defined, use the matching # attribute from the default policy. @@ -1162,14 +1161,13 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): txn.execute(sql, args) - rows = self.db_pool.cursor_to_dict(txn) - rooms_dict = {} - - for row in rows: - rooms_dict[row["room_id"]] = RetentionPolicy( - min_lifetime=row["min_lifetime"], - max_lifetime=row["max_lifetime"], + rooms_dict = { + room_id: RetentionPolicy( + min_lifetime=min_lifetime, + max_lifetime=max_lifetime, ) + for room_id, min_lifetime, max_lifetime in txn + } if include_null: # If required, do a second query that retrieves all of the rooms we know @@ -1178,13 +1176,11 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): txn.execute(sql) - rows = self.db_pool.cursor_to_dict(txn) - # If a room isn't already in the dict (i.e. it doesn't have a retention # policy in its state), add it with a null policy. 
- for row in rows: - if row["room_id"] not in rooms_dict: - rooms_dict[row["room_id"]] = RetentionPolicy() + for (room_id,) in txn: + if room_id not in rooms_dict: + rooms_dict[room_id] = RetentionPolicy() return rooms_dict @@ -1703,24 +1699,24 @@ class RoomBackgroundUpdateStore(SQLBaseStore): (last_room, batch_size), ) - rows = self.db_pool.cursor_to_dict(txn) + rows = txn.fetchall() if not rows: return True - for row in rows: - if not row["json"]: + for room_id, event_id, json in rows: + if not json: retention_policy = {} else: - ev = db_to_json(row["json"]) + ev = db_to_json(json) retention_policy = ev["content"] self.db_pool.simple_insert_txn( txn=txn, table="room_retention", values={ - "room_id": row["room_id"], - "event_id": row["event_id"], + "room_id": room_id, + "event_id": event_id, "min_lifetime": retention_policy.get("min_lifetime"), "max_lifetime": retention_policy.get("max_lifetime"), }, @@ -1729,7 +1725,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore): logger.info("Inserted %d rows into room_retention", len(rows)) self.db_pool.updates._background_update_progress_txn( - txn, "insert_room_retention", {"room_id": rows[-1]["room_id"]} + txn, "insert_room_retention", {"room_id": rows[-1][0]} ) if batch_size > len(rows): diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index e93573f315..bbe08368db 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1349,18 +1349,16 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore): txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) - rows = self.db_pool.cursor_to_dict(txn) + rows = txn.fetchall() if not rows: return 0 - min_stream_id = rows[-1]["stream_ordering"] + min_stream_id = rows[-1][0] to_update = [] - for row in rows: - event_id = row["event_id"] - room_id = row["room_id"] + for _, event_id, room_id, json in rows: try: - event_json = db_to_json(row["json"]) + event_json = db_to_json(json) content = event_json["content"] except Exception: continue diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index a7aae661d8..1d69c4a5f0 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -179,22 +179,24 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): # store_search_entries_txn with a generator function, but that # would mean having two cursors open on the database at once. # Instead we just build a list of results. - rows = self.db_pool.cursor_to_dict(txn) + rows = txn.fetchall() if not rows: return 0 - min_stream_id = rows[-1]["stream_ordering"] + min_stream_id = rows[-1][0] event_search_rows = [] - for row in rows: + for ( + stream_ordering, + event_id, + room_id, + etype, + json, + origin_server_ts, + ) in rows: try: - event_id = row["event_id"] - room_id = row["room_id"] - etype = row["type"] - stream_ordering = row["stream_ordering"] - origin_server_ts = row["origin_server_ts"] try: - event_json = db_to_json(row["json"]) + event_json = db_to_json(json) content = event_json["content"] except Exception: continue diff --git a/synapse/storage/databases/main/task_scheduler.py b/synapse/storage/databases/main/task_scheduler.py index 5c5372a825..5555b53575 100644 --- a/synapse/storage/databases/main/task_scheduler.py +++ b/synapse/storage/databases/main/task_scheduler.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import TYPE_CHECKING, Any, Dict, List, Optional +from typing import TYPE_CHECKING, Any, List, Optional, Tuple, cast from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import ( @@ -27,6 +27,8 @@ from synapse.util import json_encoder if TYPE_CHECKING: from synapse.server import HomeServer +ScheduledTaskRow = Tuple[str, str, str, int, str, str, str, str] + class TaskSchedulerWorkerStore(SQLBaseStore): def __init__( @@ -38,13 +40,18 @@ class TaskSchedulerWorkerStore(SQLBaseStore): super().__init__(database, db_conn, hs) @staticmethod - def _convert_row_to_task(row: Dict[str, Any]) -> ScheduledTask: - row["status"] = TaskStatus(row["status"]) - if row["params"] is not None: - row["params"] = db_to_json(row["params"]) - if row["result"] is not None: - row["result"] = db_to_json(row["result"]) - return ScheduledTask(**row) + def _convert_row_to_task(row: ScheduledTaskRow) -> ScheduledTask: + task_id, action, status, timestamp, resource_id, params, result, error = row + return ScheduledTask( + id=task_id, + action=action, + status=TaskStatus(status), + timestamp=timestamp, + resource_id=resource_id, + params=db_to_json(params) if params is not None else None, + result=db_to_json(result) if result is not None else None, + error=error, + ) async def get_scheduled_tasks( self, @@ -68,7 +75,7 @@ class TaskSchedulerWorkerStore(SQLBaseStore): Returns: a list of `ScheduledTask`, ordered by increasing timestamps """ - def get_scheduled_tasks_txn(txn: LoggingTransaction) -> List[Dict[str, Any]]: + def get_scheduled_tasks_txn(txn: LoggingTransaction) -> List[ScheduledTaskRow]: clauses: List[str] = [] args: List[Any] = [] if resource_id: @@ -101,7 +108,7 @@ class TaskSchedulerWorkerStore(SQLBaseStore): args.append(limit) txn.execute(sql, args) - return self.db_pool.cursor_to_dict(txn) + return cast(List[ScheduledTaskRow], txn.fetchall()) rows = await self.db_pool.runInteraction( "get_scheduled_tasks", get_scheduled_tasks_txn @@ -193,7 +200,22 @@ class TaskSchedulerWorkerStore(SQLBaseStore): desc="get_scheduled_task", ) - return TaskSchedulerWorkerStore._convert_row_to_task(row) if row else None + return ( + TaskSchedulerWorkerStore._convert_row_to_task( + ( + row["id"], + row["action"], + row["status"], + row["timestamp"], + row["resource_id"], + row["params"], + row["result"], + row["error"], + ) + ) + if row + else None + ) async def delete_scheduled_task(self, id: str) -> None: """Delete a specific task from its id. From 3555790b27a923f29283dbb01fed6844086edcd1 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 5 Oct 2023 17:42:44 -0400 Subject: [PATCH 552/562] Remove unused method. (#16435) --- changelog.d/16435.misc | 1 + synapse/storage/databases/main/__init__.py | 20 -------------------- 2 files changed, 1 insertion(+), 20 deletions(-) create mode 100644 changelog.d/16435.misc diff --git a/changelog.d/16435.misc b/changelog.d/16435.misc new file mode 100644 index 0000000000..e541607161 --- /dev/null +++ b/changelog.d/16435.misc @@ -0,0 +1 @@ +Remove unused method. diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 101403578c..dfcbf0a175 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -142,26 +142,6 @@ class DataStore( super().__init__(database, db_conn, hs) - async def get_users(self) -> List[JsonDict]: - """Function to retrieve a list of users in users table. - - Returns: - A list of dictionaries representing users. 
- """ - return await self.db_pool.simple_select_list( - table="users", - keyvalues={}, - retcols=[ - "name", - "password_hash", - "is_guest", - "admin", - "user_type", - "deactivated", - ], - desc="get_users", - ) - async def get_users_paginate( self, start: int, From 5946074d69314226343a0727f24e3aa9616aa1f6 Mon Sep 17 00:00:00 2001 From: V02460 Date: Fri, 6 Oct 2023 12:27:59 +0200 Subject: [PATCH 553/562] Bump pyo3 from 0.17.1 to 0.19.2 (#16162) Signed-off-by: Kai A. Hiller --- Cargo.lock | 28 ++++++++++++++-------------- changelog.d/16162.misc | 1 + rust/Cargo.toml | 6 +++--- rust/src/push/evaluator.rs | 11 +++++++++++ 4 files changed, 29 insertions(+), 17 deletions(-) create mode 100644 changelog.d/16162.misc diff --git a/Cargo.lock b/Cargo.lock index 084b8b91c3..f2b44b5448 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -144,9 +144,9 @@ checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" [[package]] name = "memoffset" -version = "0.6.5" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg", ] @@ -191,9 +191,9 @@ dependencies = [ [[package]] name = "pyo3" -version = "0.17.3" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "268be0c73583c183f2b14052337465768c07726936a260f480f0857cb95ba543" +checksum = "e681a6cfdc4adcc93b4d3cf993749a4552018ee0a9b65fc0ccfad74352c72a38" dependencies = [ "anyhow", "cfg-if", @@ -209,9 +209,9 @@ dependencies = [ [[package]] name = "pyo3-build-config" -version = "0.17.3" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28fcd1e73f06ec85bf3280c48c67e731d8290ad3d730f8be9dc07946923005c8" +checksum = "076c73d0bc438f7a4ef6fdd0c3bb4732149136abd952b110ac93e4edb13a6ba5" dependencies = [ "once_cell", "target-lexicon", @@ -219,9 +219,9 @@ dependencies = [ [[package]] name = "pyo3-ffi" -version = "0.17.3" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f6cb136e222e49115b3c51c32792886defbfb0adead26a688142b346a0b9ffc" +checksum = "e53cee42e77ebe256066ba8aa77eff722b3bb91f3419177cf4cd0f304d3284d9" dependencies = [ "libc", "pyo3-build-config", @@ -240,9 +240,9 @@ dependencies = [ [[package]] name = "pyo3-macros" -version = "0.17.3" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94144a1266e236b1c932682136dc35a9dee8d3589728f68130c7c3861ef96b28" +checksum = "dfeb4c99597e136528c6dd7d5e3de5434d1ceaf487436a3f03b2d56b6fc9efd1" dependencies = [ "proc-macro2", "pyo3-macros-backend", @@ -252,9 +252,9 @@ dependencies = [ [[package]] name = "pyo3-macros-backend" -version = "0.17.3" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f" +checksum = "947dc12175c254889edc0c02e399476c2f652b4b9ebd123aa655c224de259536" dependencies = [ "proc-macro2", "quote", @@ -263,9 +263,9 @@ dependencies = [ [[package]] name = "pythonize" -version = "0.17.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f7f0c136f5fbc01868185eef462800e49659eb23acca83b9e884367a006acb6" +checksum = "8e35b716d430ace57e2d1b4afb51c9e5b7c46d2bce72926e07f9be6a98ced03e" dependencies = [ "pyo3", "serde", diff --git a/changelog.d/16162.misc 
b/changelog.d/16162.misc
new file mode 100644
index 0000000000..b6c77229c1
--- /dev/null
+++ b/changelog.d/16162.misc
@@ -0,0 +1 @@
+Bump pyo3 from 0.17.1 to 0.19.2.

diff --git a/rust/Cargo.toml b/rust/Cargo.toml
index 26403d58cc..f62da35a6f 100644
--- a/rust/Cargo.toml
+++ b/rust/Cargo.toml
@@ -25,14 +25,14 @@ name = "synapse.synapse_rust"
 anyhow = "1.0.63"
 lazy_static = "1.4.0"
 log = "0.4.17"
-pyo3 = { version = "0.17.1", features = [
+pyo3 = { version = "0.19.2", features = [
     "macros",
     "anyhow",
     "abi3",
-    "abi3-py37",
+    "abi3-py38",
 ] }
 pyo3-log = "0.8.1"
-pythonize = "0.17.0"
+pythonize = "0.19.0"
 regex = "1.6.0"
 serde = { version = "1.0.144", features = ["derive"] }
 serde_json = "1.0.85"

diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs
index 48e670478b..3bde075528 100644
--- a/rust/src/push/evaluator.rs
+++ b/rust/src/push/evaluator.rs
@@ -105,6 +105,17 @@ impl PushRuleEvaluator {
     /// Create a new `PushRuleEvaluator`. See struct docstring for details.
     #[allow(clippy::too_many_arguments)]
     #[new]
+    #[pyo3(signature = (
+        flattened_keys,
+        has_mentions,
+        room_member_count,
+        sender_power_level,
+        notification_power_levels,
+        related_events_flattened,
+        related_event_match_enabled,
+        room_version_feature_flags,
+        msc3931_enabled,
+    ))]
     pub fn py_new(
         flattened_keys: BTreeMap<String, JsonValue>,
         has_mentions: bool,

From 26b960b08ba0110ef3246e5749bb75b9b04a231c Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 6 Oct 2023 07:22:55 -0400
Subject: [PATCH 554/562] Register media servlets via regex. (#16419)

This converts the media servlet URLs in the same way as (most) of the rest
of Synapse. This will give more flexibility in the versions each endpoint
exists under.
---
 changelog.d/16419.misc                        |   1 +
 synapse/http/server.py                        |   2 +-
 synapse/media/_base.py                        |  48 +------
 synapse/media/media_repository.py             |  10 +-
 synapse/rest/media/config_resource.py         |  13 +-
 synapse/rest/media/download_resource.py       |  40 ++++--
 .../rest/media/media_repository_resource.py   |  33 +++--
 synapse/rest/media/preview_url_resource.py    |  26 ++--
 synapse/rest/media/thumbnail_resource.py      |  35 ++---
 synapse/rest/media/upload_resource.py         |  14 +-
 tests/media/test_media_storage.py             |  88 ++++++-------
 tests/media/test_url_previewer.py             |   6 +-
 tests/replication/test_multi_media_repo.py    |  19 ++-
 tests/rest/admin/test_admin.py                |  58 +++-----
 tests/rest/admin/test_media.py                |  71 +++-------
 tests/rest/admin/test_statistics.py           |  15 ++-
 tests/rest/admin/test_user.py                 |  21 ++-
 tests/rest/client/utils.py                    |   6 +-
 tests/rest/media/test_url_preview.py          | 124 +++++++++++-------
 tests/unittest.py                             |   4 +-
 20 files changed, 297 insertions(+), 337 deletions(-)
 create mode 100644 changelog.d/16419.misc

diff --git a/changelog.d/16419.misc b/changelog.d/16419.misc
new file mode 100644
index 0000000000..591f371d00
--- /dev/null
+++ b/changelog.d/16419.misc
@@ -0,0 +1 @@
+Update registration of media repository URLs.
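A note on the pattern used throughout the diff below: a Synapse `RestServlet`
declares one compiled regex per URL form in `PATTERNS`, and when a request
matches, the named groups of the regex are passed as keyword arguments to the
corresponding `on_GET`/`on_POST` handler. A minimal sketch of that shape
(illustrative only, not part of the patch; `ExampleResource`, its pattern, and
the registration call are hypothetical):

    import re

    from synapse.http.servlet import RestServlet
    from synapse.http.site import SynapseRequest


    class ExampleResource(RestServlet):
        # The (r0|v3|v1) alternation lets a single servlet serve several
        # endpoint versions; named groups become handler keyword arguments.
        PATTERNS = [
            re.compile("/_matrix/media/(r0|v3|v1)/example/(?P<media_id>[^/]*)$")
        ]

        async def on_GET(self, request: SynapseRequest, media_id: str) -> None:
            ...

    # Registration attaches each on_<METHOD> handler to the patterns, e.g.:
    #     ExampleResource().register(json_resource)

This is why the media resources below gain a `PATTERNS` attribute and rename
`_async_render_GET`/`_async_render_POST` to `on_GET`/`on_POST`.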
diff --git a/synapse/http/server.py b/synapse/http/server.py index 3bbf91298e..1e4e56f36b 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -266,7 +266,7 @@ class HttpServer(Protocol): def register_paths( self, method: str, - path_patterns: Iterable[Pattern], + path_patterns: Iterable[Pattern[str]], callback: ServletCallback, servlet_classname: str, ) -> None: diff --git a/synapse/media/_base.py b/synapse/media/_base.py index 80c448de2b..d103b43449 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py @@ -26,11 +26,11 @@ from twisted.internet.interfaces import IConsumer from twisted.protocols.basic import FileSender from twisted.web.server import Request -from synapse.api.errors import Codes, SynapseError, cs_error +from synapse.api.errors import Codes, cs_error from synapse.http.server import finish_request, respond_with_json from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable -from synapse.util.stringutils import is_ascii, parse_and_validate_server_name +from synapse.util.stringutils import is_ascii logger = logging.getLogger(__name__) @@ -84,52 +84,12 @@ INLINE_CONTENT_TYPES = [ ] -def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]: - """Parses the server name, media ID and optional file name from the request URI - - Also performs some rough validation on the server name. - - Args: - request: The `Request`. - - Returns: - A tuple containing the parsed server name, media ID and optional file name. - - Raises: - SynapseError(404): if parsing or validation fail for any reason - """ - try: - # The type on postpath seems incorrect in Twisted 21.2.0. - postpath: List[bytes] = request.postpath # type: ignore - assert postpath - - # This allows users to append e.g. /test.png to the URL. Useful for - # clients that parse the URL to see content type. 
- server_name_bytes, media_id_bytes = postpath[:2] - server_name = server_name_bytes.decode("utf-8") - media_id = media_id_bytes.decode("utf8") - - # Validate the server name, raising if invalid - parse_and_validate_server_name(server_name) - - file_name = None - if len(postpath) > 2: - try: - file_name = urllib.parse.unquote(postpath[-1].decode("utf-8")) - except UnicodeDecodeError: - pass - return server_name, media_id, file_name - except Exception: - raise SynapseError( - 404, "Invalid media id token %r" % (request.postpath,), Codes.UNKNOWN - ) - - def respond_404(request: SynapseRequest) -> None: + assert request.path is not None respond_with_json( request, 404, - cs_error("Not found %r" % (request.postpath,), code=Codes.NOT_FOUND), + cs_error("Not found '%s'" % (request.path.decode(),), code=Codes.NOT_FOUND), send_cors=True, ) diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index 1b7b014f9a..d11c2ff4ee 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -48,6 +48,7 @@ from synapse.media.filepath import MediaFilePaths from synapse.media.media_storage import MediaStorage from synapse.media.storage_provider import StorageProviderWrapper from synapse.media.thumbnailer import Thumbnailer, ThumbnailError +from synapse.media.url_previewer import UrlPreviewer from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import UserID from synapse.util.async_helpers import Linearizer @@ -114,7 +115,7 @@ class MediaRepository: ) storage_providers.append(provider) - self.media_storage = MediaStorage( + self.media_storage: MediaStorage = MediaStorage( self.hs, self.primary_base_path, self.filepaths, storage_providers ) @@ -142,6 +143,13 @@ class MediaRepository: MEDIA_RETENTION_CHECK_PERIOD_MS, ) + if hs.config.media.url_preview_enabled: + self.url_previewer: Optional[UrlPreviewer] = UrlPreviewer( + hs, self, self.media_storage + ) + else: + self.url_previewer = None + def _start_update_recently_accessed(self) -> Deferred: return run_as_background_process( "update_recently_accessed_media", self._update_recently_accessed diff --git a/synapse/rest/media/config_resource.py b/synapse/rest/media/config_resource.py index a95804d327..dbf5133c72 100644 --- a/synapse/rest/media/config_resource.py +++ b/synapse/rest/media/config_resource.py @@ -14,17 +14,19 @@ # limitations under the License. 
 #
 
+import re
 from typing import TYPE_CHECKING
 
-from synapse.http.server import DirectServeJsonResource, respond_with_json
+from synapse.http.server import respond_with_json
+from synapse.http.servlet import RestServlet
 from synapse.http.site import SynapseRequest
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
 
 
-class MediaConfigResource(DirectServeJsonResource):
-    isLeaf = True
+class MediaConfigResource(RestServlet):
+    PATTERNS = [re.compile("/_matrix/media/(r0|v3|v1)/config$")]
 
     def __init__(self, hs: "HomeServer"):
         super().__init__()
@@ -33,9 +35,6 @@ class MediaConfigResource(DirectServeJsonResource):
         self.auth = hs.get_auth()
         self.limits_dict = {"m.upload.size": config.media.max_upload_size}
 
-    async def _async_render_GET(self, request: SynapseRequest) -> None:
+    async def on_GET(self, request: SynapseRequest) -> None:
         await self.auth.get_user_by_req(request)
         respond_with_json(request, 200, self.limits_dict, send_cors=True)
-
-    async def _async_render_OPTIONS(self, request: SynapseRequest) -> None:
-        respond_with_json(request, 200, {}, send_cors=True)
diff --git a/synapse/rest/media/download_resource.py b/synapse/rest/media/download_resource.py
index 3c618ef60a..65b9ff52fa 100644
--- a/synapse/rest/media/download_resource.py
+++ b/synapse/rest/media/download_resource.py
@@ -13,16 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING
+import re
+from typing import TYPE_CHECKING, Optional
 
-from synapse.http.server import (
-    DirectServeJsonResource,
-    set_corp_headers,
-    set_cors_headers,
-)
-from synapse.http.servlet import parse_boolean
+from synapse.http.server import set_corp_headers, set_cors_headers
+from synapse.http.servlet import RestServlet, parse_boolean
 from synapse.http.site import SynapseRequest
-from synapse.media._base import parse_media_id, respond_404
+from synapse.media._base import respond_404
+from synapse.util.stringutils import parse_and_validate_server_name
 
 if TYPE_CHECKING:
     from synapse.media.media_repository import MediaRepository
@@ -31,15 +29,28 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
-class DownloadResource(DirectServeJsonResource):
-    isLeaf = True
+class DownloadResource(RestServlet):
+    PATTERNS = [
+        re.compile(
+            "/_matrix/media/(r0|v3|v1)/download/(?P<server_name>[^/]*)/(?P<media_id>[^/]*)(/(?P<file_name>[^/]*))?$"
+        )
+    ]
 
     def __init__(self, hs: "HomeServer", media_repo: "MediaRepository"):
         super().__init__()
         self.media_repo = media_repo
         self._is_mine_server_name = hs.is_mine_server_name
 
-    async def _async_render_GET(self, request: SynapseRequest) -> None:
+    async def on_GET(
+        self,
+        request: SynapseRequest,
+        server_name: str,
+        media_id: str,
+        file_name: Optional[str] = None,
+    ) -> None:
+        # Validate the server name, raising if invalid
+        parse_and_validate_server_name(server_name)
+
         set_cors_headers(request)
         set_corp_headers(request)
         request.setHeader(
@@ -58,9 +69,8 @@ class DownloadResource(DirectServeJsonResource):
             b"Referrer-Policy",
             b"no-referrer",
         )
-        server_name, media_id, name = parse_media_id(request)
         if self._is_mine_server_name(server_name):
-            await self.media_repo.get_local_media(request, media_id, name)
+            await self.media_repo.get_local_media(request, media_id, file_name)
         else:
             allow_remote = parse_boolean(request, "allow_remote", default=True)
             if not allow_remote:
@@ -72,4 +82,6 @@ class DownloadResource(DirectServeJsonResource):
                 respond_404(request)
                 return
 
-        await self.media_repo.get_remote_media(request, server_name, media_id, name)
+
await self.media_repo.get_remote_media( + request, server_name, media_id, file_name + ) diff --git a/synapse/rest/media/media_repository_resource.py b/synapse/rest/media/media_repository_resource.py index 5ebaa3b032..2089bb1029 100644 --- a/synapse/rest/media/media_repository_resource.py +++ b/synapse/rest/media/media_repository_resource.py @@ -15,7 +15,7 @@ from typing import TYPE_CHECKING from synapse.config._base import ConfigError -from synapse.http.server import UnrecognizedRequestResource +from synapse.http.server import HttpServer, JsonResource from .config_resource import MediaConfigResource from .download_resource import DownloadResource @@ -27,7 +27,7 @@ if TYPE_CHECKING: from synapse.server import HomeServer -class MediaRepositoryResource(UnrecognizedRequestResource): +class MediaRepositoryResource(JsonResource): """File uploading and downloading. Uploads are POSTed to a resource which returns a token which is used to GET @@ -70,6 +70,11 @@ class MediaRepositoryResource(UnrecognizedRequestResource): width and height are close to the requested size and the aspect matches the requested size. The client should scale the image if it needs to fit within a given rectangle. + + This gets mounted at various points under /_matrix/media, including: + * /_matrix/media/r0 + * /_matrix/media/v1 + * /_matrix/media/v3 """ def __init__(self, hs: "HomeServer"): @@ -77,17 +82,23 @@ class MediaRepositoryResource(UnrecognizedRequestResource): if not hs.config.media.can_load_media_repo: raise ConfigError("Synapse is not configured to use a media repo.") - super().__init__() + JsonResource.__init__(self, hs, canonical_json=False) + self.register_servlets(self, hs) + + @staticmethod + def register_servlets(http_server: HttpServer, hs: "HomeServer") -> None: media_repo = hs.get_media_repository() - self.putChild(b"upload", UploadResource(hs, media_repo)) - self.putChild(b"download", DownloadResource(hs, media_repo)) - self.putChild( - b"thumbnail", ThumbnailResource(hs, media_repo, media_repo.media_storage) + # Note that many of these should not exist as v1 endpoints, but empirically + # a lot of traffic still goes to them. + + UploadResource(hs, media_repo).register(http_server) + DownloadResource(hs, media_repo).register(http_server) + ThumbnailResource(hs, media_repo, media_repo.media_storage).register( + http_server ) if hs.config.media.url_preview_enabled: - self.putChild( - b"preview_url", - PreviewUrlResource(hs, media_repo, media_repo.media_storage), + PreviewUrlResource(hs, media_repo, media_repo.media_storage).register( + http_server ) - self.putChild(b"config", MediaConfigResource(hs)) + MediaConfigResource(hs).register(http_server) diff --git a/synapse/rest/media/preview_url_resource.py b/synapse/rest/media/preview_url_resource.py index 58513c4be4..c8acb65dca 100644 --- a/synapse/rest/media/preview_url_resource.py +++ b/synapse/rest/media/preview_url_resource.py @@ -13,24 +13,20 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import re from typing import TYPE_CHECKING -from synapse.http.server import ( - DirectServeJsonResource, - respond_with_json, - respond_with_json_bytes, -) -from synapse.http.servlet import parse_integer, parse_string +from synapse.http.server import respond_with_json_bytes +from synapse.http.servlet import RestServlet, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.media.media_storage import MediaStorage -from synapse.media.url_previewer import UrlPreviewer if TYPE_CHECKING: from synapse.media.media_repository import MediaRepository from synapse.server import HomeServer -class PreviewUrlResource(DirectServeJsonResource): +class PreviewUrlResource(RestServlet): """ The `GET /_matrix/media/r0/preview_url` endpoint provides a generic preview API for URLs which outputs Open Graph (https://ogp.me/) responses (with some Matrix @@ -48,7 +44,7 @@ class PreviewUrlResource(DirectServeJsonResource): * Matrix cannot be used to distribute the metadata between homeservers. """ - isLeaf = True + PATTERNS = [re.compile("/_matrix/media/(r0|v3|v1)/preview_url$")] def __init__( self, @@ -62,14 +58,10 @@ class PreviewUrlResource(DirectServeJsonResource): self.clock = hs.get_clock() self.media_repo = media_repo self.media_storage = media_storage + assert self.media_repo.url_previewer is not None + self.url_previewer = self.media_repo.url_previewer - self._url_previewer = UrlPreviewer(hs, media_repo, media_storage) - - async def _async_render_OPTIONS(self, request: SynapseRequest) -> None: - request.setHeader(b"Allow", b"OPTIONS, GET") - respond_with_json(request, 200, {}, send_cors=True) - - async def _async_render_GET(self, request: SynapseRequest) -> None: + async def on_GET(self, request: SynapseRequest) -> None: # XXX: if get_user_by_req fails, what should we do in an async render? requester = await self.auth.get_user_by_req(request) url = parse_string(request, "url", required=True) @@ -77,5 +69,5 @@ class PreviewUrlResource(DirectServeJsonResource): if ts is None: ts = self.clock.time_msec() - og = await self._url_previewer.preview(url, requester.user, ts) + og = await self.url_previewer.preview(url, requester.user, ts) respond_with_json_bytes(request, 200, og, send_cors=True) diff --git a/synapse/rest/media/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py index 661e604b85..f9cd773f77 100644 --- a/synapse/rest/media/thumbnail_resource.py +++ b/synapse/rest/media/thumbnail_resource.py @@ -13,29 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-
 import logging
+import re
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
 
 from synapse.api.errors import Codes, SynapseError, cs_error
 from synapse.config.repository import THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP
-from synapse.http.server import (
-    DirectServeJsonResource,
-    respond_with_json,
-    set_corp_headers,
-    set_cors_headers,
-)
-from synapse.http.servlet import parse_integer, parse_string
+from synapse.http.server import respond_with_json, set_corp_headers, set_cors_headers
+from synapse.http.servlet import RestServlet, parse_integer, parse_string
 from synapse.http.site import SynapseRequest
 from synapse.media._base import (
     FileInfo,
     ThumbnailInfo,
-    parse_media_id,
     respond_404,
     respond_with_file,
     respond_with_responder,
 )
 from synapse.media.media_storage import MediaStorage
+from synapse.util.stringutils import parse_and_validate_server_name
 
 if TYPE_CHECKING:
     from synapse.media.media_repository import MediaRepository
@@ -44,8 +39,12 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
-class ThumbnailResource(DirectServeJsonResource):
-    isLeaf = True
+class ThumbnailResource(RestServlet):
+    PATTERNS = [
+        re.compile(
+            "/_matrix/media/(r0|v3|v1)/thumbnail/(?P<server_name>[^/]*)/(?P<media_id>[^/]*)$"
+        )
+    ]
 
     def __init__(
         self,
@@ -60,12 +59,17 @@
         self.media_storage = media_storage
         self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
         self._is_mine_server_name = hs.is_mine_server_name
+        self._server_name = hs.hostname
         self.prevent_media_downloads_from = hs.config.media.prevent_media_downloads_from
 
-    async def _async_render_GET(self, request: SynapseRequest) -> None:
+    async def on_GET(
+        self, request: SynapseRequest, server_name: str, media_id: str
+    ) -> None:
+        # Validate the server name, raising if invalid
+        parse_and_validate_server_name(server_name)
+
         set_cors_headers(request)
         set_corp_headers(request)
-        server_name, media_id, _ = parse_media_id(request)
         width = parse_integer(request, "width", required=True)
         height = parse_integer(request, "height", required=True)
         method = parse_string(request, "method", "scale")
@@ -418,13 +422,14 @@
         # `dynamic_thumbnails` is disabled.
         logger.info("Failed to find any generated thumbnails")
 
+        assert request.path is not None
         respond_with_json(
             request,
             400,
             cs_error(
-                "Cannot find any thumbnails for the requested media (%r). This might mean the media is not a supported_media_format=(%s) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)"
+                "Cannot find any thumbnails for the requested media ('%s'). This might mean the media is not a supported_media_format=(%s) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)"
                 % (
-                    request.postpath,
+                    request.path.decode(),
                     ", ".join(THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP.keys()),
                 ),
                 code=Codes.UNKNOWN,
diff --git a/synapse/rest/media/upload_resource.py b/synapse/rest/media/upload_resource.py
index 043e8d6077..949326d85d 100644
--- a/synapse/rest/media/upload_resource.py
+++ b/synapse/rest/media/upload_resource.py
@@ -14,11 +14,12 @@
 # limitations under the License.
import logging +import re from typing import IO, TYPE_CHECKING, Dict, List, Optional from synapse.api.errors import Codes, SynapseError -from synapse.http.server import DirectServeJsonResource, respond_with_json -from synapse.http.servlet import parse_bytes_from_args +from synapse.http.server import respond_with_json +from synapse.http.servlet import RestServlet, parse_bytes_from_args from synapse.http.site import SynapseRequest from synapse.media.media_storage import SpamMediaException @@ -29,8 +30,8 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -class UploadResource(DirectServeJsonResource): - isLeaf = True +class UploadResource(RestServlet): + PATTERNS = [re.compile("/_matrix/media/(r0|v3|v1)/upload")] def __init__(self, hs: "HomeServer", media_repo: "MediaRepository"): super().__init__() @@ -43,10 +44,7 @@ class UploadResource(DirectServeJsonResource): self.max_upload_size = hs.config.media.max_upload_size self.clock = hs.get_clock() - async def _async_render_OPTIONS(self, request: SynapseRequest) -> None: - respond_with_json(request, 200, {}, send_cors=True) - - async def _async_render_POST(self, request: SynapseRequest) -> None: + async def on_POST(self, request: SynapseRequest) -> None: requester = await self.auth.get_user_by_req(request) raw_content_length = request.getHeader("Content-Length") if raw_content_length is None: diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py index 04fc7bdcef..ba00e35a9e 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -28,6 +28,7 @@ from typing_extensions import Literal from twisted.internet import defer from twisted.internet.defer import Deferred from twisted.test.proto_helpers import MemoryReactor +from twisted.web.resource import Resource from synapse.api.errors import Codes from synapse.events import EventBase @@ -41,12 +42,13 @@ from synapse.module_api import ModuleApi from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers from synapse.rest import admin from synapse.rest.client import login +from synapse.rest.media.thumbnail_resource import ThumbnailResource from synapse.server import HomeServer from synapse.types import JsonDict, RoomAlias from synapse.util import Clock from tests import unittest -from tests.server import FakeChannel, FakeSite, make_request +from tests.server import FakeChannel from tests.test_utils import SMALL_PNG from tests.utils import default_config @@ -288,22 +290,22 @@ class MediaRepoTests(unittest.HomeserverTestCase): return hs def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - media_resource = hs.get_media_repository_resource() - self.download_resource = media_resource.children[b"download"] - self.thumbnail_resource = media_resource.children[b"thumbnail"] self.store = hs.get_datastores().main self.media_repo = hs.get_media_repository() self.media_id = "example.com/12345" + def create_resource_dict(self) -> Dict[str, Resource]: + resources = super().create_resource_dict() + resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources + def _req( self, content_disposition: Optional[bytes], include_content_type: bool = True ) -> FakeChannel: - channel = make_request( - self.reactor, - FakeSite(self.download_resource, self.reactor), + channel = self.make_request( "GET", - self.media_id, + f"/_matrix/media/v3/download/{self.media_id}", shorthand=False, await_result=False, ) @@ -481,11 +483,9 @@ class MediaRepoTests(unittest.HomeserverTestCase): # 
Fetching again should work, without re-requesting the image from the
        # remote.
        params = "?width=32&height=32&method=scale"
-        channel = make_request(
-            self.reactor,
-            FakeSite(self.thumbnail_resource, self.reactor),
+        channel = self.make_request(
             "GET",
-            self.media_id + params,
+            f"/_matrix/media/v3/thumbnail/{self.media_id}{params}",
             shorthand=False,
             await_result=False,
         )
@@ -511,11 +511,9 @@ class MediaRepoTests(unittest.HomeserverTestCase):
         )
         shutil.rmtree(thumbnail_dir, ignore_errors=True)
 
-        channel = make_request(
-            self.reactor,
-            FakeSite(self.thumbnail_resource, self.reactor),
+        channel = self.make_request(
             "GET",
-            self.media_id + params,
+            f"/_matrix/media/v3/thumbnail/{self.media_id}{params}",
             shorthand=False,
             await_result=False,
         )
@@ -549,11 +547,9 @@ class MediaRepoTests(unittest.HomeserverTestCase):
         """
         params = "?width=32&height=32&method=" + method
-        channel = make_request(
-            self.reactor,
-            FakeSite(self.thumbnail_resource, self.reactor),
+        channel = self.make_request(
             "GET",
-            self.media_id + params,
+            f"/_matrix/media/r0/thumbnail/{self.media_id}{params}",
             shorthand=False,
             await_result=False,
         )
@@ -590,7 +586,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
                 channel.json_body,
                 {
                     "errcode": "M_UNKNOWN",
-                    "error": "Cannot find any thumbnails for the requested media ([b'example.com', b'12345']). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)",
+                    "error": "Cannot find any thumbnails for the requested media ('/_matrix/media/r0/thumbnail/example.com/12345'). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)",
                 },
             )
         else:
@@ -600,7 +596,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
                 channel.json_body,
                 {
                     "errcode": "M_NOT_FOUND",
-                    "error": "Not found [b'example.com', b'12345']",
+                    "error": "Not found '/_matrix/media/r0/thumbnail/example.com/12345'",
                 },
             )
 
@@ -609,12 +605,17 @@ class MediaRepoTests(unittest.HomeserverTestCase):
         """Test that choosing between thumbnails with the same quality rating succeeds.
 
         We are not particular about which thumbnail is chosen."""
+
+        media_repo = self.hs.get_media_repository()
+        thumbnail_resource = ThumbnailResource(
+            self.hs, media_repo, media_repo.media_storage
+        )
+
         self.assertIsNotNone(
-            self.thumbnail_resource._select_thumbnail(
+            thumbnail_resource._select_thumbnail(
                 desired_width=desired_size,
                 desired_height=desired_size,
                 desired_method=method,
-                desired_type=self.test_image.content_type,
+                desired_type=self.test_image.content_type,  # type: ignore[arg-type]
                 # Provide two identical thumbnails which are guaranteed to have the same
                 # quality rating.
thumbnail_infos=[ @@ -636,7 +637,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): }, ], file_id=f"image{self.test_image.extension.decode()}", - url_cache=None, + url_cache=False, server_name=None, ) ) @@ -725,13 +726,13 @@ class SpamCheckerTestCaseLegacy(unittest.HomeserverTestCase): self.user = self.register_user("user", "pass") self.tok = self.login("user", "pass") - # Allow for uploading and downloading to/from the media repo - self.media_repo = hs.get_media_repository_resource() - self.download_resource = self.media_repo.children[b"download"] - self.upload_resource = self.media_repo.children[b"upload"] - load_legacy_spam_checkers(hs) + def create_resource_dict(self) -> Dict[str, Resource]: + resources = super().create_resource_dict() + resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources + def default_config(self) -> Dict[str, Any]: config = default_config("test") @@ -751,9 +752,7 @@ class SpamCheckerTestCaseLegacy(unittest.HomeserverTestCase): def test_upload_innocent(self) -> None: """Attempt to upload some innocent data that should be allowed.""" - self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=self.tok, expect_code=200 - ) + self.helper.upload_media(SMALL_PNG, tok=self.tok, expect_code=200) def test_upload_ban(self) -> None: """Attempt to upload some data that includes bytes "evil", which should @@ -762,9 +761,7 @@ class SpamCheckerTestCaseLegacy(unittest.HomeserverTestCase): data = b"Some evil data" - self.helper.upload_media( - self.upload_resource, data, tok=self.tok, expect_code=400 - ) + self.helper.upload_media(data, tok=self.tok, expect_code=400) EVIL_DATA = b"Some evil data" @@ -781,15 +778,15 @@ class SpamCheckerTestCase(unittest.HomeserverTestCase): self.user = self.register_user("user", "pass") self.tok = self.login("user", "pass") - # Allow for uploading and downloading to/from the media repo - self.media_repo = hs.get_media_repository_resource() - self.download_resource = self.media_repo.children[b"download"] - self.upload_resource = self.media_repo.children[b"upload"] - hs.get_module_api().register_spam_checker_callbacks( check_media_file_for_spam=self.check_media_file_for_spam ) + def create_resource_dict(self) -> Dict[str, Resource]: + resources = super().create_resource_dict() + resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources + async def check_media_file_for_spam( self, file_wrapper: ReadableFileWrapper, file_info: FileInfo ) -> Union[Codes, Literal["NOT_SPAM"], Tuple[Codes, JsonDict]]: @@ -805,21 +802,16 @@ class SpamCheckerTestCase(unittest.HomeserverTestCase): def test_upload_innocent(self) -> None: """Attempt to upload some innocent data that should be allowed.""" - self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=self.tok, expect_code=200 - ) + self.helper.upload_media(SMALL_PNG, tok=self.tok, expect_code=200) def test_upload_ban(self) -> None: """Attempt to upload some data that includes bytes "evil", which should get rejected by the spam checker. 
""" - self.helper.upload_media( - self.upload_resource, EVIL_DATA, tok=self.tok, expect_code=400 - ) + self.helper.upload_media(EVIL_DATA, tok=self.tok, expect_code=400) self.helper.upload_media( - self.upload_resource, EVIL_DATA_EXPERIMENT, tok=self.tok, expect_code=400, diff --git a/tests/media/test_url_previewer.py b/tests/media/test_url_previewer.py index 46ecde5344..04b69f378a 100644 --- a/tests/media/test_url_previewer.py +++ b/tests/media/test_url_previewer.py @@ -61,9 +61,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): return self.setup_test_homeserver(config=config) def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - media_repo_resource = hs.get_media_repository_resource() - preview_url = media_repo_resource.children[b"preview_url"] - self.url_previewer = preview_url._url_previewer + media_repo = hs.get_media_repository() + assert media_repo.url_previewer is not None + self.url_previewer = media_repo.url_previewer def test_all_urls_allowed(self) -> None: self.assertFalse(self.url_previewer._is_url_blocked("http://matrix.org")) diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py index 6e78daa830..b230a6c361 100644 --- a/tests/replication/test_multi_media_repo.py +++ b/tests/replication/test_multi_media_repo.py @@ -13,7 +13,7 @@ # limitations under the License. import logging import os -from typing import Optional, Tuple +from typing import Any, Optional, Tuple from twisted.internet.interfaces import IOpenSSLServerConnectionCreator from twisted.internet.protocol import Factory @@ -29,7 +29,7 @@ from synapse.util import Clock from tests.http import TestServerTLSConnectionFactory, get_test_ca_cert_file from tests.replication._base import BaseMultiWorkerStreamTestCase -from tests.server import FakeChannel, FakeSite, FakeTransport, make_request +from tests.server import FakeChannel, FakeTransport, make_request from tests.test_utils import SMALL_PNG logger = logging.getLogger(__name__) @@ -56,6 +56,16 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): conf["federation_custom_ca_list"] = [get_test_ca_cert_file()] return conf + def make_worker_hs( + self, worker_app: str, extra_config: Optional[dict] = None, **kwargs: Any + ) -> HomeServer: + worker_hs = super().make_worker_hs(worker_app, extra_config, **kwargs) + # Force the media paths onto the replication resource. + worker_hs.get_media_repository_resource().register_servlets( + self._hs_to_site[worker_hs].resource, worker_hs + ) + return worker_hs + def _get_media_req( self, hs: HomeServer, target: str, media_id: str ) -> Tuple[FakeChannel, Request]: @@ -68,12 +78,11 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): The channel for the *client* request and the *outbound* request for the media which the caller should respond to. """ - resource = hs.get_media_repository_resource().children[b"download"] channel = make_request( self.reactor, - FakeSite(resource, self.reactor), + self._hs_to_site[hs], "GET", - f"/{target}/{media_id}", + f"/_matrix/media/r0/download/{target}/{media_id}", shorthand=False, access_token=self.access_token, await_result=False, diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 359d131b37..8646b2f0fd 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -13,10 +13,12 @@ # limitations under the License. 
import urllib.parse +from typing import Dict from parameterized import parameterized from twisted.test.proto_helpers import MemoryReactor +from twisted.web.resource import Resource import synapse.rest.admin from synapse.http.server import JsonResource @@ -26,7 +28,6 @@ from synapse.server import HomeServer from synapse.util import Clock from tests import unittest -from tests.server import FakeSite, make_request from tests.test_utils import SMALL_PNG @@ -55,21 +56,18 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - # Allow for uploading and downloading to/from the media repo - self.media_repo = hs.get_media_repository_resource() - self.download_resource = self.media_repo.children[b"download"] - self.upload_resource = self.media_repo.children[b"upload"] + def create_resource_dict(self) -> Dict[str, Resource]: + resources = super().create_resource_dict() + resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources def _ensure_quarantined( self, admin_user_tok: str, server_and_media_id: str ) -> None: """Ensure a piece of media is quarantined when trying to access it.""" - channel = make_request( - self.reactor, - FakeSite(self.download_resource, self.reactor), + channel = self.make_request( "GET", - server_and_media_id, + f"/_matrix/media/v3/download/{server_and_media_id}", shorthand=False, access_token=admin_user_tok, ) @@ -117,20 +115,16 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): non_admin_user_tok = self.login("id_nonadmin", "pass") # Upload some media into the room - response = self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=admin_user_tok - ) + response = self.helper.upload_media(SMALL_PNG, tok=admin_user_tok) # Extract media ID from the response server_name_and_media_id = response["content_uri"][6:] # Cut off 'mxc://' server_name, media_id = server_name_and_media_id.split("/") # Attempt to access the media - channel = make_request( - self.reactor, - FakeSite(self.download_resource, self.reactor), + channel = self.make_request( "GET", - server_name_and_media_id, + f"/_matrix/media/v3/download/{server_name_and_media_id}", shorthand=False, access_token=non_admin_user_tok, ) @@ -173,12 +167,8 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): self.helper.join(room_id, non_admin_user, tok=non_admin_user_tok) # Upload some media - response_1 = self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=non_admin_user_tok - ) - response_2 = self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=non_admin_user_tok - ) + response_1 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok) + response_2 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok) # Extract mxcs mxc_1 = response_1["content_uri"] @@ -227,12 +217,8 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): non_admin_user_tok = self.login("user_nonadmin", "pass") # Upload some media - response_1 = self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=non_admin_user_tok - ) - response_2 = self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=non_admin_user_tok - ) + response_1 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok) + response_2 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok) # Extract media IDs server_and_media_id_1 = response_1["content_uri"][6:] @@ -265,12 +251,8 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): 
non_admin_user_tok = self.login("user_nonadmin", "pass") # Upload some media - response_1 = self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=non_admin_user_tok - ) - response_2 = self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=non_admin_user_tok - ) + response_1 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok) + response_2 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok) # Extract media IDs server_and_media_id_1 = response_1["content_uri"][6:] @@ -304,11 +286,9 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): self._ensure_quarantined(admin_user_tok, server_and_media_id_1) # Attempt to access each piece of media - channel = make_request( - self.reactor, - FakeSite(self.download_resource, self.reactor), + channel = self.make_request( "GET", - server_and_media_id_2, + f"/_matrix/media/v3/download/{server_and_media_id_2}", shorthand=False, access_token=non_admin_user_tok, ) diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index 6d04911d67..278808abb5 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -13,10 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. import os +from typing import Dict from parameterized import parameterized from twisted.test.proto_helpers import MemoryReactor +from twisted.web.resource import Resource import synapse.rest.admin from synapse.api.errors import Codes @@ -26,22 +28,27 @@ from synapse.server import HomeServer from synapse.util import Clock from tests import unittest -from tests.server import FakeSite, make_request from tests.test_utils import SMALL_PNG VALID_TIMESTAMP = 1609459200000 # 2021-01-01 in milliseconds INVALID_TIMESTAMP_IN_S = 1893456000 # 2030-01-01 in seconds -class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): +class _AdminMediaTests(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, synapse.rest.admin.register_servlets_for_media_repo, login.register_servlets, ] + def create_resource_dict(self) -> Dict[str, Resource]: + resources = super().create_resource_dict() + resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources + + +class DeleteMediaByIDTestCase(_AdminMediaTests): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.media_repo = hs.get_media_repository_resource() self.server_name = hs.hostname self.admin_user = self.register_user("admin", "pass", admin=True) @@ -117,12 +124,8 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): Tests that delete a media is successfully """ - download_resource = self.media_repo.children[b"download"] - upload_resource = self.media_repo.children[b"upload"] - # Upload some media into the room response = self.helper.upload_media( - upload_resource, SMALL_PNG, tok=self.admin_user_tok, expect_code=200, @@ -134,11 +137,9 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): self.assertEqual(server_name, self.server_name) # Attempt to access media - channel = make_request( - self.reactor, - FakeSite(download_resource, self.reactor), + channel = self.make_request( "GET", - server_and_media_id, + f"/_matrix/media/v3/download/{server_and_media_id}", shorthand=False, access_token=self.admin_user_tok, ) @@ -173,11 +174,9 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): ) # Attempt to access media - channel = make_request( - self.reactor, - FakeSite(download_resource, self.reactor), + 
channel = self.make_request( "GET", - server_and_media_id, + f"/_matrix/media/v3/download/{server_and_media_id}", shorthand=False, access_token=self.admin_user_tok, ) @@ -194,7 +193,7 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): self.assertFalse(os.path.exists(local_path)) -class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): +class DeleteMediaByDateSizeTestCase(_AdminMediaTests): servlets = [ synapse.rest.admin.register_servlets, synapse.rest.admin.register_servlets_for_media_repo, @@ -529,11 +528,8 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): """ Create a media and return media_id and server_and_media_id """ - upload_resource = self.media_repo.children[b"upload"] - # Upload some media into the room response = self.helper.upload_media( - upload_resource, SMALL_PNG, tok=self.admin_user_tok, expect_code=200, @@ -553,16 +549,12 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): """ Try to access a media and check the result """ - download_resource = self.media_repo.children[b"download"] - media_id = server_and_media_id.split("/")[1] local_path = self.filepaths.local_media_filepath(media_id) - channel = make_request( - self.reactor, - FakeSite(download_resource, self.reactor), + channel = self.make_request( "GET", - server_and_media_id, + f"/_matrix/media/v3/download/{server_and_media_id}", shorthand=False, access_token=self.admin_user_tok, ) @@ -591,27 +583,16 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.assertFalse(os.path.exists(local_path)) -class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase): - servlets = [ - synapse.rest.admin.register_servlets, - synapse.rest.admin.register_servlets_for_media_repo, - login.register_servlets, - ] - +class QuarantineMediaByIDTestCase(_AdminMediaTests): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - media_repo = hs.get_media_repository_resource() self.store = hs.get_datastores().main self.server_name = hs.hostname self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") - # Create media - upload_resource = media_repo.children[b"upload"] - # Upload some media into the room response = self.helper.upload_media( - upload_resource, SMALL_PNG, tok=self.admin_user_tok, expect_code=200, @@ -720,26 +701,16 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase): self.assertFalse(media_info["quarantined_by"]) -class ProtectMediaByIDTestCase(unittest.HomeserverTestCase): - servlets = [ - synapse.rest.admin.register_servlets, - synapse.rest.admin.register_servlets_for_media_repo, - login.register_servlets, - ] - +class ProtectMediaByIDTestCase(_AdminMediaTests): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - media_repo = hs.get_media_repository_resource() + hs.get_media_repository_resource() self.store = hs.get_datastores().main self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") - # Create media - upload_resource = media_repo.children[b"upload"] - # Upload some media into the room response = self.helper.upload_media( - upload_resource, SMALL_PNG, tok=self.admin_user_tok, expect_code=200, @@ -816,7 +787,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase): self.assertFalse(media_info["safe_from_quarantine"]) -class PurgeMediaCacheTestCase(unittest.HomeserverTestCase): +class PurgeMediaCacheTestCase(_AdminMediaTests): servlets = [ 
synapse.rest.admin.register_servlets, synapse.rest.admin.register_servlets_for_media_repo, diff --git a/tests/rest/admin/test_statistics.py b/tests/rest/admin/test_statistics.py index b60f16b914..cd8ee274d8 100644 --- a/tests/rest/admin/test_statistics.py +++ b/tests/rest/admin/test_statistics.py @@ -12,9 +12,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional +from typing import Dict, List, Optional from twisted.test.proto_helpers import MemoryReactor +from twisted.web.resource import Resource import synapse.rest.admin from synapse.api.errors import Codes @@ -34,8 +35,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.media_repo = hs.get_media_repository_resource() - self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") @@ -44,6 +43,11 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): self.url = "/_synapse/admin/v1/statistics/users/media" + def create_resource_dict(self) -> Dict[str, Resource]: + resources = super().create_resource_dict() + resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources + def test_no_auth(self) -> None: """ Try to list users without authentication. @@ -470,12 +474,9 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): user_token: Access token of the user number_media: Number of media to be created for the user """ - upload_resource = self.media_repo.children[b"upload"] for _ in range(number_media): # Upload some media into the room - self.helper.upload_media( - upload_resource, SMALL_PNG, tok=user_token, expect_code=200 - ) + self.helper.upload_media(SMALL_PNG, tok=user_token, expect_code=200) def _check_fields(self, content: List[JsonDict]) -> None: """Checks that all attributes are present in content diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index b326ad2c90..37f37a09d8 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -17,12 +17,13 @@ import hmac import os import urllib.parse from binascii import unhexlify -from typing import List, Optional +from typing import Dict, List, Optional from unittest.mock import AsyncMock, Mock, patch from parameterized import parameterized, parameterized_class from twisted.test.proto_helpers import MemoryReactor +from twisted.web.resource import Resource import synapse.rest.admin from synapse.api.constants import ApprovalNoticeMedium, LoginType, UserTypes @@ -45,7 +46,6 @@ from synapse.types import JsonDict, UserID, create_requester from synapse.util import Clock from tests import unittest -from tests.server import FakeSite, make_request from tests.test_utils import SMALL_PNG from tests.unittest import override_config @@ -3421,7 +3421,6 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main - self.media_repo = hs.get_media_repository_resource() self.filepaths = MediaFilePaths(hs.config.media.media_store_path) self.admin_user = self.register_user("admin", "pass", admin=True) @@ -3432,6 +3431,11 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): self.other_user ) + def create_resource_dict(self) -> Dict[str, Resource]: + resources = 
super().create_resource_dict() + resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources + @parameterized.expand(["GET", "DELETE"]) def test_no_auth(self, method: str) -> None: """Try to list media of an user without authentication.""" @@ -3907,12 +3911,9 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): Returns: The ID of the newly created media. """ - upload_resource = self.media_repo.children[b"upload"] - download_resource = self.media_repo.children[b"download"] - # Upload some media into the room response = self.helper.upload_media( - upload_resource, image_data, user_token, filename, expect_code=200 + image_data, user_token, filename, expect_code=200 ) # Extract media ID from the response @@ -3920,11 +3921,9 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): media_id = server_and_media_id.split("/")[1] # Try to access a media and to create `last_access_ts` - channel = make_request( - self.reactor, - FakeSite(download_resource, self.reactor), + channel = self.make_request( "GET", - server_and_media_id, + f"/_matrix/media/v3/download/{server_and_media_id}", shorthand=False, access_token=user_token, ) diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index 9532e5ddc1..465b696c0b 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -37,7 +37,6 @@ import attr from typing_extensions import Literal from twisted.test.proto_helpers import MemoryReactorClock -from twisted.web.resource import Resource from twisted.web.server import Site from synapse.api.constants import Membership @@ -45,7 +44,7 @@ from synapse.api.errors import Codes from synapse.server import HomeServer from synapse.types import JsonDict -from tests.server import FakeChannel, FakeSite, make_request +from tests.server import FakeChannel, make_request from tests.test_utils.html_parsers import TestHtmlParser from tests.test_utils.oidc import FakeAuthorizationGrant, FakeOidcServer @@ -558,7 +557,6 @@ class RestHelper: def upload_media( self, - resource: Resource, image_data: bytes, tok: str, filename: str = "test.png", @@ -576,7 +574,7 @@ class RestHelper: path = "/_matrix/media/r0/upload?filename=%s" % (filename,) channel = make_request( self.reactor, - FakeSite(resource, self.reactor), + self.site, "POST", path, content=image_data, diff --git a/tests/rest/media/test_url_preview.py b/tests/rest/media/test_url_preview.py index 05d5e39cab..24459c6af4 100644 --- a/tests/rest/media/test_url_preview.py +++ b/tests/rest/media/test_url_preview.py @@ -24,10 +24,10 @@ from twisted.internet.address import IPv4Address, IPv6Address from twisted.internet.error import DNSLookupError from twisted.internet.interfaces import IAddress, IResolutionReceiver from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactor +from twisted.web.resource import Resource from synapse.config.oembed import OEmbedEndpointConfig from synapse.media.url_previewer import IMAGE_CACHE_EXPIRY_MS -from synapse.rest.media.media_repository_resource import MediaRepositoryResource from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -117,8 +117,8 @@ class URLPreviewTests(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.media_repo = hs.get_media_repository() - media_repo_resource = hs.get_media_repository_resource() - self.preview_url = media_repo_resource.children[b"preview_url"] + assert self.media_repo.url_previewer is not None + 
self.url_previewer = self.media_repo.url_previewer self.lookups: Dict[str, Any] = {} @@ -143,8 +143,15 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.reactor.nameResolver = Resolver() # type: ignore[assignment] - def create_test_resource(self) -> MediaRepositoryResource: - return self.hs.get_media_repository_resource() + def create_resource_dict(self) -> Dict[str, Resource]: + """Create a resource tree for the test server + + A resource tree is a mapping from path to twisted.web.resource. + + The default implementation creates a JsonResource and calls each function in + `servlets` to register servlets against it. + """ + return {"/_matrix/media": self.hs.get_media_repository_resource()} def _assert_small_png(self, json_body: JsonDict) -> None: """Assert properties from the SMALL_PNG test image.""" @@ -159,7 +166,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -183,7 +190,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): # Check the cache returns the correct response channel = self.make_request( - "GET", "preview_url?url=http://matrix.org", shorthand=False + "GET", + "/_matrix/media/v3/preview_url?url=http://matrix.org", + shorthand=False, ) # Check the cache response has the same content @@ -193,13 +202,15 @@ class URLPreviewTests(unittest.HomeserverTestCase): ) # Clear the in-memory cache - self.assertIn("http://matrix.org", self.preview_url._url_previewer._cache) - self.preview_url._url_previewer._cache.pop("http://matrix.org") - self.assertNotIn("http://matrix.org", self.preview_url._url_previewer._cache) + self.assertIn("http://matrix.org", self.url_previewer._cache) + self.url_previewer._cache.pop("http://matrix.org") + self.assertNotIn("http://matrix.org", self.url_previewer._cache) # Check the database cache returns the correct response channel = self.make_request( - "GET", "preview_url?url=http://matrix.org", shorthand=False + "GET", + "/_matrix/media/v3/preview_url?url=http://matrix.org", + shorthand=False, ) # Check the cache response has the same content @@ -221,7 +232,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -251,7 +262,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -287,7 +298,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -328,7 +339,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -363,7 +374,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -396,7 +407,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - 
"preview_url?url=http://example.com", + "/_matrix/media/v3/preview_url?url=http://example.com", shorthand=False, await_result=False, ) @@ -425,7 +436,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.lookups["example.com"] = [(IPv4Address, "192.168.1.1")] channel = self.make_request( - "GET", "preview_url?url=http://example.com", shorthand=False + "GET", + "/_matrix/media/v3/preview_url?url=http://example.com", + shorthand=False, ) # No requests made. @@ -446,7 +459,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.lookups["example.com"] = [(IPv4Address, "1.1.1.2")] channel = self.make_request( - "GET", "preview_url?url=http://example.com", shorthand=False + "GET", + "/_matrix/media/v3/preview_url?url=http://example.com", + shorthand=False, ) self.assertEqual(channel.code, 502) @@ -463,7 +478,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): Blocked IP addresses, accessed directly, are not spidered. """ channel = self.make_request( - "GET", "preview_url?url=http://192.168.1.1", shorthand=False + "GET", + "/_matrix/media/v3/preview_url?url=http://192.168.1.1", + shorthand=False, ) # No requests made. @@ -479,7 +496,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): Blocked IP ranges, accessed directly, are not spidered. """ channel = self.make_request( - "GET", "preview_url?url=http://1.1.1.2", shorthand=False + "GET", "/_matrix/media/v3/preview_url?url=http://1.1.1.2", shorthand=False ) self.assertEqual(channel.code, 403) @@ -497,7 +514,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://example.com", + "/_matrix/media/v3/preview_url?url=http://example.com", shorthand=False, await_result=False, ) @@ -533,7 +550,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): ] channel = self.make_request( - "GET", "preview_url?url=http://example.com", shorthand=False + "GET", + "/_matrix/media/v3/preview_url?url=http://example.com", + shorthand=False, ) self.assertEqual(channel.code, 502) self.assertEqual( @@ -553,7 +572,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): ] channel = self.make_request( - "GET", "preview_url?url=http://example.com", shorthand=False + "GET", + "/_matrix/media/v3/preview_url?url=http://example.com", + shorthand=False, ) # No requests made. @@ -574,7 +595,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.lookups["example.com"] = [(IPv6Address, "2001:800::1")] channel = self.make_request( - "GET", "preview_url?url=http://example.com", shorthand=False + "GET", + "/_matrix/media/v3/preview_url?url=http://example.com", + shorthand=False, ) self.assertEqual(channel.code, 502) @@ -591,10 +614,11 @@ class URLPreviewTests(unittest.HomeserverTestCase): OPTIONS returns the OPTIONS. 
""" channel = self.make_request( - "OPTIONS", "preview_url?url=http://example.com", shorthand=False + "OPTIONS", + "/_matrix/media/v3/preview_url?url=http://example.com", + shorthand=False, ) - self.assertEqual(channel.code, 200) - self.assertEqual(channel.json_body, {}) + self.assertEqual(channel.code, 204) def test_accept_language_config_option(self) -> None: """ @@ -605,7 +629,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): # Build and make a request to the server channel = self.make_request( "GET", - "preview_url?url=http://example.com", + "/_matrix/media/v3/preview_url?url=http://example.com", shorthand=False, await_result=False, ) @@ -658,7 +682,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -708,7 +732,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -750,7 +774,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -790,7 +814,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -831,7 +855,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - f"preview_url?{query_params}", + f"/_matrix/media/v3/preview_url?{query_params}", shorthand=False, ) self.pump() @@ -852,7 +876,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -889,7 +913,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://twitter.com/matrixdotorg/status/12345", + "/_matrix/media/v3/preview_url?url=http://twitter.com/matrixdotorg/status/12345", shorthand=False, await_result=False, ) @@ -949,7 +973,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://twitter.com/matrixdotorg/status/12345", + "/_matrix/media/v3/preview_url?url=http://twitter.com/matrixdotorg/status/12345", shorthand=False, await_result=False, ) @@ -998,7 +1022,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://www.hulu.com/watch/12345", + "/_matrix/media/v3/preview_url?url=http://www.hulu.com/watch/12345", shorthand=False, await_result=False, ) @@ -1043,7 +1067,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://twitter.com/matrixdotorg/status/12345", + "/_matrix/media/v3/preview_url?url=http://twitter.com/matrixdotorg/status/12345", shorthand=False, await_result=False, ) @@ -1072,7 +1096,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://www.twitter.com/matrixdotorg/status/12345", + "/_matrix/media/v3/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345", shorthand=False, 
await_result=False, ) @@ -1164,7 +1188,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://www.twitter.com/matrixdotorg/status/12345", + "/_matrix/media/v3/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345", shorthand=False, await_result=False, ) @@ -1205,7 +1229,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://cdn.twitter.com/matrixdotorg", + "/_matrix/media/v3/preview_url?url=http://cdn.twitter.com/matrixdotorg", shorthand=False, await_result=False, ) @@ -1247,7 +1271,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): # Check fetching channel = self.make_request( "GET", - f"download/{host}/{media_id}", + f"/_matrix/media/v3/download/{host}/{media_id}", shorthand=False, await_result=False, ) @@ -1260,7 +1284,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - f"download/{host}/{media_id}", + f"/_matrix/media/v3/download/{host}/{media_id}", shorthand=False, await_result=False, ) @@ -1295,7 +1319,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): # Check fetching channel = self.make_request( "GET", - f"thumbnail/{host}/{media_id}?width=32&height=32&method=scale", + f"/_matrix/media/v3/thumbnail/{host}/{media_id}?width=32&height=32&method=scale", shorthand=False, await_result=False, ) @@ -1313,7 +1337,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - f"thumbnail/{host}/{media_id}?width=32&height=32&method=scale", + f"/_matrix/media/v3/thumbnail/{host}/{media_id}?width=32&height=32&method=scale", shorthand=False, await_result=False, ) @@ -1343,7 +1367,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.assertTrue(os.path.isdir(thumbnail_dir)) self.reactor.advance(IMAGE_CACHE_EXPIRY_MS * 1000 + 1) - self.get_success(self.preview_url._url_previewer._expire_url_cache_data()) + self.get_success(self.url_previewer._expire_url_cache_data()) for path in [file_path] + file_dirs + [thumbnail_dir] + thumbnail_dirs: self.assertFalse( @@ -1363,7 +1387,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=" + bad_url, + "/_matrix/media/v3/preview_url?url=" + bad_url, shorthand=False, await_result=False, ) @@ -1372,7 +1396,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=" + good_url, + "/_matrix/media/v3/preview_url?url=" + good_url, shorthand=False, await_result=False, ) @@ -1404,7 +1428,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=" + bad_url, + "/_matrix/media/v3/preview_url?url=" + bad_url, shorthand=False, await_result=False, ) diff --git a/tests/unittest.py b/tests/unittest.py index dbaff361b4..99ad02eb06 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -60,7 +60,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.server import DEFAULT_ROOM_VERSION from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.federation.transport.server import TransportLayerServer -from synapse.http.server import JsonResource +from synapse.http.server import JsonResource, OptionsResource from synapse.http.site import SynapseRequest, SynapseSite from synapse.logging.context import ( SENTINEL_CONTEXT, @@ -459,7 +459,7 @@ class HomeserverTestCase(TestCase): The default calls `self.create_resource_dict` and builds the 
         resultant dict into a tree.
         """
-        root_resource = Resource()
+        root_resource = OptionsResource()
         create_resource_tree(self.create_resource_dict(), root_resource)
         return root_resource

From 694802eecdfe18544be5252605bd427e3a5a2b2e Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 6 Oct 2023 07:23:20 -0400
Subject: [PATCH 555/562] Add documentation on background updates. (#16420)

---
 changelog.d/16420.doc               |  1 +
 docs/development/database_schema.md | 61 +++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+)
 create mode 100644 changelog.d/16420.doc

diff --git a/changelog.d/16420.doc b/changelog.d/16420.doc
new file mode 100644
index 0000000000..1c0c6b9577
--- /dev/null
+++ b/changelog.d/16420.doc
@@ -0,0 +1 @@
+Document internal background update mechanism.
diff --git a/docs/development/database_schema.md b/docs/development/database_schema.md
index 675080ae1b..37a06acc12 100644
--- a/docs/development/database_schema.md
+++ b/docs/development/database_schema.md
@@ -150,6 +150,67 @@ def run_upgrade(
     ...
 ```
 
+## Background updates
+
+It is sometimes appropriate to perform database migrations as part of a background
+process (instead of blocking Synapse until the migration is done). In particular,
+this is useful for migrating data when adding new columns or tables.
+
+Pending background updates are stored in the `background_updates` table and are
+denoted by a unique name, the current status (stored in JSON), and some dependency information:
+
+* Whether the update requires a previous update to be complete.
+* A rough ordering in which to complete updates.
+
+A new background update needs to be added to the `background_updates` table:
+
+```sql
+INSERT INTO background_updates (ordering, update_name, depends_on, progress_json) VALUES
+  (7706, 'my_background_update', 'a_previous_background_update', '{}');
+```
+
+And then needs an associated handler in the appropriate datastore:
+
+```python
+self.db_pool.updates.register_background_update_handler(
+    "my_background_update",
+    update_handler=self._my_background_update,
+)
+```
+
+There are a few types of updates that can be performed, see the `BackgroundUpdater`:
+
+* `register_background_update_handler`: A generic handler for custom SQL
+* `register_background_index_update`: Create an index in the background
+* `register_background_validate_constraint`: Validate a constraint in the background
+  (PostgreSQL-only)
+* `register_background_validate_constraint_and_delete_rows`: Similar to
+  `register_background_validate_constraint`, but deletes rows which don't fit
+  the constraint.
+
+For `register_background_update_handler`, the generic handler must track progress
+and then finalize the background update:
+
+```python
+async def _my_background_update(self, progress: JsonDict, batch_size: int) -> int:
+    def _do_something(txn: LoggingTransaction) -> int:
+        ...
+        self.db_pool.updates._background_update_progress_txn(
+            txn, "my_background_update", {"last_processed": last_processed}
+        )
+        return last_processed - prev_last_processed
+
+    num_processed = await self.db_pool.runInteraction("_do_something", _do_something)
+    await self.db_pool.updates._end_background_update("my_background_update")
+
+    return num_processed
+```
+
+Synapse will attempt to rate-limit how often background updates are run via the
+given batch-size and the returned number of processed entries (and how long the
+function took to run). See
+[background update controller callbacks](../modules/background_update_controller_callbacks.md).
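
To make the handler contract documented above concrete, here is a minimal end-to-end sketch of a batched handler, built only from the helpers named in the documentation (`runInteraction`, `_background_update_progress_txn`, `_end_background_update`). It is an illustration, not code from this patch: the table `my_table`, its `id` column, and the progress key are hypothetical.

```python
# Sketch of a complete background-update handler, assuming the datastore
# helpers named in the documentation above. `my_table` and its `id` column
# are hypothetical; progress is resumed from the stored JSON on each batch.
async def _my_background_update(self, progress: JsonDict, batch_size: int) -> int:
    last_id = progress.get("last_id", 0)

    def _process_batch(txn: LoggingTransaction) -> int:
        txn.execute(
            "SELECT id FROM my_table WHERE id > ? ORDER BY id LIMIT ?",
            (last_id, batch_size),
        )
        rows = txn.fetchall()
        if not rows:
            return 0

        # ... migrate each fetched row here ...

        # Persist how far we got so the next batch can resume from here.
        self.db_pool.updates._background_update_progress_txn(
            txn, "my_background_update", {"last_id": rows[-1][0]}
        )
        return len(rows)

    num_processed = await self.db_pool.runInteraction(
        "my_background_update", _process_batch
    )
    if num_processed == 0:
        # Nothing left to do: mark the update as finished.
        await self.db_pool.updates._end_background_update("my_background_update")
    return num_processed
```

The returned batch count is what feeds the rate-limiting described above: Synapse adjusts how often (and with what batch size) the handler is re-invoked based on how much work each call reported.
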
+
 ## Boolean columns
 
 Boolean columns require special treatment, since SQLite treats booleans the

From ae5b997cfac1a7d7540be7352f1c01295ce9100a Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 6 Oct 2023 07:25:44 -0400
Subject: [PATCH 556/562] Fix comments related to replication. (#16428)

---
 changelog.d/16428.misc                | 1 +
 synapse/federation/sender/__init__.py | 2 +-
 synapse/replication/tcp/commands.py   | 2 --
 3 files changed, 2 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/16428.misc

diff --git a/changelog.d/16428.misc b/changelog.d/16428.misc
new file mode 100644
index 0000000000..75c9c3b757
--- /dev/null
+++ b/changelog.d/16428.misc
@@ -0,0 +1 @@
+Improve code comments.
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index fb20fd8a10..7b6b1da090 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -67,7 +67,7 @@ The loop continues so long as there is anything to send. At each iteration of th
 
 When the `PerDestinationQueue` has the catch-up flag set, the *Catch-Up Transmission Loop*
 (`_catch_up_transmission_loop`) is used in lieu of the regular `_transaction_transmission_loop`.
-(Only once the catch-up mode has been exited can the regular tranaction transmission behaviour
+(Only once the catch-up mode has been exited can the regular transaction transmission behaviour
 be resumed.)
 
 *Catch-Up Mode*, entered upon Synapse startup or once a homeserver has fallen behind due to
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index 1b92302fd3..0f0f851b79 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -446,8 +446,6 @@ class RemoteServerUpCommand(_SimpleCommand):
     """Sent when a worker has detected that a remote server is no longer
     "down" and retry timings should be reset.
 
-    If sent from a client the server will relay to all other workers.
-
     Format::
 
         REMOTE_SERVER_UP

From fc31b495b3a7f170019591c2e40e699b61c067a1 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 6 Oct 2023 07:27:35 -0400
Subject: [PATCH 557/562] Stop sending incorrect knock_state_events. (#16403)

Synapse was incorrectly implemented with a knock_state_events property
on some APIs (instead of knock_room_state). This was fixed in Synapse
1.70.0, but *both* fields were sent to also be compatible with Synapse
versions expecting the wrong field. Enough time has passed that only the
correct field needs to be included/handled.

---
 changelog.d/16403.bugfix                    |  1 +
 synapse/federation/federation_client.py     |  4 ++--
 synapse/federation/federation_server.py     |  9 +--------
 synapse/federation/transport/client.py      |  2 +-
 synapse/handlers/federation.py              | 13 ++-----------
 tests/federation/transport/test_knocking.py |  2 +-
 6 files changed, 8 insertions(+), 23 deletions(-)
 create mode 100644 changelog.d/16403.bugfix

diff --git a/changelog.d/16403.bugfix b/changelog.d/16403.bugfix
new file mode 100644
index 0000000000..453c975a63
--- /dev/null
+++ b/changelog.d/16403.bugfix
@@ -0,0 +1 @@
+Remove legacy unspecced `knock_state_events` field returned in some responses.
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index c8bc46415d..1a7fa175ec 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -1402,7 +1402,7 @@ class FederationClient(FederationBase):
             The remote homeserver return some state from the room.
The response dictionary is in the form: - {"knock_state_events": [, ...]} + {"knock_room_state": [, ...]} The list of state events may be empty. @@ -1429,7 +1429,7 @@ class FederationClient(FederationBase): The remote homeserver can optionally return some state from the room. The response dictionary is in the form: - {"knock_state_events": [, ...]} + {"knock_room_state": [, ...]} The list of state events may be empty. """ diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index ec8e770430..6ac8d16095 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -850,14 +850,7 @@ class FederationServer(FederationBase): context, self._room_prejoin_state_types ) ) - return { - "knock_room_state": stripped_room_state, - # Since v1.37, Synapse incorrectly used "knock_state_events" for this field. - # Thus, we also populate a 'knock_state_events' with the same content to - # support old instances. - # See https://github.com/matrix-org/synapse/issues/14088. - "knock_state_events": stripped_room_state, - } + return {"knock_room_state": stripped_room_state} async def _on_send_membership_event( self, origin: str, content: JsonDict, membership_type: str, room_id: str diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index b5e4b2680e..fab4800717 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -431,7 +431,7 @@ class TransportLayerClient: The remote homeserver can optionally return some state from the room. The response dictionary is in the form: - {"knock_state_events": [, ...]} + {"knock_room_state": [, ...]} The list of state events may be empty. """ diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 29cd45550a..807a0867cc 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -868,19 +868,10 @@ class FederationHandler: # This is a bit of a hack and is cribbing off of invites. Basically we # store the room state here and retrieve it again when this event appears # in the invitee's sync stream. It is stripped out for all other local users. - stripped_room_state = ( - knock_response.get("knock_room_state") - # Since v1.37, Synapse incorrectly used "knock_state_events" for this field. - # Thus, we also check for a 'knock_state_events' to support old instances. - # See https://github.com/matrix-org/synapse/issues/14088. 
-            or knock_response.get("knock_state_events")
-        )
+        stripped_room_state = knock_response.get("knock_room_state")
 
         if stripped_room_state is None:
-            raise KeyError(
-                "Missing 'knock_room_state' (or legacy 'knock_state_events') field in "
-                "send_knock response"
-            )
+            raise KeyError("Missing 'knock_room_state' field in send_knock response")
 
         event.unsigned["knock_room_state"] = stripped_room_state
 
diff --git a/tests/federation/transport/test_knocking.py b/tests/federation/transport/test_knocking.py
index 3f42f79f26..b63ef3d4ed 100644
--- a/tests/federation/transport/test_knocking.py
+++ b/tests/federation/transport/test_knocking.py
@@ -308,7 +308,7 @@ class FederationKnockingTestCase(
         self.assertEqual(200, channel.code, channel.result)
 
         # Check that we got the stripped room state in return
-        room_state_events = channel.json_body["knock_state_events"]
+        room_state_events = channel.json_body["knock_room_state"]
 
         # Validate the stripped room state events
         self.check_knock_room_state_against_room_state(

From cabd57746004fe2dacc11aa8d373854a3d25e306 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 6 Oct 2023 08:29:33 -0400
Subject: [PATCH 558/562] Drop unused tables & unneeded access token ID for events. (#16268)

Drop the event_txn_id table and the tables related to MSC2716, which is
no longer supported in Synapse.

---
 changelog.d/16268.misc                        |  1 +
 synapse/handlers/message.py                   |  8 ++-----
 synapse/storage/schema/__init__.py            |  4 ++--
 .../main/delta/82/03_drop_old_tables.sql      | 24 +++++++++++++++++++
 4 files changed, 29 insertions(+), 8 deletions(-)
 create mode 100644 changelog.d/16268.misc
 create mode 100644 synapse/storage/schema/main/delta/82/03_drop_old_tables.sql

diff --git a/changelog.d/16268.misc b/changelog.d/16268.misc
new file mode 100644
index 0000000000..26059b108e
--- /dev/null
+++ b/changelog.d/16268.misc
@@ -0,0 +1 @@
+Clean-up unused tables.
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 44dbbf81dd..d0d4626ed6 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -693,13 +693,9 @@ class EventCreationHandler:
         if require_consent and not is_exempt:
             await self.assert_accepted_privacy_policy(requester)
 
-        # Save the access token ID, the device ID and the transaction ID in the event
-        # internal metadata. This is useful to determine if we should echo the
-        # transaction_id in events.
+        # Save the device ID and the transaction ID in the event internal metadata.
+        # This is useful to determine if we should echo the transaction_id in events.
         # See `synapse.events.utils.EventClientSerializer.serialize_event`
-        if requester.access_token_id is not None:
-            builder.internal_metadata.token_id = requester.access_token_id
-
         if requester.device_id is not None:
             builder.internal_metadata.device_id = requester.device_id
 
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index 5b50bd66bc..de89de7d74 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -125,8 +125,8 @@ Changes in SCHEMA_VERSION = 82
 
 SCHEMA_COMPAT_VERSION = (
-    # The `event_txn_id_device_id` must be written to for new events.
-    80
+    # The event_txn_id table and tables from MSC2716 no longer exist.
+ 82 ) """Limit on how far the synapse codebase can be rolled back without breaking db compat diff --git a/synapse/storage/schema/main/delta/82/03_drop_old_tables.sql b/synapse/storage/schema/main/delta/82/03_drop_old_tables.sql new file mode 100644 index 0000000000..149020bbd7 --- /dev/null +++ b/synapse/storage/schema/main/delta/82/03_drop_old_tables.sql @@ -0,0 +1,24 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Drop the old event transaction ID table, the event_txn_id_device_id table +-- should be used instead. +DROP TABLE IF EXISTS event_txn_id; + +-- Drop tables related to MSC2716 since the implementation is being removed +DROP TABLE insertion_events; +DROP TABLE insertion_event_edges; +DROP TABLE insertion_event_extremities; +DROP TABLE batch_events; From 7615e2bf48d7bed3da7235d60f84a3c847ac78f5 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 6 Oct 2023 10:12:43 -0400 Subject: [PATCH 559/562] Return ThumbnailInfo in more places (#16438) Improves type hints by using concrete types instead of dictionaries. --- changelog.d/16438.misc | 1 + synapse/media/_base.py | 2 +- synapse/media/media_repository.py | 3 + synapse/rest/media/thumbnail_resource.py | 98 ++++++++----------- .../databases/main/media_repository.py | 30 +++++- tests/media/test_media_storage.py | 36 +++---- 6 files changed, 90 insertions(+), 80 deletions(-) create mode 100644 changelog.d/16438.misc diff --git a/changelog.d/16438.misc b/changelog.d/16438.misc new file mode 100644 index 0000000000..bd7cdd42af --- /dev/null +++ b/changelog.d/16438.misc @@ -0,0 +1 @@ +Reduce memory allocations. diff --git a/synapse/media/_base.py b/synapse/media/_base.py index d103b43449..13345acf75 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py @@ -332,7 +332,7 @@ class ThumbnailInfo: # Content type of thumbnail, e.g. image/png type: str # The size of the media file, in bytes. 
- length: Optional[int] = None + length: int @attr.s(slots=True, frozen=True, auto_attribs=True) diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index d11c2ff4ee..7fd46901f7 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -624,6 +624,7 @@ class MediaRepository: height=t_height, method=t_method, type=t_type, + length=t_byte_source.tell(), ), ) @@ -694,6 +695,7 @@ class MediaRepository: height=t_height, method=t_method, type=t_type, + length=t_byte_source.tell(), ), ) @@ -839,6 +841,7 @@ class MediaRepository: height=t_height, method=t_method, type=t_type, + length=t_byte_source.tell(), ), ) diff --git a/synapse/rest/media/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py index f9cd773f77..85b6bdbe72 100644 --- a/synapse/rest/media/thumbnail_resource.py +++ b/synapse/rest/media/thumbnail_resource.py @@ -15,7 +15,7 @@ import logging import re -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, List, Optional, Tuple from synapse.api.errors import Codes, SynapseError, cs_error from synapse.config.repository import THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP @@ -159,30 +159,24 @@ class ThumbnailResource(RestServlet): thumbnail_infos = await self.store.get_local_media_thumbnails(media_id) for info in thumbnail_infos: - t_w = info["thumbnail_width"] == desired_width - t_h = info["thumbnail_height"] == desired_height - t_method = info["thumbnail_method"] == desired_method - t_type = info["thumbnail_type"] == desired_type + t_w = info.width == desired_width + t_h = info.height == desired_height + t_method = info.method == desired_method + t_type = info.type == desired_type if t_w and t_h and t_method and t_type: file_info = FileInfo( server_name=None, file_id=media_id, url_cache=media_info["url_cache"], - thumbnail=ThumbnailInfo( - width=info["thumbnail_width"], - height=info["thumbnail_height"], - type=info["thumbnail_type"], - method=info["thumbnail_method"], - ), + thumbnail=info, ) - t_type = file_info.thumbnail_type - t_length = info["thumbnail_length"] - responder = await self.media_storage.fetch_media(file_info) if responder: - await respond_with_responder(request, responder, t_type, t_length) + await respond_with_responder( + request, responder, info.type, info.length + ) return logger.debug("We don't have a thumbnail of that size. 
Generating") @@ -222,29 +216,23 @@ class ThumbnailResource(RestServlet): file_id = media_info["filesystem_id"] for info in thumbnail_infos: - t_w = info["thumbnail_width"] == desired_width - t_h = info["thumbnail_height"] == desired_height - t_method = info["thumbnail_method"] == desired_method - t_type = info["thumbnail_type"] == desired_type + t_w = info.width == desired_width + t_h = info.height == desired_height + t_method = info.method == desired_method + t_type = info.type == desired_type if t_w and t_h and t_method and t_type: file_info = FileInfo( server_name=server_name, file_id=media_info["filesystem_id"], - thumbnail=ThumbnailInfo( - width=info["thumbnail_width"], - height=info["thumbnail_height"], - type=info["thumbnail_type"], - method=info["thumbnail_method"], - ), + thumbnail=info, ) - t_type = file_info.thumbnail_type - t_length = info["thumbnail_length"] - responder = await self.media_storage.fetch_media(file_info) if responder: - await respond_with_responder(request, responder, t_type, t_length) + await respond_with_responder( + request, responder, info.type, info.length + ) return logger.debug("We don't have a thumbnail of that size. Generating") @@ -304,7 +292,7 @@ class ThumbnailResource(RestServlet): desired_height: int, desired_method: str, desired_type: str, - thumbnail_infos: List[Dict[str, Any]], + thumbnail_infos: List[ThumbnailInfo], media_id: str, file_id: str, url_cache: bool, @@ -319,7 +307,7 @@ class ThumbnailResource(RestServlet): desired_height: The desired height, the returned thumbnail may be larger than this. desired_method: The desired method used to generate the thumbnail. desired_type: The desired content-type of the thumbnail. - thumbnail_infos: A list of dictionaries of candidate thumbnails. + thumbnail_infos: A list of thumbnail info of candidate thumbnails. file_id: The ID of the media that a thumbnail is being requested for. url_cache: True if this is from a URL cache. server_name: The server name, if this is a remote thumbnail. @@ -443,7 +431,7 @@ class ThumbnailResource(RestServlet): desired_height: int, desired_method: str, desired_type: str, - thumbnail_infos: List[Dict[str, Any]], + thumbnail_infos: List[ThumbnailInfo], file_id: str, url_cache: bool, server_name: Optional[str], @@ -456,7 +444,7 @@ class ThumbnailResource(RestServlet): desired_height: The desired height, the returned thumbnail may be larger than this. desired_method: The desired method used to generate the thumbnail. desired_type: The desired content-type of the thumbnail. - thumbnail_infos: A list of dictionaries of candidate thumbnails. + thumbnail_infos: A list of thumbnail infos of candidate thumbnails. file_id: The ID of the media that a thumbnail is being requested for. url_cache: True if this is from a URL cache. server_name: The server name, if this is a remote thumbnail. @@ -474,21 +462,25 @@ class ThumbnailResource(RestServlet): if desired_method == "crop": # Thumbnails that match equal or larger sizes of desired width/height. - crop_info_list: List[Tuple[int, int, int, bool, int, Dict[str, Any]]] = [] + crop_info_list: List[ + Tuple[int, int, int, bool, Optional[int], ThumbnailInfo] + ] = [] # Other thumbnails. - crop_info_list2: List[Tuple[int, int, int, bool, int, Dict[str, Any]]] = [] + crop_info_list2: List[ + Tuple[int, int, int, bool, Optional[int], ThumbnailInfo] + ] = [] for info in thumbnail_infos: # Skip thumbnails generated with different methods. 
- if info["thumbnail_method"] != "crop": + if info.method != "crop": continue - t_w = info["thumbnail_width"] - t_h = info["thumbnail_height"] + t_w = info.width + t_h = info.height aspect_quality = abs(d_w * t_h - d_h * t_w) min_quality = 0 if d_w <= t_w and d_h <= t_h else 1 size_quality = abs((d_w - t_w) * (d_h - t_h)) - type_quality = desired_type != info["thumbnail_type"] - length_quality = info["thumbnail_length"] + type_quality = desired_type != info.type + length_quality = info.length if t_w >= d_w or t_h >= d_h: crop_info_list.append( ( @@ -513,7 +505,7 @@ class ThumbnailResource(RestServlet): ) # Pick the most appropriate thumbnail. Some values of `desired_width` and # `desired_height` may result in a tie, in which case we avoid comparing on - # the thumbnail info dictionary and pick the thumbnail that appears earlier + # the thumbnail info and pick the thumbnail that appears earlier # in the list of candidates. if crop_info_list: thumbnail_info = min(crop_info_list, key=lambda t: t[:-1])[-1] @@ -521,20 +513,20 @@ class ThumbnailResource(RestServlet): thumbnail_info = min(crop_info_list2, key=lambda t: t[:-1])[-1] elif desired_method == "scale": # Thumbnails that match equal or larger sizes of desired width/height. - info_list: List[Tuple[int, bool, int, Dict[str, Any]]] = [] + info_list: List[Tuple[int, bool, int, ThumbnailInfo]] = [] # Other thumbnails. - info_list2: List[Tuple[int, bool, int, Dict[str, Any]]] = [] + info_list2: List[Tuple[int, bool, int, ThumbnailInfo]] = [] for info in thumbnail_infos: # Skip thumbnails generated with different methods. - if info["thumbnail_method"] != "scale": + if info.method != "scale": continue - t_w = info["thumbnail_width"] - t_h = info["thumbnail_height"] + t_w = info.width + t_h = info.height size_quality = abs((d_w - t_w) * (d_h - t_h)) - type_quality = desired_type != info["thumbnail_type"] - length_quality = info["thumbnail_length"] + type_quality = desired_type != info.type + length_quality = info.length if t_w >= d_w or t_h >= d_h: info_list.append((size_quality, type_quality, length_quality, info)) else: @@ -543,7 +535,7 @@ class ThumbnailResource(RestServlet): ) # Pick the most appropriate thumbnail. Some values of `desired_width` and # `desired_height` may result in a tie, in which case we avoid comparing on - # the thumbnail info dictionary and pick the thumbnail that appears earlier + # the thumbnail info and pick the thumbnail that appears earlier # in the list of candidates. if info_list: thumbnail_info = min(info_list, key=lambda t: t[:-1])[-1] @@ -555,13 +547,7 @@ class ThumbnailResource(RestServlet): file_id=file_id, url_cache=url_cache, server_name=server_name, - thumbnail=ThumbnailInfo( - width=thumbnail_info["thumbnail_width"], - height=thumbnail_info["thumbnail_height"], - type=thumbnail_info["thumbnail_type"], - method=thumbnail_info["thumbnail_method"], - length=thumbnail_info["thumbnail_length"], - ), + thumbnail=thumbnail_info, ) # No matching thumbnail was found. 
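
The `min(..., key=lambda t: t[:-1])` calls above rely on Python's lexicographic tuple comparison: the payload is deliberately excluded from the key so that exact ties are broken by position in the candidate list, as the patch's own comment explains, rather than by comparing the payloads themselves. A standalone sketch of the pattern, with made-up candidate data:

```python
# Rank candidates by a tuple of quality measures (lower is better), excluding
# the payload (last element) from the comparison so that exact ties fall back
# to list order. min() returns the first of several equal-key elements.
candidates = [
    # (size_quality, type_quality, length_quality, payload)
    (4, False, 256, "thumb-a"),
    (4, False, 256, "thumb-b"),  # ties with thumb-a on every measure
    (9, True, 512, "thumb-c"),
]

best = min(candidates, key=lambda t: t[:-1])[-1]
assert best == "thumb-a"  # the earlier of the two tied candidates wins
```
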
diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index 8cebeb5189..2e6b176bd2 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -28,6 +28,7 @@ from typing import ( from synapse.api.constants import Direction from synapse.logging.opentracing import trace +from synapse.media._base import ThumbnailInfo from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -435,8 +436,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): desc="store_url_cache", ) - async def get_local_media_thumbnails(self, media_id: str) -> List[Dict[str, Any]]: - return await self.db_pool.simple_select_list( + async def get_local_media_thumbnails(self, media_id: str) -> List[ThumbnailInfo]: + rows = await self.db_pool.simple_select_list( "local_media_repository_thumbnails", {"media_id": media_id}, ( @@ -448,6 +449,16 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): ), desc="get_local_media_thumbnails", ) + return [ + ThumbnailInfo( + width=row["thumbnail_width"], + height=row["thumbnail_height"], + method=row["thumbnail_method"], + type=row["thumbnail_type"], + length=row["thumbnail_length"], + ) + for row in rows + ] @trace async def store_local_thumbnail( @@ -556,8 +567,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): async def get_remote_media_thumbnails( self, origin: str, media_id: str - ) -> List[Dict[str, Any]]: - return await self.db_pool.simple_select_list( + ) -> List[ThumbnailInfo]: + rows = await self.db_pool.simple_select_list( "remote_media_cache_thumbnails", {"media_origin": origin, "media_id": media_id}, ( @@ -566,10 +577,19 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "thumbnail_method", "thumbnail_type", "thumbnail_length", - "filesystem_id", ), desc="get_remote_media_thumbnails", ) + return [ + ThumbnailInfo( + width=row["thumbnail_width"], + height=row["thumbnail_height"], + method=row["thumbnail_method"], + type=row["thumbnail_type"], + length=row["thumbnail_length"], + ) + for row in rows + ] @trace async def get_remote_media_thumbnail( diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py index ba00e35a9e..15f5d644e4 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -34,7 +34,7 @@ from synapse.api.errors import Codes from synapse.events import EventBase from synapse.http.types import QueryParams from synapse.logging.context import make_deferred_yieldable -from synapse.media._base import FileInfo +from synapse.media._base import FileInfo, ThumbnailInfo from synapse.media.filepath import MediaFilePaths from synapse.media.media_storage import MediaStorage, ReadableFileWrapper from synapse.media.storage_provider import FileStorageProviderBackend @@ -605,6 +605,8 @@ class MediaRepoTests(unittest.HomeserverTestCase): """Test that choosing between thumbnails with the same quality rating succeeds. 
We are not particular about which thumbnail is chosen.""" + + content_type = self.test_image.content_type.decode() media_repo = self.hs.get_media_repository() thumbnail_resouce = ThumbnailResource( self.hs, media_repo, media_repo.media_storage @@ -615,26 +617,24 @@ class MediaRepoTests(unittest.HomeserverTestCase): desired_width=desired_size, desired_height=desired_size, desired_method=method, - desired_type=self.test_image.content_type, # type: ignore[arg-type] + desired_type=content_type, # Provide two identical thumbnails which are guaranteed to have the same # quality rating. thumbnail_infos=[ - { - "thumbnail_width": 32, - "thumbnail_height": 32, - "thumbnail_method": method, - "thumbnail_type": self.test_image.content_type, - "thumbnail_length": 256, - "filesystem_id": f"thumbnail1{self.test_image.extension.decode()}", - }, - { - "thumbnail_width": 32, - "thumbnail_height": 32, - "thumbnail_method": method, - "thumbnail_type": self.test_image.content_type, - "thumbnail_length": 256, - "filesystem_id": f"thumbnail2{self.test_image.extension.decode()}", - }, + ThumbnailInfo( + width=32, + height=32, + method=method, + type=content_type, + length=256, + ), + ThumbnailInfo( + width=32, + height=32, + method=method, + type=content_type, + length=256, + ), ], file_id=f"image{self.test_image.extension.decode()}", url_cache=False, From 06bbf1029cf2558213646d3b692621bed5178066 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 6 Oct 2023 11:41:57 -0400 Subject: [PATCH 560/562] Convert simple_select_list_paginate_txn to return tuples. (#16433) --- changelog.d/16433.misc | 1 + synapse/api/presence.py | 4 -- synapse/federation/send_queue.py | 2 +- synapse/rest/admin/federation.py | 8 ++- synapse/storage/database.py | 6 +- synapse/storage/databases/main/presence.py | 58 +++++++++++++------ .../storage/databases/main/transactions.py | 27 +++++---- 7 files changed, 67 insertions(+), 39 deletions(-) create mode 100644 changelog.d/16433.misc diff --git a/changelog.d/16433.misc b/changelog.d/16433.misc new file mode 100644 index 0000000000..bd7cdd42af --- /dev/null +++ b/changelog.d/16433.misc @@ -0,0 +1 @@ +Reduce memory allocations. 
diff --git a/synapse/api/presence.py b/synapse/api/presence.py index b78f419994..afef6712e1 100644 --- a/synapse/api/presence.py +++ b/synapse/api/presence.py @@ -80,10 +80,6 @@ class UserPresenceState: def as_dict(self) -> JsonDict: return attr.asdict(self) - @staticmethod - def from_dict(d: JsonDict) -> "UserPresenceState": - return UserPresenceState(**d) - def copy_and_replace(self, **kwargs: Any) -> "UserPresenceState": return attr.evolve(self, **kwargs) diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 6520795635..525968bcba 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -395,7 +395,7 @@ class PresenceDestinationsRow(BaseFederationRow): @staticmethod def from_data(data: JsonDict) -> "PresenceDestinationsRow": return PresenceDestinationsRow( - state=UserPresenceState.from_dict(data["state"]), destinations=data["dests"] + state=UserPresenceState(**data["state"]), destinations=data["dests"] ) def to_data(self) -> JsonDict: diff --git a/synapse/rest/admin/federation.py b/synapse/rest/admin/federation.py index e0ee55bd0e..8a617af599 100644 --- a/synapse/rest/admin/federation.py +++ b/synapse/rest/admin/federation.py @@ -198,7 +198,13 @@ class DestinationMembershipRestServlet(RestServlet): rooms, total = await self._store.get_destination_rooms_paginate( destination, start, limit, direction ) - response = {"rooms": rooms, "total": total} + response = { + "rooms": [ + {"room_id": room_id, "stream_ordering": stream_ordering} + for room_id, stream_ordering in rooms + ], + "total": total, + } if (start + limit) < total: response["next_token"] = str(start + len(rooms)) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index ca894edd5a..7d8af5c610 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -2418,7 +2418,7 @@ class DatabasePool: keyvalues: Optional[Dict[str, Any]] = None, exclude_keyvalues: Optional[Dict[str, Any]] = None, order_direction: str = "ASC", - ) -> List[Dict[str, Any]]: + ) -> List[Tuple[Any, ...]]: """ Executes a SELECT query on the named table with start and limit, of row numbers, which may return zero or number of rows from start to limit, @@ -2447,7 +2447,7 @@ class DatabasePool: order_direction: Whether the results should be ordered "ASC" or "DESC". Returns: - The result as a list of dictionaries. + The result as a list of tuples. 
""" if order_direction not in ["ASC", "DESC"]: raise ValueError("order_direction must be one of 'ASC' or 'DESC'.") @@ -2474,7 +2474,7 @@ class DatabasePool: ) txn.execute(sql, arg_list + [limit, start]) - return cls.cursor_to_dict(txn) + return txn.fetchall() async def simple_search_list( self, diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index 805c23f89f..519f05fb60 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -20,6 +20,7 @@ from typing import ( Mapping, Optional, Tuple, + Union, cast, ) @@ -385,28 +386,47 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) limit = 100 offset = 0 while True: - rows = await self.db_pool.runInteraction( - "get_presence_for_all_users", - self.db_pool.simple_select_list_paginate_txn, - "presence_stream", - orderby="stream_id", - start=offset, - limit=limit, - exclude_keyvalues=exclude_keyvalues, - retcols=( - "user_id", - "state", - "last_active_ts", - "last_federation_update_ts", - "last_user_sync_ts", - "status_msg", - "currently_active", + rows = cast( + List[Tuple[str, str, int, int, int, Optional[str], Union[int, bool]]], + await self.db_pool.runInteraction( + "get_presence_for_all_users", + self.db_pool.simple_select_list_paginate_txn, + "presence_stream", + orderby="stream_id", + start=offset, + limit=limit, + exclude_keyvalues=exclude_keyvalues, + retcols=( + "user_id", + "state", + "last_active_ts", + "last_federation_update_ts", + "last_user_sync_ts", + "status_msg", + "currently_active", + ), + order_direction="ASC", ), - order_direction="ASC", ) - for row in rows: - users_to_state[row["user_id"]] = UserPresenceState(**row) + for ( + user_id, + state, + last_active_ts, + last_federation_update_ts, + last_user_sync_ts, + status_msg, + currently_active, + ) in rows: + users_to_state[user_id] = UserPresenceState( + user_id=user_id, + state=state, + last_active_ts=last_active_ts, + last_federation_update_ts=last_federation_update_ts, + last_user_sync_ts=last_user_sync_ts, + status_msg=status_msg, + currently_active=bool(currently_active), + ) # We've run out of updates to query if len(rows) < limit: diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 8f70eff809..f35757280d 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -526,7 +526,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): start: int, limit: int, direction: Direction = Direction.FORWARDS, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[List[Tuple[str, int]], int]: """Function to retrieve a paginated list of destination's rooms. This will return a json list of rooms and the total number of rooms. @@ -537,12 +537,14 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): limit: number of rows to retrieve direction: sort ascending or descending by room_id Returns: - A tuple of a dict of rooms and a count of total rooms. + A tuple of a list of room tuples and a count of total rooms. + + Each room tuple is room_id, stream_ordering. 
""" def get_destination_rooms_paginate_txn( txn: LoggingTransaction, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[List[Tuple[str, int]], int]: if direction == Direction.BACKWARDS: order = "DESC" else: @@ -556,14 +558,17 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): txn.execute(sql, [destination]) count = cast(Tuple[int], txn.fetchone())[0] - rooms = self.db_pool.simple_select_list_paginate_txn( - txn=txn, - table="destination_rooms", - orderby="room_id", - start=start, - limit=limit, - retcols=("room_id", "stream_ordering"), - order_direction=order, + rooms = cast( + List[Tuple[str, int]], + self.db_pool.simple_select_list_paginate_txn( + txn=txn, + table="destination_rooms", + orderby="room_id", + start=start, + limit=limit, + retcols=("room_id", "stream_ordering"), + order_direction=order, + ), ) return rooms, count From 1f10c208068ef8788b6796c54a3604ae51caf951 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 6 Oct 2023 18:31:52 +0100 Subject: [PATCH 561/562] Apply join rate limiter outside the lineariser (#16441) --- changelog.d/16441.misc | 1 + synapse/handlers/room_member.py | 43 ++++++++++++++++++--------------- tests/rest/client/test_rooms.py | 24 ++++++++++++++++++ 3 files changed, 48 insertions(+), 20 deletions(-) create mode 100644 changelog.d/16441.misc diff --git a/changelog.d/16441.misc b/changelog.d/16441.misc new file mode 100644 index 0000000000..32264a62b2 --- /dev/null +++ b/changelog.d/16441.misc @@ -0,0 +1 @@ +Improve rate limiting logic. diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 90343c2306..1b50495af1 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -382,8 +382,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): and persist a new event for the new membership change. Args: - requester: - target: + requester: User requesting the membership change, i.e. the sender of the + desired membership event. + target: Use whose membership should change, i.e. the state_key of the + desired membership event. room_id: membership: @@ -415,7 +417,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): Returns: Tuple of event ID and stream ordering position """ - user_id = target.to_string() if content is None: @@ -475,21 +476,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): (EventTypes.Member, user_id), None ) - if event.membership == Membership.JOIN: - newly_joined = True - if prev_member_event_id: - prev_member_event = await self.store.get_event( - prev_member_event_id - ) - newly_joined = prev_member_event.membership != Membership.JOIN - - # Only rate-limit if the user actually joined the room, otherwise we'll end - # up blocking profile updates. - if newly_joined and ratelimit: - await self._join_rate_limiter_local.ratelimit(requester) - await self._join_rate_per_room_limiter.ratelimit( - requester, key=room_id, update=False - ) with opentracing.start_active_span("handle_new_client_event"): result_event = ( await self.event_creation_handler.handle_new_client_event( @@ -618,6 +604,25 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): Raises: ShadowBanError if a shadow-banned requester attempts to send an invite. """ + if ratelimit: + if action == Membership.JOIN: + # Only rate-limit if the user isn't already joined to the room, otherwise + # we'll end up blocking profile updates. 
+                (
+                    current_membership,
+                    _,
+                ) = await self.store.get_local_current_membership_for_user_in_room(
+                    requester.user.to_string(),
+                    room_id,
+                )
+                if current_membership != Membership.JOIN:
+                    await self._join_rate_limiter_local.ratelimit(requester)
+                    await self._join_rate_per_room_limiter.ratelimit(
+                        requester, key=room_id, update=False
+                    )
+            elif action == Membership.INVITE:
+                await self.ratelimit_invite(requester, room_id, target.to_string())
+
         if action == Membership.INVITE and requester.shadow_banned:
             # We randomly sleep a bit just to annoy the requester.
             await self.clock.sleep(random.randint(1, 10))
@@ -794,8 +799,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
 
         if effective_membership_state == Membership.INVITE:
             target_id = target.to_string()
-            if ratelimit:
-                await self.ratelimit_invite(requester, room_id, target_id)
 
             # block any attempts to invite the server notices mxid
             if target_id == self._server_notices_mxid:
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index 7627823d3f..aaa4f3bba0 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -1444,6 +1444,30 @@ class RoomJoinRatelimitTestCase(RoomBase):
             room_ids[3], joiner_user_id, expect_code=HTTPStatus.TOO_MANY_REQUESTS
         )
 
+    @unittest.override_config(
+        {"rc_joins": {"local": {"per_second": 0.5, "burst_count": 3}}}
+    )
+    def test_join_attempts_local_ratelimit(self) -> None:
+        """Tests that unsuccessful joins that end up being denied are rate-limited."""
+        # Create 4 rooms
+        room_ids = [
+            self.helper.create_room_as(self.user_id, is_public=True) for _ in range(4)
+        ]
+        # Pre-emptively ban the user who will attempt to join.
+        joiner_user_id = self.register_user("joiner", "secret")
+        for room_id in room_ids:
+            self.helper.ban(room_id, self.user_id, joiner_user_id)
+
+        # Now make a new user try to join some of them.
+        # The user can make 3 requests, each of which should be denied.
+        for room_id in room_ids[0:3]:
+            self.helper.join(room_id, joiner_user_id, expect_code=HTTPStatus.FORBIDDEN)
+
+        # The fourth attempt should be rate limited.
+        self.helper.join(
+            room_ids[3], joiner_user_id, expect_code=HTTPStatus.TOO_MANY_REQUESTS
+        )
+
     @unittest.override_config(
         {"rc_joins": {"local": {"per_second": 0.5, "burst_count": 3}}}
     )

From 32fd9bc673ec025af5b49f4ed0961134a6101c38 Mon Sep 17 00:00:00 2001
From: Christoph <47949835+Sir-Photch@users.noreply.github.com>
Date: Mon, 9 Oct 2023 02:16:07 -0700
Subject: [PATCH 562/562] Fix possible AttributeError when account-api is called over unix socket (#16404)

Fixes #16396

---
 changelog.d/16404.bugfix     | 1 +
 synapse/api/auth/internal.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/16404.bugfix

diff --git a/changelog.d/16404.bugfix b/changelog.d/16404.bugfix
new file mode 100644
index 0000000000..3fd5028b33
--- /dev/null
+++ b/changelog.d/16404.bugfix
@@ -0,0 +1 @@
+Fixes a possible `AttributeError` when `_matrix/client/v3/account/whoami` is called over a unix socket. Contributed by @Sir-Photch.
diff --git a/synapse/api/auth/internal.py b/synapse/api/auth/internal.py
index a75f6f2cc4..36ee9c8b8f 100644
--- a/synapse/api/auth/internal.py
+++ b/synapse/api/auth/internal.py
@@ -115,7 +115,7 @@ class InternalAuth(BaseAuth):
 
         Once get_user_by_req has set up the opentracing span, this does the actual work.
""" try: - ip_addr = request.getClientAddress().host + ip_addr = request.get_client_ip_if_available() user_agent = get_request_user_agent(request) access_token = self.get_access_token_from_request(request)