2018-07-20 08:32:23 -06:00
|
|
|
# Copyright 2014 - 2016 OpenMarket Ltd
|
|
|
|
# Copyright 2017 - 2018 New Vector Ltd
|
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
import logging
|
2023-01-26 10:31:58 -07:00
|
|
|
from typing import TYPE_CHECKING, Dict, List, Optional, Set
|
2018-07-20 08:32:23 -06:00
|
|
|
|
2021-09-20 06:56:23 -06:00
|
|
|
import attr
|
|
|
|
|
2018-07-20 08:32:23 -06:00
|
|
|
from twisted.python.failure import Failure
|
|
|
|
|
2023-01-27 05:27:55 -07:00
|
|
|
from synapse.api.constants import Direction, EventTypes, Membership
|
2018-07-20 08:32:23 -06:00
|
|
|
from synapse.api.errors import SynapseError
|
2020-08-28 14:47:11 -06:00
|
|
|
from synapse.api.filtering import Filter
|
2022-03-03 08:43:06 -07:00
|
|
|
from synapse.events.utils import SerializeEventConfig
|
2021-11-12 05:35:31 -07:00
|
|
|
from synapse.handlers.room import ShutdownRoomResponse
|
2023-08-16 08:19:54 -06:00
|
|
|
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
|
2022-08-03 09:57:38 -06:00
|
|
|
from synapse.logging.opentracing import trace
|
2019-11-04 10:09:22 -07:00
|
|
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
2022-09-07 03:54:44 -06:00
|
|
|
from synapse.rest.admin._base import assert_user_is_admin
|
2020-08-28 14:47:11 -06:00
|
|
|
from synapse.streams.config import PaginationConfig
|
2023-01-26 10:31:58 -07:00
|
|
|
from synapse.types import JsonDict, Requester, StrCollection, StreamKeyType
|
2022-12-12 09:19:30 -07:00
|
|
|
from synapse.types.state import StateFilter
|
2018-08-10 07:50:21 -06:00
|
|
|
from synapse.util.async_helpers import ReadWriteLock
|
2018-07-20 08:32:23 -06:00
|
|
|
from synapse.util.stringutils import random_string
|
|
|
|
from synapse.visibility import filter_events_for_client
|
|
|
|
|
2020-09-03 15:02:29 -06:00
|
|
|
if TYPE_CHECKING:
|
|
|
|
from synapse.server import HomeServer
|
|
|
|
|
|
|
|
|
2018-07-20 08:32:23 -06:00
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
2023-06-13 11:31:08 -06:00
|
|
|
# How many single event gaps we tolerate returning in a `/messages` response before we
|
|
|
|
# backfill and try to fill in the history. This is an arbitrarily picked number so feel
|
|
|
|
# free to tune it in the future.
|
|
|
|
BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD = 3
|
|
|
|
|
2018-07-20 08:32:23 -06:00
|
|
|
|
2023-08-16 08:19:54 -06:00
|
|
|
# This is used to avoid purging a room several time at the same moment,
|
|
|
|
# and also paginating during a purge. Pagination can trigger backfill,
|
|
|
|
# which would create old events locally, and would potentially clash with the room delete.
|
|
|
|
PURGE_PAGINATION_LOCK_NAME = "purge_pagination_lock"
|
2023-07-31 03:58:03 -06:00
|
|
|
|
|
|
|
|
2021-09-20 06:56:23 -06:00
|
|
|
@attr.s(slots=True, auto_attribs=True)
class PurgeStatus:
    """Tracks the progress of a single history-purge request.

    Instances are stored in `PaginationHandler._purges_by_id` and are returned
    to the admin API via `get_purge_status`.
    """

    STATUS_ACTIVE = 0
    STATUS_COMPLETE = 1
    STATUS_FAILED = 2

    # Human-readable names for the STATUS_* codes, used when serializing.
    STATUS_TEXT = {
        STATUS_ACTIVE: "active",
        STATUS_COMPLETE: "complete",
        STATUS_FAILED: "failed",
    }

    # Save the error message if an error occurs
    error: str = ""

    # Tracks whether this request has completed. One of STATUS_{ACTIVE,COMPLETE,FAILED}.
    status: int = STATUS_ACTIVE

    def asdict(self) -> JsonDict:
        """Serialize this status as a JSON-compatible dict for the REST API.

        The "error" key is only present if an error was recorded.
        """
        serialized: JsonDict = {"status": PurgeStatus.STATUS_TEXT[self.status]}
        if self.error:
            serialized["error"] = self.error
        return serialized
|
|
|
|
|
|
|
|
|
|
|
|
@attr.s(slots=True, auto_attribs=True)
class DeleteStatus:
    """Object tracking the status of a delete room request

    This class contains information on the progress of a delete room request, for
    return by get_delete_status.
    """

    STATUS_PURGING = 0
    STATUS_COMPLETE = 1
    STATUS_FAILED = 2
    STATUS_SHUTTING_DOWN = 3

    # Human-readable names for the STATUS_* codes, used when serializing.
    STATUS_TEXT = {
        STATUS_PURGING: "purging",
        STATUS_COMPLETE: "complete",
        STATUS_FAILED: "failed",
        STATUS_SHUTTING_DOWN: "shutting_down",
    }

    # Tracks whether this request has completed.
    # One of STATUS_{PURGING,COMPLETE,FAILED,SHUTTING_DOWN}.
    status: int = STATUS_PURGING

    # Save the error message if an error occurs
    error: str = ""

    # Saves the result of an action to give it back to REST API
    #
    # FIX: previously this used a plain dict literal as the default value.
    # Unlike dataclasses, attrs does not reject or copy mutable defaults, so
    # every DeleteStatus instance shared the *same* dict (and the same inner
    # lists). Use a factory so each instance gets its own response dict.
    shutdown_room: ShutdownRoomResponse = attr.Factory(
        lambda: {
            "kicked_users": [],
            "failed_to_kick_users": [],
            "local_aliases": [],
            "new_room_id": None,
        }
    )

    def asdict(self) -> JsonDict:
        """Serialize this status as a JSON-compatible dict for the REST API.

        The "error" key is only present if an error was recorded.
        """
        ret = {
            "status": DeleteStatus.STATUS_TEXT[self.status],
            "shutdown_room": self.shutdown_room,
        }
        if self.error:
            ret["error"] = self.error
        return ret
|
2018-07-20 08:32:23 -06:00
|
|
|
|
|
|
|
|
2020-09-04 04:54:56 -06:00
|
|
|
class PaginationHandler:
|
2018-07-20 08:32:23 -06:00
|
|
|
"""Handles pagination and purge history requests.
|
|
|
|
|
|
|
|
These are in the same handler due to the fact we need to block clients
|
|
|
|
paginating during a purge.
|
|
|
|
"""
|
|
|
|
|
2021-11-12 05:35:31 -07:00
|
|
|
# when to remove a completed deletion/purge from the results map
|
|
|
|
CLEAR_PURGE_AFTER_MS = 1000 * 3600 * 24 # 24 hours
|
|
|
|
|
2020-09-03 15:02:29 -06:00
|
|
|
def __init__(self, hs: "HomeServer"):
    """Set up the handler and, on the master process, schedule any
    retention purge jobs described in the server configuration.

    Args:
        hs: The homeserver to pull dependencies (stores, clock, config) from.
    """
    self.hs = hs
    self.auth = hs.get_auth()
    self.store = hs.get_datastores().main
    self._storage_controllers = hs.get_storage_controllers()
    self._state_storage_controller = self._storage_controllers.state
    self.clock = hs.get_clock()
    self._server_name = hs.hostname
    self._room_shutdown_handler = hs.get_room_shutdown_handler()
    self._relations_handler = hs.get_relations_handler()
    self._worker_locks = hs.get_worker_locks_handler()

    self.pagination_lock = ReadWriteLock()
    # IDs of rooms in which there currently an active purge *or delete* operation.
    self._purges_in_progress_by_room: Set[str] = set()
    # map from purge id to PurgeStatus
    self._purges_by_id: Dict[str, PurgeStatus] = {}
    # map from purge id to DeleteStatus
    self._delete_by_id: Dict[str, DeleteStatus] = {}
    # map from room id to delete ids
    # Dict[`room_id`, List[`delete_id`]]
    self._delete_by_room: Dict[str, List[str]] = {}
    self._event_serializer = hs.get_event_client_serializer()

    # Retention limits from config; each may be None meaning "no limit".
    self._retention_default_max_lifetime = (
        hs.config.retention.retention_default_max_lifetime
    )

    self._retention_allowed_lifetime_min = (
        hs.config.retention.retention_allowed_lifetime_min
    )
    self._retention_allowed_lifetime_max = (
        hs.config.retention.retention_allowed_lifetime_max
    )
    # Only the main process (no worker_app configured) runs purge jobs.
    self._is_master = hs.config.worker.worker_app is None

    if hs.config.retention.retention_enabled and self._is_master:
        # Run the purge jobs described in the configuration file.
        for job in hs.config.retention.retention_purge_jobs:
            logger.info("Setting up purge job with config: %s", job)

            self.clock.looping_call(
                run_as_background_process,
                job.interval,
                "purge_history_for_rooms_in_range",
                self.purge_history_for_rooms_in_range,
                job.shortest_max_lifetime,
                job.longest_max_lifetime,
            )
|
|
|
|
|
2020-09-03 15:02:29 -06:00
|
|
|
async def purge_history_for_rooms_in_range(
    self, min_ms: Optional[int], max_ms: Optional[int]
) -> None:
    """Purge outdated events from rooms within the given retention range.

    If a default retention policy is defined in the server's configuration and its
    'max_lifetime' is within this range, also targets rooms which don't have a
    retention policy.

    Args:
        min_ms: Duration in milliseconds that define the lower limit of
            the range to handle (exclusive). If None, it means that the range has no
            lower limit.
        max_ms: Duration in milliseconds that define the upper limit of
            the range to handle (inclusive). If None, it means that the range has no
            upper limit.
    """
    # We want the storage layer to include rooms with no retention policy in its
    # return value only if a default retention policy is defined in the server's
    # configuration and that policy's 'max_lifetime' is either lower (or equal) than
    # max_ms or higher than min_ms (or both).
    if self._retention_default_max_lifetime is not None:
        include_null = True

        if min_ms is not None and min_ms >= self._retention_default_max_lifetime:
            # The default max_lifetime is lower than (or equal to) min_ms.
            include_null = False

        if max_ms is not None and max_ms < self._retention_default_max_lifetime:
            # The default max_lifetime is higher than max_ms.
            include_null = False
    else:
        include_null = False

    logger.info(
        "[purge] Running purge job for %s < max_lifetime <= %s (include NULLs = %s)",
        min_ms,
        max_ms,
        include_null,
    )

    rooms = await self.store.get_rooms_for_retention_period_in_range(
        min_ms, max_ms, include_null
    )

    logger.debug("[purge] Rooms to purge: %s", rooms)

    for room_id, retention_policy in rooms.items():
        logger.info("[purge] Attempting to purge messages in room %s", room_id)

        if room_id in self._purges_in_progress_by_room:
            logger.warning(
                "[purge] not purging room %s as there's an ongoing purge running"
                " for this room",
                room_id,
            )
            continue

        # If max_lifetime is None, it means that the room has no retention policy.
        # Given we only retrieve such rooms when there's a default retention policy
        # defined in the server's configuration, we can safely assume that's the
        # case and use it for this room.
        max_lifetime = (
            retention_policy.max_lifetime or self._retention_default_max_lifetime
        )

        # Cap the effective max_lifetime to be within the range allowed in the
        # config.
        # We do this in two steps:
        #   1. Make sure it's higher or equal to the minimum allowed value, and if
        #      it's not replace it with that value. This is because the server
        #      operator can be required to not delete information before a given
        #      time, e.g. to comply with freedom of information laws.
        #   2. Make sure the resulting value is lower or equal to the maximum allowed
        #      value, and if it's not replace it with that value. This is because the
        #      server operator can be required to delete any data after a specific
        #      amount of time.
        if self._retention_allowed_lifetime_min is not None:
            max_lifetime = max(self._retention_allowed_lifetime_min, max_lifetime)

        if self._retention_allowed_lifetime_max is not None:
            max_lifetime = min(max_lifetime, self._retention_allowed_lifetime_max)

        logger.debug("[purge] max_lifetime for room %s: %s", room_id, max_lifetime)

        # Figure out what token we should start purging at.
        ts = self.clock.time_msec() - max_lifetime

        stream_ordering = await self.store.find_first_stream_ordering_after_ts(ts)

        r = await self.store.get_room_event_before_stream_ordering(
            room_id,
            stream_ordering,
        )
        if not r:
            logger.warning(
                "[purge] purging events not possible: No event found "
                "(ts %i => stream_ordering %i)",
                ts,
                stream_ordering,
            )
            continue

        (stream, topo, _event_id) = r
        token = "t%d-%d" % (topo, stream)

        purge_id = random_string(16)

        self._purges_by_id[purge_id] = PurgeStatus()

        # FIX: pass the arguments lazily to the logger (matching every other
        # log call in this file) instead of eagerly %-formatting the string.
        logger.info(
            "Starting purging events in room %s (purge_id %s)", room_id, purge_id
        )

        # We want to purge everything, including local events, and to run the purge in
        # the background so that it's not blocking any other operation apart from
        # other purges in the same room.
        run_as_background_process(
            "_purge_history",
            self._purge_history,
            purge_id,
            room_id,
            token,
            True,
        )
|
|
|
|
|
2020-09-03 15:02:29 -06:00
|
|
|
def start_purge_history(
    self, room_id: str, token: str, delete_local_events: bool = False
) -> str:
    """Kick off a history purge for a room in the background.

    Args:
        room_id: The room to purge from
        token: topological token to delete events before
        delete_local_events: True to delete local events as well as
            remote ones

    Returns:
        unique ID for this purge transaction.

    Raises:
        SynapseError: if a purge is already running for this room.
    """
    # Refuse to start a second concurrent purge for the same room.
    if room_id in self._purges_in_progress_by_room:
        raise SynapseError(
            400, "History purge already in progress for %s" % (room_id,)
        )

    purge_id = random_string(16)
    self._purges_by_id[purge_id] = PurgeStatus()

    # we log the purge_id here so that it can be tied back to the
    # request id in the log lines.
    logger.info("[purge] starting purge_id %s", purge_id)

    run_as_background_process(
        "purge_history",
        self._purge_history,
        purge_id,
        room_id,
        token,
        delete_local_events,
    )
    return purge_id
|
|
|
|
|
2020-08-28 14:47:11 -06:00
|
|
|
async def _purge_history(
    self, purge_id: str, room_id: str, token: str, delete_local_events: bool
) -> None:
    """Carry out a history purge on a room.

    Runs as a background process; progress/errors are recorded on the
    PurgeStatus entry in `self._purges_by_id[purge_id]`.

    Args:
        purge_id: The ID for this purge.
        room_id: The room to purge from
        token: topological token to delete events before
        delete_local_events: True to delete local events as well as remote ones
    """
    # Mark the room as busy so pagination and other purges are blocked.
    self._purges_in_progress_by_room.add(room_id)
    try:
        # Take the pagination lock for writing so no client can paginate
        # (and thereby trigger backfill) while the purge runs.
        async with self._worker_locks.acquire_read_write_lock(
            PURGE_PAGINATION_LOCK_NAME, room_id, write=True
        ):
            await self._storage_controllers.purge_events.purge_history(
                room_id, token, delete_local_events
            )
        logger.info("[purge] complete")
        self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
    except Exception:
        # Capture the active exception as a Twisted Failure so we can both
        # log the traceback and store a readable message on the status.
        f = Failure()
        logger.error(
            "[purge] failed", exc_info=(f.type, f.value, f.getTracebackObject())
        )
        self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
        self._purges_by_id[purge_id].error = f.getErrorMessage()
    finally:
        self._purges_in_progress_by_room.discard(room_id)

    # remove the purge from the list 24 hours after it completes
    def clear_purge() -> None:
        del self._purges_by_id[purge_id]

    self.hs.get_reactor().callLater(
        PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000, clear_purge
    )
|
2018-07-20 08:32:23 -06:00
|
|
|
|
2020-09-03 15:02:29 -06:00
|
|
|
def get_purge_status(self, purge_id: str) -> Optional[PurgeStatus]:
    """Look up the current status of a purge.

    Args:
        purge_id: purge_id returned by start_purge_history

    Returns:
        The PurgeStatus, or None if no purge with that ID is known.
    """
    try:
        return self._purges_by_id[purge_id]
    except KeyError:
        return None
|
|
|
|
|
2021-11-12 05:35:31 -07:00
|
|
|
def get_delete_status(self, delete_id: str) -> Optional[DeleteStatus]:
    """Look up the current status of a room deletion.

    Args:
        delete_id: delete_id returned by start_shutdown_and_purge_room

    Returns:
        The DeleteStatus, or None if no deletion with that ID is known.
    """
    try:
        return self._delete_by_id[delete_id]
    except KeyError:
        return None
|
|
|
|
|
2023-01-26 10:31:58 -07:00
|
|
|
def get_delete_ids_by_room(self, room_id: str) -> Optional[StrCollection]:
    """Look up all delete ids recorded for a room.

    Args:
        room_id: room_id that is deleted

    Returns:
        The delete ids, or None if no deletion was started for this room.
    """
    try:
        return self._delete_by_room[room_id]
    except KeyError:
        return None
|
|
|
|
|
2020-11-30 09:48:12 -07:00
|
|
|
async def purge_room(self, room_id: str, force: bool = False) -> None:
    """Purge the given room from the database.
    This function is part the delete room v1 API.

    Args:
        room_id: room to be purged
        force: set true to skip checking for joined users.

    Raises:
        SynapseError: if `force` is not set and local users are still joined.
    """
    # Hold both locks for writing: no pagination (which can backfill old
    # events) and no new events may happen while the room is purged.
    locks = [
        (PURGE_PAGINATION_LOCK_NAME, room_id),
        (NEW_EVENT_DURING_PURGE_LOCK_NAME, room_id),
    ]
    async with self._worker_locks.acquire_multi_read_write_lock(
        locks,
        write=True,
    ):
        # first check that we have no users in this room
        if not force and await self.store.is_host_joined(
            room_id, self._server_name
        ):
            raise SynapseError(400, "Users are still joined to this room")

        await self._storage_controllers.purge_events.purge_room(room_id)
|
2019-08-22 03:42:59 -06:00
|
|
|
|
2022-08-03 09:57:38 -06:00
|
|
|
@trace
async def get_messages(
    self,
    requester: Requester,
    room_id: str,
    pagin_config: PaginationConfig,
    as_client_event: bool = True,
    event_filter: Optional[Filter] = None,
    use_admin_priviledge: bool = False,
) -> JsonDict:
    """Get messages in a room.

    Args:
        requester: The user requesting messages.
        room_id: The room they want messages from.
        pagin_config: The pagination config rules to apply, if any.
        as_client_event: True to get events in client-server format.
        event_filter: Filter to apply to results or None
        use_admin_priviledge: if `True`, return all events, regardless
            of whether `user` has access to them. To be used **ONLY**
            from the admin API.

    Returns:
        Pagination API results
    """
    if use_admin_priviledge:
        await assert_user_is_admin(self.auth, requester)

    user_id = requester.user.to_string()

    # Work out where to paginate from: an explicit token if supplied,
    # otherwise the start (forwards) or current end (backwards) of the room.
    if pagin_config.from_token:
        from_token = pagin_config.from_token
    elif pagin_config.direction == Direction.FORWARDS:
        from_token = (
            await self.hs.get_event_sources().get_start_token_for_pagination(
                room_id
            )
        )
    else:
        from_token = (
            await self.hs.get_event_sources().get_current_token_for_pagination(
                room_id
            )
        )
        # We expect `/messages` to use historic pagination tokens by default but
        # `/messages` should still works with live tokens when manually provided.
        assert from_token.room_key.topological is not None

    room_token = from_token.room_key

    # For admin requests, membership checks (and the visibility filtering
    # below) are skipped, so both values stay None.
    (membership, member_event_id) = (None, None)
    if not use_admin_priviledge:
        (
            membership,
            member_event_id,
        ) = await self.auth.check_user_in_room_or_world_readable(
            room_id, requester, allow_departed_users=True
        )

    if pagin_config.direction == Direction.BACKWARDS:
        # if we're going backwards, we might need to backfill. This
        # requires that we have a topo token.
        # NOTE: curr_topo is only bound on this (backwards) path; the
        # clamping and backfill branches below only run backwards too.
        if room_token.topological:
            curr_topo = room_token.topological
        else:
            curr_topo = await self.store.get_current_topological_token(
                room_id, room_token.stream
            )

    # If they have left the room then clamp the token to be before
    # they left the room, to save the effort of loading from the
    # database.
    if (
        pagin_config.direction == Direction.BACKWARDS
        and not use_admin_priviledge
        and membership == Membership.LEAVE
    ):
        # This is only None if the room is world_readable, in which case
        # "Membership.JOIN" would have been returned and we should never hit
        # this branch.
        assert member_event_id

        leave_token = await self.store.get_topological_token_for_event(
            member_event_id
        )
        assert leave_token.topological is not None

        if leave_token.topological < curr_topo:
            from_token = from_token.copy_and_replace(
                StreamKeyType.ROOM, leave_token
            )

    to_room_key = None
    if pagin_config.to_token:
        to_room_key = pagin_config.to_token.room_key

    # Initially fetch the events from the database. With any luck, we can return
    # these without blocking on backfill (handled below).
    events, next_key = await self.store.paginate_room_events(
        room_id=room_id,
        from_key=from_token.room_key,
        to_key=to_room_key,
        direction=pagin_config.direction,
        limit=pagin_config.limit,
        event_filter=event_filter,
    )

    if pagin_config.direction == Direction.BACKWARDS:
        # We use a `Set` because there can be multiple events at a given depth
        # and we only care about looking at the unique continum of depths to
        # find gaps.
        event_depths: Set[int] = {event.depth for event in events}
        sorted_event_depths = sorted(event_depths)

        # Inspect the depths of the returned events to see if there are any gaps
        found_big_gap = False
        number_of_gaps = 0
        previous_event_depth = (
            sorted_event_depths[0] if len(sorted_event_depths) > 0 else 0
        )
        for event_depth in sorted_event_depths:
            # We don't expect a negative depth but we'll just deal with it in
            # any case by taking the absolute value to get the true gap between
            # any two integers.
            depth_gap = abs(event_depth - previous_event_depth)
            # A `depth_gap` of 1 is a normal continuous chain to the next event
            # (1 <-- 2 <-- 3) so anything larger indicates a missing event (it's
            # also possible there is no event at a given depth but we can't ever
            # know that for sure)
            if depth_gap > 1:
                number_of_gaps += 1

            # We only tolerate a small number single-event long gaps in the
            # returned events because those are most likely just events we've
            # failed to pull in the past. Anything longer than that is probably
            # a sign that we're missing a decent chunk of history and we should
            # try to backfill it.
            #
            # XXX: It's possible we could tolerate longer gaps if we checked
            # that a given events `prev_events` is one that has failed pull
            # attempts and we could just treat it like a dead branch of history
            # for now or at least something that we don't need the block the
            # client on to try pulling.
            #
            # XXX: If we had something like MSC3871 to indicate gaps in the
            # timeline to the client, we could also get away with any sized gap
            # and just have the client refetch the holes as they see fit.
            if depth_gap > 2:
                found_big_gap = True
                break
            previous_event_depth = event_depth

        # Backfill in the foreground if we found a big gap, have too many holes,
        # or we don't have enough events to fill the limit that the client asked
        # for.
        missing_too_many_events = (
            number_of_gaps > BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD
        )
        not_enough_events_to_fill_response = len(events) < pagin_config.limit
        if (
            found_big_gap
            or missing_too_many_events
            or not_enough_events_to_fill_response
        ):
            did_backfill = await self.hs.get_federation_handler().maybe_backfill(
                room_id,
                curr_topo,
                limit=pagin_config.limit,
            )

            # If we did backfill something, refetch the events from the database to
            # catch anything new that might have been added since we last fetched.
            if did_backfill:
                events, next_key = await self.store.paginate_room_events(
                    room_id=room_id,
                    from_key=from_token.room_key,
                    to_key=to_room_key,
                    direction=pagin_config.direction,
                    limit=pagin_config.limit,
                    event_filter=event_filter,
                )
        else:
            # Otherwise, we can backfill in the background for eventual
            # consistency's sake but we don't need to block the client waiting
            # for a costly federation call and processing.
            run_as_background_process(
                "maybe_backfill_in_the_background",
                self.hs.get_federation_handler().maybe_backfill,
                room_id,
                curr_topo,
                limit=pagin_config.limit,
            )

    next_token = from_token.copy_and_replace(StreamKeyType.ROOM, next_key)

    # if no events are returned from pagination, that implies
    # we have reached the end of the available events.
    # In that case we do not return end, to tell the client
    # there is no need for further queries.
    if not events:
        return {
            "chunk": [],
            "start": await from_token.to_string(self.store),
        }

    if event_filter:
        events = await event_filter.filter(events)

    if not use_admin_priviledge:
        events = await filter_events_for_client(
            self._storage_controllers,
            user_id,
            events,
            is_peeking=(member_event_id is None),
        )

    # if after the filter applied there are no more events
    # return immediately - but there might be more in next_token batch
    if not events:
        return {
            "chunk": [],
            "start": await from_token.to_string(self.store),
            "end": await next_token.to_string(self.store),
        }

    state = None
    if event_filter and event_filter.lazy_load_members and len(events) > 0:
        # TODO: remove redundant members

        # FIXME: we also care about invite targets etc.
        state_filter = StateFilter.from_types(
            (EventTypes.Member, event.sender) for event in events
        )

        state_ids = await self._state_storage_controller.get_state_ids_for_event(
            events[0].event_id, state_filter=state_filter
        )

        if state_ids:
            state_dict = await self.store.get_events(list(state_ids.values()))
            state = state_dict.values()

    aggregations = await self._relations_handler.get_bundled_aggregations(
        events, user_id
    )

    time_now = self.clock.time_msec()

    serialize_options = SerializeEventConfig(
        as_client_event=as_client_event, requester=requester
    )

    chunk = {
        "chunk": (
            self._event_serializer.serialize_events(
                events,
                time_now,
                config=serialize_options,
                bundle_aggregations=aggregations,
            )
        ),
        "start": await from_token.to_string(self.store),
        "end": await next_token.to_string(self.store),
    }

    if state:
        chunk["state"] = self._event_serializer.serialize_events(
            state, time_now, config=serialize_options
        )

    return chunk
|
2021-11-12 05:35:31 -07:00
|
|
|
|
|
|
|
    async def _shutdown_and_purge_room(
        self,
        delete_id: str,
        room_id: str,
        requester_user_id: Optional[str],
        new_room_user_id: Optional[str] = None,
        new_room_name: Optional[str] = None,
        message: Optional[str] = None,
        block: bool = False,
        purge: bool = True,
        force_purge: bool = False,
    ) -> None:
        """
        Shuts down and purges a room.

        See `RoomShutdownHandler.shutdown_room` for details of creation of the new room

        Progress is reported by mutating `self._delete_by_id[delete_id].status`
        as each phase starts; callers poll that status rather than awaiting this
        coroutine (it runs as a background process).

        Args:
            delete_id: The ID for this delete.
            room_id: The ID of the room to shut down.
            requester_user_id:
                User who requested the action. Will be recorded as putting the room on the
                blocking list.
                If None, the action was not manually requested but instead
                triggered automatically, e.g. through a Synapse module
                or some other policy.
                MUST NOT be None if block=True.
            new_room_user_id:
                If set, a new room will be created with this user ID
                as the creator and admin, and all users in the old room will be
                moved into that room. If not set, no new room will be created
                and the users will just be removed from the old room.
            new_room_name:
                A string representing the name of the room that new users will
                be invited to. Defaults to `Content Violation Notification`
            message:
                A string containing the first message that will be sent as
                `new_room_user_id` in the new room. Ideally this will clearly
                convey why the original room was shut down.
                Defaults to `Sharing illegal content on this server is not
                permitted and rooms in violation will be blocked.`
            block:
                If set to `true`, this room will be added to a blocking list,
                preventing future attempts to join the room. Defaults to `false`.
            purge:
                If set to `true`, purge the given room from the database.
            force_purge:
                If set to `true`, the room will be purged from database
                also if it fails to remove some users from room.

        Saves a `RoomShutdownHandler.ShutdownRoomResponse` in `DeleteStatus`:
        """
        self._purges_in_progress_by_room.add(room_id)
        try:
            # Hold the pagination lock for writing so that no history purge or
            # pagination runs against this room while it is being shut down.
            async with self._worker_locks.acquire_read_write_lock(
                PURGE_PAGINATION_LOCK_NAME, room_id, write=True
            ):
                self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN
                self._delete_by_id[
                    delete_id
                ].shutdown_room = await self._room_shutdown_handler.shutdown_room(
                    room_id=room_id,
                    requester_user_id=requester_user_id,
                    new_room_user_id=new_room_user_id,
                    new_room_name=new_room_name,
                    message=message,
                    block=block,
                )
                self._delete_by_id[delete_id].status = DeleteStatus.STATUS_PURGING

                if purge:
                    logger.info("starting purge room_id %s", room_id)

                    # first check that we have no users in this room
                    if not force_purge:
                        joined = await self.store.is_host_joined(
                            room_id, self._server_name
                        )
                        if joined:
                            raise SynapseError(
                                400, "Users are still joined to this room"
                            )

                    await self._storage_controllers.purge_events.purge_room(room_id)

            logger.info("purge complete for room_id %s", room_id)
            self._delete_by_id[delete_id].status = DeleteStatus.STATUS_COMPLETE
        except Exception:
            # Capture the active exception as a Failure so both the traceback
            # and a human-readable message can be recorded on the status.
            f = Failure()
            logger.error(
                "failed",
                exc_info=(f.type, f.value, f.getTracebackObject()),
            )
            self._delete_by_id[delete_id].status = DeleteStatus.STATUS_FAILED
            self._delete_by_id[delete_id].error = f.getErrorMessage()
        finally:
            self._purges_in_progress_by_room.discard(room_id)

            # remove the delete from the list 24 hours after it completes
            def clear_delete() -> None:
                del self._delete_by_id[delete_id]
                self._delete_by_room[room_id].remove(delete_id)
                if not self._delete_by_room[room_id]:
                    del self._delete_by_room[room_id]

            # NOTE(review): assumes CLEAR_PURGE_AFTER_MS is 24h to match the
            # comment above — confirm against the class constant.
            self.hs.get_reactor().callLater(
                PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000, clear_delete
            )
|
|
|
|
|
|
|
|
def start_shutdown_and_purge_room(
|
|
|
|
self,
|
|
|
|
room_id: str,
|
2023-09-06 04:50:07 -06:00
|
|
|
requester_user_id: Optional[str],
|
2021-11-12 05:35:31 -07:00
|
|
|
new_room_user_id: Optional[str] = None,
|
|
|
|
new_room_name: Optional[str] = None,
|
|
|
|
message: Optional[str] = None,
|
|
|
|
block: bool = False,
|
|
|
|
purge: bool = True,
|
|
|
|
force_purge: bool = False,
|
|
|
|
) -> str:
|
|
|
|
"""Start off shut down and purge on a room.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
room_id: The ID of the room to shut down.
|
|
|
|
requester_user_id:
|
|
|
|
User who requested the action and put the room on the
|
|
|
|
blocking list.
|
2023-09-06 04:50:07 -06:00
|
|
|
If None, the action was not manually requested but instead
|
|
|
|
triggered automatically, e.g. through a Synapse module
|
|
|
|
or some other policy.
|
|
|
|
MUST NOT be None if block=True.
|
2021-11-12 05:35:31 -07:00
|
|
|
new_room_user_id:
|
|
|
|
If set, a new room will be created with this user ID
|
|
|
|
as the creator and admin, and all users in the old room will be
|
|
|
|
moved into that room. If not set, no new room will be created
|
|
|
|
and the users will just be removed from the old room.
|
|
|
|
new_room_name:
|
|
|
|
A string representing the name of the room that new users will
|
|
|
|
be invited to. Defaults to `Content Violation Notification`
|
|
|
|
message:
|
|
|
|
A string containing the first message that will be sent as
|
|
|
|
`new_room_user_id` in the new room. Ideally this will clearly
|
|
|
|
convey why the original room was shut down.
|
|
|
|
Defaults to `Sharing illegal content on this server is not
|
|
|
|
permitted and rooms in violation will be blocked.`
|
|
|
|
block:
|
|
|
|
If set to `true`, this room will be added to a blocking list,
|
|
|
|
preventing future attempts to join the room. Defaults to `false`.
|
|
|
|
purge:
|
|
|
|
If set to `true`, purge the given room from the database.
|
|
|
|
force_purge:
|
|
|
|
If set to `true`, the room will be purged from database
|
|
|
|
also if it fails to remove some users from room.
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
unique ID for this delete transaction.
|
|
|
|
"""
|
|
|
|
if room_id in self._purges_in_progress_by_room:
|
|
|
|
raise SynapseError(
|
|
|
|
400, "History purge already in progress for %s" % (room_id,)
|
|
|
|
)
|
|
|
|
|
|
|
|
# This check is double to `RoomShutdownHandler.shutdown_room`
|
|
|
|
# But here the requester get a direct response / error with HTTP request
|
|
|
|
# and do not have to check the purge status
|
|
|
|
if new_room_user_id is not None:
|
|
|
|
if not self.hs.is_mine_id(new_room_user_id):
|
|
|
|
raise SynapseError(
|
|
|
|
400, "User must be our own: %s" % (new_room_user_id,)
|
|
|
|
)
|
|
|
|
|
|
|
|
delete_id = random_string(16)
|
|
|
|
|
|
|
|
# we log the delete_id here so that it can be tied back to the
|
|
|
|
# request id in the log lines.
|
|
|
|
logger.info(
|
|
|
|
"starting shutdown room_id %s with delete_id %s",
|
|
|
|
room_id,
|
|
|
|
delete_id,
|
|
|
|
)
|
|
|
|
|
|
|
|
self._delete_by_id[delete_id] = DeleteStatus()
|
|
|
|
self._delete_by_room.setdefault(room_id, []).append(delete_id)
|
|
|
|
run_as_background_process(
|
|
|
|
"shutdown_and_purge_room",
|
|
|
|
self._shutdown_and_purge_room,
|
|
|
|
delete_id,
|
|
|
|
room_id,
|
|
|
|
requester_user_id,
|
|
|
|
new_room_user_id,
|
|
|
|
new_room_name,
|
|
|
|
message,
|
|
|
|
block,
|
|
|
|
purge,
|
|
|
|
force_purge,
|
|
|
|
)
|
|
|
|
return delete_id
|