# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from typing import Any, List, Set, Tuple, cast

from synapse.api.errors import SynapseError
from synapse.storage.database import LoggingTransaction
from synapse.storage.databases.main import CacheInvalidationWorkerStore
from synapse.storage.databases.main.state import StateGroupWorkerStore
from synapse.storage.engines import PostgresEngine
from synapse.storage.engines._base import IsolationLevel
from synapse.types import RoomStreamToken

logger = logging.getLogger(__name__)


class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
    async def purge_history(
        self, room_id: str, token: str, delete_local_events: bool
    ) -> Set[int]:
        """Deletes room history before a certain point.

        Note that only a single purge can occur at once; this is guaranteed at
        a higher level (in the PaginationHandler).

        Args:
            room_id:
            token: A topological token to delete events before
            delete_local_events:
                if True, we will delete local events as well as remote ones
                (instead of just marking them as outliers and deleting their
                state groups).

        Returns:
            The set of state groups that are referenced by deleted events.
        """
        parsed_token = await RoomStreamToken.parse(self, token)

        return await self.db_pool.runInteraction(
            "purge_history",
            self._purge_history_txn,
            room_id,
            parsed_token,
            delete_local_events,
        )

    def _purge_history_txn(
        self,
        txn: LoggingTransaction,
        room_id: str,
        token: RoomStreamToken,
        delete_local_events: bool,
    ) -> Set[int]:
        # Tables that should be pruned:
        #     event_auth
        #     event_backward_extremities
        #     event_edges
        #     event_forward_extremities
        #     event_json
        #     event_push_actions
        #     event_relations
        #     event_search
        #     event_to_state_groups
        #     events
        #     rejections
        #     room_depth
        #     state_groups
        #     state_groups_state
        #     destination_rooms

        # we will build a temporary table listing the events so that we don't
        # have to keep shovelling the list back and forth across the
        # connection. Annoyingly the Python sqlite driver commits the
        # transaction on CREATE, so let's do this first.
        #
        # furthermore, we might already have the table from a previous (failed)
        # purge attempt, so let's drop the table first.

        txn.execute("DROP TABLE IF EXISTS events_to_purge")

        txn.execute(
            "CREATE TEMPORARY TABLE events_to_purge ("
            " event_id TEXT NOT NULL,"
            " should_delete BOOLEAN NOT NULL"
            ")"
        )
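        # Each row pairs an event with whether it will be fully deleted or
        # merely marked as an outlier, e.g. a hypothetical row
        # ("$abc123:example.com", true) for a purgeable event.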
        # First ensure that we're not about to delete all the forward extremities
        txn.execute(
            "SELECT e.event_id, e.depth FROM events as e "
            "INNER JOIN event_forward_extremities as f "
            "ON e.event_id = f.event_id "
            "AND e.room_id = f.room_id "
            "WHERE f.room_id = ?",
            (room_id,),
        )
        rows = txn.fetchall()
        # if we already have no forward extremities (for example because they were
        # cleared out by the `delete_old_current_state_events` background database
        # update), then we may as well carry on.
        if rows:
            max_depth = max(row[1] for row in rows)

            if max_depth < token.topological:
                # We need to ensure we don't delete all the events from the
                # database, otherwise we wouldn't be able to send any events
                # (due to not having any forward extremities).
                raise SynapseError(
                    400, "topological_ordering is greater than forward extremities"
                )

        logger.info("[purge] looking for events to delete")

        should_delete_expr = "state_events.state_key IS NULL"
        should_delete_params: Tuple[Any, ...] = ()
        if not delete_local_events:
            should_delete_expr += " AND event_id NOT LIKE ?"

            # We include the parameter twice since we use the expression twice
            should_delete_params += ("%:" + self.hs.hostname, "%:" + self.hs.hostname)

        should_delete_params += (room_id, token.topological)
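        # With delete_local_events=False, the statement assembled below looks
        # roughly like this (a sketch of the string substitution, not run
        # verbatim):
        #
        #     INSERT INTO events_to_purge
        #      SELECT event_id,
        #             state_events.state_key IS NULL AND event_id NOT LIKE ?
        #      FROM events AS e LEFT JOIN state_events USING (event_id)
        #      WHERE (NOT outlier OR (state_events.state_key IS NULL
        #             AND event_id NOT LIKE ?))
        #        AND e.room_id = ? AND topological_ordering < ?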

        # Note that we don't insert events that are outliers and aren't going
        # to be deleted, as nothing will happen to them.
        txn.execute(
            "INSERT INTO events_to_purge"
            " SELECT event_id, %s"
            " FROM events AS e LEFT JOIN state_events USING (event_id)"
            " WHERE (NOT outlier OR (%s)) AND e.room_id = ? AND topological_ordering < ?"
            % (should_delete_expr, should_delete_expr),
            should_delete_params,
        )

        # We create the indices *after* insertion as that's a lot faster.

        # create an index on should_delete because later we'll be looking for
        # the should_delete / shouldn't_delete subsets
        txn.execute(
            "CREATE INDEX events_to_purge_should_delete"
            " ON events_to_purge(should_delete)"
        )

        # We do joins against events_to_purge for e.g. calculating state
        # groups to purge, etc., so let's make an index.
        txn.execute("CREATE INDEX events_to_purge_id ON events_to_purge(event_id)")

        txn.execute("SELECT event_id, should_delete FROM events_to_purge")
        event_rows = txn.fetchall()
        logger.info(
            "[purge] found %i events before cutoff, of which %i can be deleted",
            len(event_rows),
            sum(1 for e in event_rows if e[1]),
        )

        logger.info("[purge] Finding new backward extremities")

        # We calculate the new entries for the backward extremities by finding
        # events to be purged that are pointed to by events we're not going to
        # purge.
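        # For example, if event B lists event A as a prev_event, A is being
        # purged, and B is kept, then A becomes a new backward extremity of
        # the room.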
        txn.execute(
            "SELECT DISTINCT e.event_id FROM events_to_purge AS e"
            " INNER JOIN event_edges AS ed ON e.event_id = ed.prev_event_id"
            " LEFT JOIN events_to_purge AS ep2 ON ed.event_id = ep2.event_id"
            " WHERE ep2.event_id IS NULL"
        )
        new_backwards_extrems = txn.fetchall()

        logger.info("[purge] replacing backward extremities: %r", new_backwards_extrems)

        txn.execute(
            "DELETE FROM event_backward_extremities WHERE room_id = ?", (room_id,)
        )

        # Update backward extremities
        txn.execute_batch(
            "INSERT INTO event_backward_extremities (room_id, event_id)"
            " VALUES (?, ?)",
            [(room_id, event_id) for event_id, in new_backwards_extrems],
        )

        logger.info("[purge] finding state groups referenced by deleted events")

        # Get all state groups that are referenced by events that are to be
        # deleted.
        txn.execute(
            """
            SELECT DISTINCT state_group FROM events_to_purge
            INNER JOIN event_to_state_groups USING (event_id)
            """
        )

        referenced_state_groups = {sg for sg, in txn}
        logger.info(
            "[purge] found %i referenced state groups", len(referenced_state_groups)
        )

        logger.info("[purge] removing events from event_to_state_groups")
        txn.execute(
            "DELETE FROM event_to_state_groups "
            "WHERE event_id IN (SELECT event_id from events_to_purge)"
        )

        # Delete all remote non-state events
        for table in (
            "event_edges",
            "events",
            "event_json",
            "event_auth",
            "event_forward_extremities",
            "event_relations",
            "event_search",
            "rejections",
            "redactions",
        ):
            logger.info("[purge] removing events from %s", table)

            txn.execute(
                "DELETE FROM %s WHERE event_id IN ("
                "    SELECT event_id FROM events_to_purge WHERE should_delete"
                ")" % (table,)
            )

        # event_push_actions lacks an index on event_id, and has one on
        # (room_id, event_id) instead.
        for table in ("event_push_actions",):
            logger.info("[purge] removing events from %s", table)

            txn.execute(
                "DELETE FROM %s WHERE room_id = ? AND event_id IN ("
                "    SELECT event_id FROM events_to_purge WHERE should_delete"
                ")" % (table,),
                (room_id,),
            )

        # Mark all remaining state events and our own events as outliers
        logger.info("[purge] marking remaining events as outliers")
        txn.execute(
            "UPDATE events SET outlier = ?"
            " WHERE event_id IN ("
            "    SELECT event_id FROM events_to_purge "
            "    WHERE NOT should_delete"
            ")",
            (True,),
        )

        # Synapse tries to take out an exclusive lock on room_depth whenever it
        # persists events (because upsert), and once we run this update, we
        # will block that for the rest of our transaction.
        #
        # So, let's stick it at the end so that we don't block event
        # persistence.
        #
        # We do this by calculating the minimum depth of the backwards
        # extremities. However, the events in event_backward_extremities
        # are ones we don't have yet, so we need to look at the events that
        # point to them via the event_edges table.
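        # For example, if remaining events C and D both list a purged event B
        # (now a backward extremity) as a prev_event, the new min_depth is
        # min(depth(C), depth(D)).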
        txn.execute(
            """
            SELECT COALESCE(MIN(depth), 0)
            FROM event_backward_extremities AS eb
            INNER JOIN event_edges AS eg ON eg.prev_event_id = eb.event_id
            INNER JOIN events AS e ON e.event_id = eg.event_id
            WHERE eb.room_id = ?
            """,
            (room_id,),
        )
        (min_depth,) = cast(Tuple[int], txn.fetchone())

        logger.info("[purge] updating room_depth to %d", min_depth)

        txn.execute(
            "UPDATE room_depth SET min_depth = ? WHERE room_id = ?",
            (min_depth, room_id),
        )

        # finally, drop the temp table. this will commit the txn in sqlite,
        # so make sure to keep this actually last.
        txn.execute("DROP TABLE events_to_purge")

        for event_id, should_delete in event_rows:
            self._invalidate_cache_and_stream(
                txn, self._get_state_group_for_event, (event_id,)
            )

            # XXX: This is racy, since have_seen_events could be called between the
            # transaction completing and the invalidation running. On the other hand,
            # that's no different to calling `have_seen_events` just before the
            # event is deleted from the database.
            if should_delete:
                self._invalidate_cache_and_stream(
                    txn, self.have_seen_event, (room_id, event_id)
                )
                self.invalidate_get_event_cache_after_txn(txn, event_id)

        logger.info("[purge] done")

        return referenced_state_groups

    async def purge_room(self, room_id: str) -> List[int]:
        """Deletes all records of a room

        Args:
            room_id

        Returns:
            The list of state groups to delete.
        """

        # This first runs the purge transaction at the READ_COMMITTED isolation
        # level, meaning any new rows in the tables will not trigger a
        # serialization error. We then run the same purge a second time without
        # that isolation level, to purge any rows which were added during the
        # first pass.
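        # (An assumption worth noting: under Synapse's usual stricter isolation
        # level on Postgres, rows inserted concurrently by event persistence
        # could abort these large DELETEs with serialization errors;
        # READ_COMMITTED sidesteps that on the first, biggest pass.)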

        logger.info("[purge] Starting initial main purge [1/2]")
        state_groups_to_delete = await self.db_pool.runInteraction(
            "purge_room",
            self._purge_room_txn,
            room_id=room_id,
            isolation_level=IsolationLevel.READ_COMMITTED,
        )

        logger.info("[purge] Starting secondary main purge [2/2]")
        state_groups_to_delete.extend(
            await self.db_pool.runInteraction(
                "purge_room",
                self._purge_room_txn,
                room_id=room_id,
            ),
        )
        logger.info("[purge] Done with main purge")

        return state_groups_to_delete

    def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]:
        # This collides with event persistence, so we cannot write new events
        # or metadata into a room while deleting it, or this transaction will
        # fail.
        if isinstance(self.database_engine, PostgresEngine):
            txn.execute(
                "SELECT room_version FROM rooms WHERE room_id = ? FOR UPDATE",
                (room_id,),
            )
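        # The SELECT ... FOR UPDATE takes a row lock on the room's `rooms` row.
        # Assuming concurrent event persistence also touches that row (e.g.
        # via foreign-key checks on Postgres), those writers block until this
        # purge transaction finishes rather than failing it midway.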

        # First, fetch all the state groups that should be deleted, before
        # we delete that information.
        txn.execute(
            """
            SELECT DISTINCT state_group FROM events
            INNER JOIN event_to_state_groups USING(event_id)
            WHERE events.room_id = ?
            """,
            (room_id,),
        )

        state_groups = [row[0] for row in txn]

        # Get all the auth chains that are referenced by events that are to be
        # deleted.
        txn.execute(
            """
            SELECT chain_id, sequence_number FROM events
            LEFT JOIN event_auth_chains USING (event_id)
            WHERE room_id = ?
            """,
            (room_id,),
        )
        referenced_chain_id_tuples = list(txn)

        logger.info("[purge] removing from event_auth_chain_links")
        txn.executemany(
            """
            DELETE FROM event_auth_chain_links WHERE
            origin_chain_id = ? AND origin_sequence_number = ?
            """,
            referenced_chain_id_tuples,
        )
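        # Note: because of the LEFT JOIN above, events without a computed auth
        # chain yield (NULL, NULL) tuples; in SQL, `origin_chain_id = NULL` is
        # never true, so those parameters simply delete nothing.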

        # Now we delete tables which lack an index on room_id but have one on
        # event_id
        for table in (
            "event_auth",
            "event_edges",
            "event_json",
            "event_push_actions_staging",
            "event_relations",
            "event_to_state_groups",
            "event_auth_chains",
            "event_auth_chain_to_calculate",
            "redactions",
            "rejections",
            "state_events",
        ):
            logger.info("[purge] removing from %s", table)

            txn.execute(
                """
                DELETE FROM %s WHERE event_id IN (
                  SELECT event_id FROM events WHERE room_id=?
                )
                """
                % (table,),
                (room_id,),
            )

        # next, the tables with an index on room_id (or no useful index)
        for table in (
            "current_state_events",
            "destination_rooms",
            "event_backward_extremities",
            "event_forward_extremities",
            "event_push_actions",
            "event_search",
            "event_failed_pull_attempts",
            # Note: the partial state tables have foreign keys between each other, and to
            # `events` and `rooms`. We need to delete from them in the right order.
            "partial_state_events",
            "partial_state_rooms_servers",
            "partial_state_rooms",
            "events",
            "federation_inbound_events_staging",
            "local_current_membership",
            "receipts_graph",
            "receipts_linearized",
            "room_aliases",
            "room_depth",
            "room_memberships",
            "room_stats_state",
            "room_stats_current",
            "room_stats_earliest_token",
            "stream_ordering_to_exterm",
            "users_in_public_rooms",
            "users_who_share_private_rooms",
            # no useful index, but let's clear them anyway
            "appservice_room_list",
            "e2e_room_keys",
            "event_push_summary",
            "pusher_throttle",
            "insertion_events",
            "insertion_event_extremities",
            "insertion_event_edges",
            "batch_events",
            "room_account_data",
            "room_tags",
            # "rooms" happens last, to keep the foreign keys in the other tables
            # happy
            "rooms",
        ):
            logger.info("[purge] removing from %s", table)
            txn.execute("DELETE FROM %s WHERE room_id=?" % (table,), (room_id,))

        # Other tables we do NOT need to clear out:
        #
        #  - blocked_rooms
        #    This is important, to make sure that we don't accidentally rejoin
        #    a blocked room after it was purged
        #
        #  - user_directory
        #    This has a room_id column, but it is unused

        # Other tables that we might want to consider clearing out include:
        #
        #  - event_reports
        #    Given that these are intended for abuse management, my initial
        #    inclination is to leave them in place.
        #
        #  - current_state_delta_stream
        #  - ex_outlier_stream
        #  - room_tags_revisions
        #    The problem with these is that they are largeish and there is no
        #    room_id index on them. In any case we should be clearing out
        #    'stream' tables periodically anyway (#5888)

        # TODO: we could probably usefully do a bunch more cache invalidation
        # here

        # XXX: as with purge_history, this is racy, but no worse than other
        # races that already exist.
        self._invalidate_cache_and_stream(txn, self.have_seen_event, (room_id,))

        return state_groups