2016-01-06 21:26:29 -07:00
|
|
|
# Copyright 2014-2016 OpenMarket Ltd
|
2022-12-05 06:07:55 -07:00
|
|
|
# Copyright 2019, 2022 The Matrix.org Foundation C.I.C.
|
2014-08-12 08:10:52 -06:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
2014-08-12 20:14:34 -06:00
|
|
|
|
2018-07-09 00:09:20 -06:00
|
|
|
import logging
|
2020-01-07 07:18:43 -07:00
|
|
|
from abc import abstractmethod
|
2020-01-22 06:36:43 -07:00
|
|
|
from enum import Enum
|
2021-12-30 11:47:12 -07:00
|
|
|
from typing import (
|
|
|
|
TYPE_CHECKING,
|
|
|
|
Any,
|
|
|
|
Awaitable,
|
2022-03-01 05:49:54 -07:00
|
|
|
Collection,
|
2021-12-30 11:47:12 -07:00
|
|
|
Dict,
|
|
|
|
List,
|
2022-05-31 09:15:08 -06:00
|
|
|
Mapping,
|
2021-12-30 11:47:12 -07:00
|
|
|
Optional,
|
2022-09-23 06:44:03 -06:00
|
|
|
Sequence,
|
2021-12-30 11:47:12 -07:00
|
|
|
Tuple,
|
|
|
|
Union,
|
|
|
|
cast,
|
|
|
|
)
|
|
|
|
|
|
|
|
import attr
|
2018-07-09 00:09:20 -06:00
|
|
|
|
2022-06-29 11:12:45 -06:00
|
|
|
from synapse.api.constants import (
|
|
|
|
EventContentFields,
|
|
|
|
EventTypes,
|
|
|
|
JoinRules,
|
|
|
|
PublicRoomsFilterFields,
|
|
|
|
)
|
2014-08-12 08:10:52 -06:00
|
|
|
from synapse.api.errors import StoreError
|
2020-01-27 07:30:57 -07:00
|
|
|
from synapse.api.room_versions import RoomVersion, RoomVersions
|
2022-03-28 12:11:14 -06:00
|
|
|
from synapse.config.homeserver import HomeServerConfig
|
2021-09-01 09:27:58 -06:00
|
|
|
from synapse.events import EventBase
|
2022-06-29 11:12:45 -06:00
|
|
|
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
|
2021-12-13 10:05:00 -07:00
|
|
|
from synapse.storage.database import (
|
|
|
|
DatabasePool,
|
|
|
|
LoggingDatabaseConnection,
|
|
|
|
LoggingTransaction,
|
|
|
|
)
|
2021-12-15 11:00:48 -07:00
|
|
|
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
|
2022-12-05 06:07:55 -07:00
|
|
|
from synapse.storage.engines import PostgresEngine
|
2021-07-12 10:22:54 -06:00
|
|
|
from synapse.storage.types import Cursor
|
2022-12-05 06:07:55 -07:00
|
|
|
from synapse.storage.util.id_generators import (
|
|
|
|
AbstractStreamIdGenerator,
|
|
|
|
IdGenerator,
|
|
|
|
MultiWriterIdGenerator,
|
|
|
|
StreamIdGenerator,
|
|
|
|
)
|
2022-05-23 11:18:23 -06:00
|
|
|
from synapse.types import JsonDict, RetentionPolicy, ThirdPartyInstanceID
|
2020-08-20 08:32:33 -06:00
|
|
|
from synapse.util import json_encoder
|
2020-07-30 05:20:41 -06:00
|
|
|
from synapse.util.caches.descriptors import cached
|
2021-01-20 06:15:14 -07:00
|
|
|
from synapse.util.stringutils import MXC_REGEX
|
2014-08-12 08:10:52 -06:00
|
|
|
|
2021-10-22 11:15:41 -06:00
|
|
|
if TYPE_CHECKING:
|
|
|
|
from synapse.server import HomeServer
|
|
|
|
|
2014-08-12 08:10:52 -06:00
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
2021-12-30 11:47:12 -07:00
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class RatelimitOverride:
    """A per-user override of the message ratelimit settings.

    If the stored values are None or 0 then ratelimiting has been disabled
    for that user entirely (see ``get_ratelimit_for_user``).
    """

    # Sustained number of actions permitted per second.
    messages_per_second: int
    # Number of actions permitted in a burst before limiting kicks in.
    burst_count: int
|
2017-05-10 04:05:43 -06:00
|
|
|
|
2014-09-01 09:15:34 -06:00
|
|
|
|
2020-01-22 06:36:43 -07:00
|
|
|
class RoomSortOrder(Enum):
    """
    Enum to define the sorting method used when returning rooms with get_rooms_paginate

    Each value names the room attribute the results are ordered by, e.g.:
    NAME = sort rooms alphabetically by name
    JOINED_MEMBERS = sort rooms by membership size, highest to lowest
    """

    # ALPHABETICAL and SIZE are deprecated.
    # ALPHABETICAL is the same as NAME.
    ALPHABETICAL = "alphabetical"
    # SIZE is the same as JOINED_MEMBERS.
    SIZE = "size"
    NAME = "name"
    CANONICAL_ALIAS = "canonical_alias"
    JOINED_MEMBERS = "joined_members"
    JOINED_LOCAL_MEMBERS = "joined_local_members"
    VERSION = "version"
    CREATOR = "creator"
    ENCRYPTION = "encryption"
    FEDERATABLE = "federatable"
    PUBLIC = "public"
    JOIN_RULES = "join_rules"
    GUEST_ACCESS = "guest_access"
    HISTORY_VISIBILITY = "history_visibility"
    STATE_EVENTS = "state_events"
|
2020-01-22 06:36:43 -07:00
|
|
|
|
|
|
|
|
2022-10-18 05:33:18 -06:00
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class PartialStateResyncInfo:
    """Information needed to resume resyncing a partial-state room.

    NOTE(review): field semantics are inferred from the names — confirm
    against the callers that construct this.
    """

    # Presumably the server through which the partial join was made, if known.
    joined_via: Optional[str]
    # Servers believed to be in the room.
    servers_in_room: List[str] = attr.ib(factory=list)
|
|
|
|
|
|
|
|
|
2021-12-15 11:00:48 -07:00
|
|
|
class RoomWorkerStore(CacheInvalidationWorkerStore):
|
2021-12-13 10:05:00 -07:00
|
|
|
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        """Set up the room worker store.

        Args:
            database: The database pool to use.
            db_conn: A connection to the database, used during start-up only.
            hs: The homeserver instance.
        """
        super().__init__(database, db_conn, hs)

        self.config: HomeServerConfig = hs.config

        # Stream ID generator for the un-partial-stated rooms stream. On
        # Postgres a MultiWriterIdGenerator is used (though only "master"
        # writes for now — see the TODO below); other engines fall back to a
        # plain StreamIdGenerator.
        self._un_partial_stated_rooms_stream_id_gen: AbstractStreamIdGenerator

        if isinstance(database.engine, PostgresEngine):
            self._un_partial_stated_rooms_stream_id_gen = MultiWriterIdGenerator(
                db_conn=db_conn,
                db=database,
                stream_name="un_partial_stated_room_stream",
                instance_name=self._instance_name,
                tables=[
                    ("un_partial_stated_room_stream", "instance_name", "stream_id")
                ],
                sequence_name="un_partial_stated_room_stream_sequence",
                # TODO(faster_joins, multiple writers) Support multiple writers.
                writers=["master"],
            )
        else:
            self._un_partial_stated_rooms_stream_id_gen = StreamIdGenerator(
                db_conn, "un_partial_stated_room_stream", "stream_id"
            )
|
|
|
|
|
2021-08-17 07:33:16 -06:00
|
|
|
async def store_room(
|
|
|
|
self,
|
|
|
|
room_id: str,
|
|
|
|
room_creator_user_id: str,
|
|
|
|
is_public: bool,
|
|
|
|
room_version: RoomVersion,
|
2021-12-15 11:00:48 -07:00
|
|
|
) -> None:
|
2021-08-17 07:33:16 -06:00
|
|
|
"""Stores a room.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
room_id: The desired room ID, can be None.
|
|
|
|
room_creator_user_id: The user ID of the room creator.
|
|
|
|
is_public: True to indicate that this room should appear in
|
|
|
|
public room lists.
|
|
|
|
room_version: The version of the room
|
|
|
|
Raises:
|
|
|
|
StoreError if the room could not be stored.
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
await self.db_pool.simple_insert(
|
|
|
|
"rooms",
|
|
|
|
{
|
|
|
|
"room_id": room_id,
|
|
|
|
"creator": room_creator_user_id,
|
|
|
|
"is_public": is_public,
|
|
|
|
"room_version": room_version.identifier,
|
|
|
|
"has_auth_chain_index": True,
|
|
|
|
},
|
|
|
|
desc="store_room",
|
|
|
|
)
|
|
|
|
except Exception as e:
|
|
|
|
logger.error("store_room with room_id=%s failed: %s", room_id, e)
|
|
|
|
raise StoreError(500, "Problem creating room.")
|
|
|
|
|
2021-12-15 11:00:48 -07:00
|
|
|
async def get_room(self, room_id: str) -> Optional[Dict[str, Any]]:
|
2018-07-26 06:31:59 -06:00
|
|
|
"""Retrieve a room.
|
|
|
|
|
|
|
|
Args:
|
2020-08-26 05:19:32 -06:00
|
|
|
room_id: The ID of the room to retrieve.
|
2018-07-26 06:31:59 -06:00
|
|
|
Returns:
|
2018-10-12 04:13:40 -06:00
|
|
|
A dict containing the room information, or None if the room is unknown.
|
2018-07-26 06:31:59 -06:00
|
|
|
"""
|
2020-08-26 05:19:32 -06:00
|
|
|
return await self.db_pool.simple_select_one(
|
2018-07-26 06:31:59 -06:00
|
|
|
table="rooms",
|
|
|
|
keyvalues={"room_id": room_id},
|
2021-01-11 09:09:22 -07:00
|
|
|
retcols=("room_id", "is_public", "creator", "has_auth_chain_index"),
|
2018-07-26 06:31:59 -06:00
|
|
|
desc="get_room",
|
|
|
|
allow_none=True,
|
|
|
|
)
|
|
|
|
|
2020-09-01 06:39:04 -06:00
|
|
|
    async def get_room_with_stats(self, room_id: str) -> Optional[Dict[str, Any]]:
        """Retrieve room with statistics.

        Args:
            room_id: The ID of the room to retrieve.
        Returns:
            A dict containing the room information, or None if the room is unknown.
        """

        def get_room_with_stats_txn(
            txn: LoggingTransaction, room_id: str
        ) -> Optional[Dict[str, Any]]:
            # LEFT JOINs: a room may exist in `rooms` before its stats rows
            # have been populated, in which case the stats columns are NULL.
            sql = """
                SELECT room_id, state.name, state.canonical_alias, curr.joined_members,
                  curr.local_users_in_room AS joined_local_members, rooms.room_version AS version,
                  rooms.creator, state.encryption, state.is_federatable AS federatable,
                  rooms.is_public AS public, state.join_rules, state.guest_access,
                  state.history_visibility, curr.current_state_events AS state_events,
                  state.avatar, state.topic, state.room_type
                FROM rooms
                LEFT JOIN room_stats_state state USING (room_id)
                LEFT JOIN room_stats_current curr USING (room_id)
                WHERE room_id = ?
                """
            txn.execute(sql, [room_id])
            # Catch error if sql returns empty result to return "None" instead of an error
            try:
                res = self.db_pool.cursor_to_dict(txn)[0]
            except IndexError:
                return None

            # Coerce to bool: these columns come back as integers on SQLite.
            res["federatable"] = bool(res["federatable"])
            res["public"] = bool(res["public"])
            return res

        return await self.db_pool.runInteraction(
            "get_room_with_stats", get_room_with_stats_txn, room_id
        )
|
|
|
|
|
2020-08-27 05:08:38 -06:00
|
|
|
async def get_public_room_ids(self) -> List[str]:
|
|
|
|
return await self.db_pool.simple_select_onecol(
|
2018-03-01 04:39:45 -07:00
|
|
|
table="rooms",
|
2019-04-03 03:07:29 -06:00
|
|
|
keyvalues={"is_public": True},
|
2018-03-01 04:39:45 -07:00
|
|
|
retcol="room_id",
|
|
|
|
desc="get_public_room_ids",
|
|
|
|
)
|
|
|
|
|
2022-06-29 11:12:45 -06:00
|
|
|
def _construct_room_type_where_clause(
|
|
|
|
self, room_types: Union[List[Union[str, None]], None]
|
2022-10-05 06:49:52 -06:00
|
|
|
) -> Tuple[Union[str, None], list]:
|
2022-07-27 12:46:57 -06:00
|
|
|
if not room_types:
|
2022-06-29 11:12:45 -06:00
|
|
|
return None, []
|
|
|
|
|
2022-10-05 06:49:52 -06:00
|
|
|
# Since None is used to represent a room without a type, care needs to
|
|
|
|
# be taken into account when constructing the where clause.
|
|
|
|
clauses = []
|
|
|
|
args: list = []
|
|
|
|
|
|
|
|
room_types_set = set(room_types)
|
|
|
|
|
|
|
|
# We use None to represent a room without a type.
|
|
|
|
if None in room_types_set:
|
|
|
|
clauses.append("room_type IS NULL")
|
|
|
|
room_types_set.remove(None)
|
|
|
|
|
|
|
|
# If there are other room types, generate the proper clause.
|
|
|
|
if room_types:
|
2022-06-29 11:12:45 -06:00
|
|
|
list_clause, args = make_in_list_sql_clause(
|
2022-10-05 06:49:52 -06:00
|
|
|
self.database_engine, "room_type", room_types_set
|
2022-06-29 11:12:45 -06:00
|
|
|
)
|
2022-10-05 06:49:52 -06:00
|
|
|
clauses.append(list_clause)
|
2022-06-29 11:12:45 -06:00
|
|
|
|
2022-10-05 06:49:52 -06:00
|
|
|
return f"({' OR '.join(clauses)})", args
|
2022-06-29 11:12:45 -06:00
|
|
|
|
2020-09-01 06:39:04 -06:00
|
|
|
    async def count_public_rooms(
        self,
        network_tuple: Optional[ThirdPartyInstanceID],
        ignore_non_federatable: bool,
        search_filter: Optional[dict],
    ) -> int:
        """Counts the number of public rooms as tracked in the room_stats_current
        and room_stats_state table.

        Args:
            network_tuple: Restrict the count to rooms published on this
                third-party instance's list, if given.
            ignore_non_federatable: If true filters out non-federatable rooms.
                NOTE(review): this parameter is currently unused by the query
                below — confirm whether that is intentional.
            search_filter: Optional filter dict; only the room-types entry is
                consulted here.
        """

        def _count_public_rooms_txn(txn: LoggingTransaction) -> int:
            query_args = []

            # Pick the set of "published" rooms to count: either those
            # published to a specific appservice network, or the server's own
            # public list, or (default) the union of both.
            if network_tuple:
                if network_tuple.appservice_id:
                    published_sql = """
                        SELECT room_id from appservice_room_list
                        WHERE appservice_id = ? AND network_id = ?
                    """
                    query_args.append(network_tuple.appservice_id)
                    assert network_tuple.network_id is not None
                    query_args.append(network_tuple.network_id)
                else:
                    published_sql = """
                        SELECT room_id FROM rooms WHERE is_public
                    """
            else:
                published_sql = """
                    SELECT room_id FROM rooms WHERE is_public
                    UNION SELECT room_id from appservice_room_list
                """

            # Optionally restrict by room type, if the search filter asks.
            room_type_clause, args = self._construct_room_type_where_clause(
                search_filter.get(PublicRoomsFilterFields.ROOM_TYPES, None)
                if search_filter
                else None
            )
            room_type_clause = f" AND {room_type_clause}" if room_type_clause else ""
            query_args += args

            # Only count rooms that are joinable or peekable by anyone and
            # have at least one member.
            sql = f"""
                SELECT
                    COUNT(*)
                FROM (
                    {published_sql}
                ) published
                INNER JOIN room_stats_state USING (room_id)
                INNER JOIN room_stats_current USING (room_id)
                WHERE
                    (
                        join_rules = '{JoinRules.PUBLIC}'
                        OR join_rules = '{JoinRules.KNOCK}'
                        OR join_rules = '{JoinRules.KNOCK_RESTRICTED}'
                        OR history_visibility = 'world_readable'
                    )
                    {room_type_clause}
                    AND joined_members > 0
            """

            txn.execute(sql, query_args)
            return cast(Tuple[int], txn.fetchone())[0]

        return await self.db_pool.runInteraction(
            "count_public_rooms", _count_public_rooms_txn
        )
|
2019-10-02 07:08:35 -06:00
|
|
|
|
2020-10-02 06:23:15 -06:00
|
|
|
async def get_room_count(self) -> int:
|
|
|
|
"""Retrieve the total number of rooms."""
|
|
|
|
|
2021-12-15 11:00:48 -07:00
|
|
|
def f(txn: LoggingTransaction) -> int:
|
2020-10-02 06:23:15 -06:00
|
|
|
sql = "SELECT count(*) FROM rooms"
|
|
|
|
txn.execute(sql)
|
2021-12-15 11:00:48 -07:00
|
|
|
row = cast(Tuple[int], txn.fetchone())
|
|
|
|
return row[0]
|
2020-10-02 06:23:15 -06:00
|
|
|
|
|
|
|
return await self.db_pool.runInteraction("get_rooms", f)
|
|
|
|
|
2020-07-30 05:20:41 -06:00
|
|
|
    async def get_largest_public_rooms(
        self,
        network_tuple: Optional[ThirdPartyInstanceID],
        search_filter: Optional[dict],
        limit: Optional[int],
        bounds: Optional[Tuple[int, str]],
        forwards: bool,
        ignore_non_federatable: bool = False,
    ) -> List[Dict[str, Any]]:
        """Gets the largest public rooms (where largest is in terms of joined
        members, as tracked in the statistics table).

        Args:
            network_tuple: Restrict results to rooms published on this
                third-party instance's list, if given.
            search_filter: Optional filter on search term and room types.
            limit: Maximum number of rows to return, unlimited otherwise.
            bounds: An upper or lower bound to apply to result set if given,
                consists of a joined member count and room_id (these are
                excluded from result set).
            forwards: true iff going forwards, going backwards otherwise
            ignore_non_federatable: If true filters out non-federatable rooms.

        Returns:
            Rooms in order: biggest number of joined users first.
            We then arbitrarily use the room_id as a tie breaker.

        """

        where_clauses = []
        query_args: List[Union[str, int]] = []

        # Pick the set of "published" rooms: a specific appservice network's
        # list, the server's own public list, or (default) the union of both.
        if network_tuple:
            if network_tuple.appservice_id:
                published_sql = """
                    SELECT room_id from appservice_room_list
                    WHERE appservice_id = ? AND network_id = ?
                """
                query_args.append(network_tuple.appservice_id)
                assert network_tuple.network_id is not None
                query_args.append(network_tuple.network_id)
            else:
                published_sql = """
                    SELECT room_id FROM rooms WHERE is_public
                """
        else:
            published_sql = """
                SELECT room_id FROM rooms WHERE is_public
                UNION SELECT room_id from appservice_room_list
            """

        # Work out the bounds if we're given them, these bounds look slightly
        # odd, but are designed to help query planner use indices by pulling
        # out a common bound.
        if bounds:
            last_joined_members, last_room_id = bounds
            if forwards:
                where_clauses.append(
                    """
                        joined_members <= ? AND (
                            joined_members < ? OR room_id < ?
                        )
                    """
                )
            else:
                where_clauses.append(
                    """
                        joined_members >= ? AND (
                            joined_members > ? OR room_id > ?
                        )
                    """
                )

            query_args += [last_joined_members, last_joined_members, last_room_id]

        if ignore_non_federatable:
            where_clauses.append("is_federatable")

        # Apply the free-text search term (if any) to name, topic and
        # canonical alias, case-insensitively.
        if search_filter and search_filter.get(
            PublicRoomsFilterFields.GENERIC_SEARCH_TERM, None
        ):
            search_term = (
                "%" + search_filter[PublicRoomsFilterFields.GENERIC_SEARCH_TERM] + "%"
            )

            where_clauses.append(
                """
                    (
                        LOWER(name) LIKE ?
                        OR LOWER(topic) LIKE ?
                        OR LOWER(canonical_alias) LIKE ?
                    )
                """
            )
            query_args += [
                search_term.lower(),
                search_term.lower(),
                search_term.lower(),
            ]

        # Optionally restrict by room type, if the search filter asks.
        room_type_clause, args = self._construct_room_type_where_clause(
            search_filter.get(PublicRoomsFilterFields.ROOM_TYPES, None)
            if search_filter
            else None
        )
        if room_type_clause:
            where_clauses.append(room_type_clause)
        query_args += args

        where_clause = ""
        if where_clauses:
            where_clause = " AND " + " AND ".join(where_clauses)

        # We always SELECT in descending order when paginating forwards and
        # ascending otherwise; the txn below reverses the rows as needed.
        dir = "DESC" if forwards else "ASC"
        sql = f"""
            SELECT
                room_id, name, topic, canonical_alias, joined_members,
                avatar, history_visibility, guest_access, join_rules, room_type
            FROM (
                {published_sql}
            ) published
            INNER JOIN room_stats_state USING (room_id)
            INNER JOIN room_stats_current USING (room_id)
            WHERE
                (
                    join_rules = '{JoinRules.PUBLIC}'
                    OR join_rules = '{JoinRules.KNOCK}'
                    OR join_rules = '{JoinRules.KNOCK_RESTRICTED}'
                    OR history_visibility = 'world_readable'
                )
                AND joined_members > 0
                {where_clause}
            ORDER BY
                joined_members {dir},
                room_id {dir}
        """

        if limit is not None:
            query_args.append(limit)

            sql += """
                LIMIT ?
            """

        def _get_largest_public_rooms_txn(
            txn: LoggingTransaction,
        ) -> List[Dict[str, Any]]:
            txn.execute(sql, query_args)

            results = self.db_pool.cursor_to_dict(txn)

            # Rows were fetched biggest-first when going forwards; flip them
            # back into the requested order when going backwards.
            if not forwards:
                results.reverse()

            return results

        ret_val = await self.db_pool.runInteraction(
            "get_largest_public_rooms", _get_largest_public_rooms_txn
        )
        return ret_val
|
2018-03-01 04:39:45 -07:00
|
|
|
|
2018-03-01 08:20:54 -07:00
|
|
|
@cached(max_entries=10000)
|
2020-08-26 05:19:32 -06:00
|
|
|
async def is_room_blocked(self, room_id: str) -> Optional[bool]:
|
|
|
|
return await self.db_pool.simple_select_one_onecol(
|
2018-03-01 08:20:54 -07:00
|
|
|
table="blocked_rooms",
|
2019-04-03 03:07:29 -06:00
|
|
|
keyvalues={"room_id": room_id},
|
2018-03-01 08:20:54 -07:00
|
|
|
retcol="1",
|
|
|
|
allow_none=True,
|
|
|
|
desc="is_room_blocked",
|
|
|
|
)
|
|
|
|
|
2021-11-18 10:43:49 -07:00
|
|
|
async def room_is_blocked_by(self, room_id: str) -> Optional[str]:
|
|
|
|
"""
|
|
|
|
Function to retrieve user who has blocked the room.
|
|
|
|
user_id is non-nullable
|
|
|
|
It returns None if the room is not blocked.
|
|
|
|
"""
|
|
|
|
return await self.db_pool.simple_select_one_onecol(
|
|
|
|
table="blocked_rooms",
|
|
|
|
keyvalues={"room_id": room_id},
|
|
|
|
retcol="user_id",
|
|
|
|
allow_none=True,
|
|
|
|
desc="room_is_blocked_by",
|
|
|
|
)
|
|
|
|
|
2020-01-22 06:36:43 -07:00
|
|
|
async def get_rooms_paginate(
|
|
|
|
self,
|
|
|
|
start: int,
|
|
|
|
limit: int,
|
2021-07-21 07:47:56 -06:00
|
|
|
order_by: str,
|
2020-01-22 06:36:43 -07:00
|
|
|
reverse_order: bool,
|
|
|
|
search_term: Optional[str],
|
|
|
|
) -> Tuple[List[Dict[str, Any]], int]:
|
|
|
|
"""Function to retrieve a paginated list of rooms as json.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
start: offset in the list
|
|
|
|
limit: maximum amount of rooms to retrieve
|
|
|
|
order_by: the sort order of the returned list
|
|
|
|
reverse_order: whether to reverse the room list
|
2021-11-02 04:01:13 -06:00
|
|
|
search_term: a string to filter room names,
|
|
|
|
canonical alias and room ids by.
|
|
|
|
Room ID must match exactly. Canonical alias must match a substring of the local part.
|
2020-01-22 06:36:43 -07:00
|
|
|
Returns:
|
|
|
|
A list of room dicts and an integer representing the total number of
|
|
|
|
rooms that exist given this query
|
|
|
|
"""
|
|
|
|
# Filter room names by a string
|
|
|
|
where_statement = ""
|
2021-12-15 11:00:48 -07:00
|
|
|
search_pattern: List[object] = []
|
2020-01-22 06:36:43 -07:00
|
|
|
if search_term:
|
2021-11-02 04:01:13 -06:00
|
|
|
where_statement = """
|
|
|
|
WHERE LOWER(state.name) LIKE ?
|
|
|
|
OR LOWER(state.canonical_alias) LIKE ?
|
|
|
|
OR state.room_id = ?
|
|
|
|
"""
|
2020-01-22 06:36:43 -07:00
|
|
|
|
|
|
|
# Our postgres db driver converts ? -> %s in SQL strings as that's the
|
|
|
|
# placeholder for postgres.
|
|
|
|
# HOWEVER, if you put a % into your SQL then everything goes wibbly.
|
|
|
|
# To get around this, we're going to surround search_term with %'s
|
|
|
|
# before giving it to the database in python instead
|
2021-11-02 04:01:13 -06:00
|
|
|
search_pattern = [
|
|
|
|
"%" + search_term.lower() + "%",
|
|
|
|
"#%" + search_term.lower() + "%:%",
|
|
|
|
search_term,
|
|
|
|
]
|
2020-01-22 06:36:43 -07:00
|
|
|
|
|
|
|
# Set ordering
|
|
|
|
if RoomSortOrder(order_by) == RoomSortOrder.SIZE:
|
2020-04-22 06:38:41 -06:00
|
|
|
# Deprecated in favour of RoomSortOrder.JOINED_MEMBERS
|
2020-01-22 06:36:43 -07:00
|
|
|
order_by_column = "curr.joined_members"
|
|
|
|
order_by_asc = False
|
|
|
|
elif RoomSortOrder(order_by) == RoomSortOrder.ALPHABETICAL:
|
2020-04-22 06:38:41 -06:00
|
|
|
# Deprecated in favour of RoomSortOrder.NAME
|
2020-01-22 06:36:43 -07:00
|
|
|
order_by_column = "state.name"
|
|
|
|
order_by_asc = True
|
2020-04-22 06:38:41 -06:00
|
|
|
elif RoomSortOrder(order_by) == RoomSortOrder.NAME:
|
|
|
|
order_by_column = "state.name"
|
|
|
|
order_by_asc = True
|
|
|
|
elif RoomSortOrder(order_by) == RoomSortOrder.CANONICAL_ALIAS:
|
|
|
|
order_by_column = "state.canonical_alias"
|
|
|
|
order_by_asc = True
|
|
|
|
elif RoomSortOrder(order_by) == RoomSortOrder.JOINED_MEMBERS:
|
|
|
|
order_by_column = "curr.joined_members"
|
|
|
|
order_by_asc = False
|
|
|
|
elif RoomSortOrder(order_by) == RoomSortOrder.JOINED_LOCAL_MEMBERS:
|
|
|
|
order_by_column = "curr.local_users_in_room"
|
|
|
|
order_by_asc = False
|
|
|
|
elif RoomSortOrder(order_by) == RoomSortOrder.VERSION:
|
|
|
|
order_by_column = "rooms.room_version"
|
|
|
|
order_by_asc = False
|
|
|
|
elif RoomSortOrder(order_by) == RoomSortOrder.CREATOR:
|
|
|
|
order_by_column = "rooms.creator"
|
|
|
|
order_by_asc = True
|
|
|
|
elif RoomSortOrder(order_by) == RoomSortOrder.ENCRYPTION:
|
|
|
|
order_by_column = "state.encryption"
|
|
|
|
order_by_asc = True
|
|
|
|
elif RoomSortOrder(order_by) == RoomSortOrder.FEDERATABLE:
|
|
|
|
order_by_column = "state.is_federatable"
|
|
|
|
order_by_asc = True
|
|
|
|
elif RoomSortOrder(order_by) == RoomSortOrder.PUBLIC:
|
|
|
|
order_by_column = "rooms.is_public"
|
|
|
|
order_by_asc = True
|
|
|
|
elif RoomSortOrder(order_by) == RoomSortOrder.JOIN_RULES:
|
|
|
|
order_by_column = "state.join_rules"
|
|
|
|
order_by_asc = True
|
|
|
|
elif RoomSortOrder(order_by) == RoomSortOrder.GUEST_ACCESS:
|
|
|
|
order_by_column = "state.guest_access"
|
|
|
|
order_by_asc = True
|
|
|
|
elif RoomSortOrder(order_by) == RoomSortOrder.HISTORY_VISIBILITY:
|
|
|
|
order_by_column = "state.history_visibility"
|
|
|
|
order_by_asc = True
|
|
|
|
elif RoomSortOrder(order_by) == RoomSortOrder.STATE_EVENTS:
|
|
|
|
order_by_column = "curr.current_state_events"
|
|
|
|
order_by_asc = False
|
2020-01-22 06:36:43 -07:00
|
|
|
else:
|
|
|
|
raise StoreError(
|
|
|
|
500, "Incorrect value for order_by provided: %s" % order_by
|
|
|
|
)
|
|
|
|
|
|
|
|
# Whether to return the list in reverse order
|
|
|
|
if reverse_order:
|
|
|
|
# Flip the boolean
|
|
|
|
order_by_asc = not order_by_asc
|
|
|
|
|
|
|
|
# Create one query for getting the limited number of events that the user asked
|
|
|
|
# for, and another query for getting the total number of events that could be
|
|
|
|
# returned. Thus allowing us to see if there are more events to paginate through
|
|
|
|
info_sql = """
|
2020-04-22 06:38:41 -06:00
|
|
|
SELECT state.room_id, state.name, state.canonical_alias, curr.joined_members,
|
|
|
|
curr.local_users_in_room, rooms.room_version, rooms.creator,
|
|
|
|
state.encryption, state.is_federatable, rooms.is_public, state.join_rules,
|
2022-07-12 08:30:53 -06:00
|
|
|
state.guest_access, state.history_visibility, curr.current_state_events,
|
|
|
|
state.room_type
|
2020-01-22 06:36:43 -07:00
|
|
|
FROM room_stats_state state
|
|
|
|
INNER JOIN room_stats_current curr USING (room_id)
|
2020-04-22 06:38:41 -06:00
|
|
|
INNER JOIN rooms USING (room_id)
|
2022-01-17 04:42:51 -07:00
|
|
|
{where}
|
|
|
|
ORDER BY {order_by} {direction}, state.room_id {direction}
|
2020-01-22 06:36:43 -07:00
|
|
|
LIMIT ?
|
|
|
|
OFFSET ?
|
2022-01-17 04:42:51 -07:00
|
|
|
""".format(
|
|
|
|
where=where_statement,
|
|
|
|
order_by=order_by_column,
|
|
|
|
direction="ASC" if order_by_asc else "DESC",
|
2020-01-22 06:36:43 -07:00
|
|
|
)
|
|
|
|
|
|
|
|
# Use a nested SELECT statement as SQL can't count(*) with an OFFSET
|
|
|
|
count_sql = """
|
|
|
|
SELECT count(*) FROM (
|
|
|
|
SELECT room_id FROM room_stats_state state
|
2022-01-17 04:42:51 -07:00
|
|
|
{where}
|
2020-01-22 06:36:43 -07:00
|
|
|
) AS get_room_ids
|
2022-01-17 04:42:51 -07:00
|
|
|
""".format(
|
|
|
|
where=where_statement,
|
2020-01-22 06:36:43 -07:00
|
|
|
)
|
|
|
|
|
2021-12-15 11:00:48 -07:00
|
|
|
def _get_rooms_paginate_txn(
|
|
|
|
txn: LoggingTransaction,
|
|
|
|
) -> Tuple[List[Dict[str, Any]], int]:
|
2021-11-02 04:01:13 -06:00
|
|
|
# Add the search term into the WHERE clause
|
|
|
|
# and execute the data query
|
|
|
|
txn.execute(info_sql, search_pattern + [limit, start])
|
2020-01-22 06:36:43 -07:00
|
|
|
|
|
|
|
# Refactor room query data into a structured dictionary
|
|
|
|
rooms = []
|
|
|
|
for room in txn:
|
|
|
|
rooms.append(
|
|
|
|
{
|
|
|
|
"room_id": room[0],
|
|
|
|
"name": room[1],
|
|
|
|
"canonical_alias": room[2],
|
|
|
|
"joined_members": room[3],
|
2020-04-22 06:38:41 -06:00
|
|
|
"joined_local_members": room[4],
|
|
|
|
"version": room[5],
|
|
|
|
"creator": room[6],
|
|
|
|
"encryption": room[7],
|
2022-08-31 04:38:16 -06:00
|
|
|
# room_stats_state.federatable is an integer on sqlite.
|
|
|
|
"federatable": bool(room[8]),
|
|
|
|
# rooms.is_public is an integer on sqlite.
|
|
|
|
"public": bool(room[9]),
|
2020-04-22 06:38:41 -06:00
|
|
|
"join_rules": room[10],
|
|
|
|
"guest_access": room[11],
|
|
|
|
"history_visibility": room[12],
|
|
|
|
"state_events": room[13],
|
2022-07-12 08:30:53 -06:00
|
|
|
"room_type": room[14],
|
2020-01-22 06:36:43 -07:00
|
|
|
}
|
|
|
|
)
|
|
|
|
|
|
|
|
# Execute the count query
|
|
|
|
|
|
|
|
# Add the search term into the WHERE clause if present
|
2021-11-02 04:01:13 -06:00
|
|
|
txn.execute(count_sql, search_pattern)
|
2020-01-22 06:36:43 -07:00
|
|
|
|
2021-12-15 11:00:48 -07:00
|
|
|
room_count = cast(Tuple[int], txn.fetchone())
|
2020-01-22 06:36:43 -07:00
|
|
|
return rooms, room_count[0]
|
|
|
|
|
2020-08-05 14:38:57 -06:00
|
|
|
return await self.db_pool.runInteraction(
|
2020-01-22 06:36:43 -07:00
|
|
|
"get_rooms_paginate",
|
|
|
|
_get_rooms_paginate_txn,
|
|
|
|
)
|
|
|
|
|
2020-07-30 05:20:41 -06:00
|
|
|
@cached(max_entries=10000)
|
2021-04-13 03:26:37 -06:00
|
|
|
async def get_ratelimit_for_user(self, user_id: str) -> Optional[RatelimitOverride]:
|
|
|
|
"""Check if there are any overrides for ratelimiting for the given user
|
2018-08-07 03:48:31 -06:00
|
|
|
|
|
|
|
Args:
|
2021-04-13 03:26:37 -06:00
|
|
|
user_id: user ID of the user
|
2018-08-07 03:48:31 -06:00
|
|
|
Returns:
|
|
|
|
RatelimitOverride if there is an override, else None. If the contents
|
|
|
|
of RatelimitOverride are None or 0 then ratelimitng has been
|
|
|
|
disabled for that user entirely.
|
|
|
|
"""
|
2020-08-05 14:38:57 -06:00
|
|
|
row = await self.db_pool.simple_select_one(
|
2018-08-07 03:48:31 -06:00
|
|
|
table="ratelimit_override",
|
|
|
|
keyvalues={"user_id": user_id},
|
|
|
|
retcols=("messages_per_second", "burst_count"),
|
|
|
|
allow_none=True,
|
|
|
|
desc="get_ratelimit_for_user",
|
|
|
|
)
|
|
|
|
|
|
|
|
if row:
|
2019-07-23 07:00:55 -06:00
|
|
|
return RatelimitOverride(
|
|
|
|
messages_per_second=row["messages_per_second"],
|
|
|
|
burst_count=row["burst_count"],
|
2019-04-03 03:07:29 -06:00
|
|
|
)
|
2018-08-07 03:48:31 -06:00
|
|
|
else:
|
2019-07-23 07:00:55 -06:00
|
|
|
return None
|
2018-08-07 03:48:31 -06:00
|
|
|
|
2021-04-13 03:26:37 -06:00
|
|
|
async def set_ratelimit_for_user(
|
|
|
|
self, user_id: str, messages_per_second: int, burst_count: int
|
|
|
|
) -> None:
|
|
|
|
"""Sets whether a user is set an overridden ratelimit.
|
|
|
|
Args:
|
|
|
|
user_id: user ID of the user
|
|
|
|
messages_per_second: The number of actions that can be performed in a second.
|
|
|
|
burst_count: How many actions that can be performed before being limited.
|
|
|
|
"""
|
|
|
|
|
2021-12-15 11:00:48 -07:00
|
|
|
def set_ratelimit_txn(txn: LoggingTransaction) -> None:
|
2021-04-13 03:26:37 -06:00
|
|
|
self.db_pool.simple_upsert_txn(
|
|
|
|
txn,
|
|
|
|
table="ratelimit_override",
|
|
|
|
keyvalues={"user_id": user_id},
|
|
|
|
values={
|
|
|
|
"messages_per_second": messages_per_second,
|
|
|
|
"burst_count": burst_count,
|
|
|
|
},
|
|
|
|
)
|
|
|
|
|
|
|
|
self._invalidate_cache_and_stream(
|
|
|
|
txn, self.get_ratelimit_for_user, (user_id,)
|
|
|
|
)
|
|
|
|
|
|
|
|
await self.db_pool.runInteraction("set_ratelimit", set_ratelimit_txn)
|
|
|
|
|
|
|
|
async def delete_ratelimit_for_user(self, user_id: str) -> None:
|
|
|
|
"""Delete an overridden ratelimit for a user.
|
|
|
|
Args:
|
|
|
|
user_id: user ID of the user
|
|
|
|
"""
|
|
|
|
|
2021-12-15 11:00:48 -07:00
|
|
|
def delete_ratelimit_txn(txn: LoggingTransaction) -> None:
|
2021-04-13 03:26:37 -06:00
|
|
|
row = self.db_pool.simple_select_one_txn(
|
|
|
|
txn,
|
|
|
|
table="ratelimit_override",
|
|
|
|
keyvalues={"user_id": user_id},
|
|
|
|
retcols=["user_id"],
|
|
|
|
allow_none=True,
|
|
|
|
)
|
|
|
|
|
|
|
|
if not row:
|
|
|
|
return
|
|
|
|
|
|
|
|
# They are there, delete them.
|
|
|
|
self.db_pool.simple_delete_one_txn(
|
|
|
|
txn, "ratelimit_override", keyvalues={"user_id": user_id}
|
|
|
|
)
|
|
|
|
|
|
|
|
self._invalidate_cache_and_stream(
|
|
|
|
txn, self.get_ratelimit_for_user, (user_id,)
|
|
|
|
)
|
|
|
|
|
|
|
|
await self.db_pool.runInteraction("delete_ratelimit", delete_ratelimit_txn)
|
|
|
|
|
2020-07-30 05:20:41 -06:00
|
|
|
    @cached()
    async def get_retention_policy_for_room(self, room_id: str) -> RetentionPolicy:
        """Get the retention policy for a given room.

        If no retention policy has been found for this room, returns a policy defined
        by the configured default policy (which has None as both the 'min_lifetime' and
        the 'max_lifetime' if no default policy has been defined in the server's
        configuration).

        If support for retention policies is disabled, a policy with a 'min_lifetime' and
        'max_lifetime' of None is returned.

        Args:
            room_id: The ID of the room to get the retention policy of.

        Returns:
            The room's retention policy as a `RetentionPolicy`, with
            "min_lifetime" and "max_lifetime" filled in (each possibly None).
        """
        # If the room retention feature is disabled, return a policy with no minimum nor
        # maximum. This prevents incorrectly filtering out events when sending to
        # the client.
        if not self.config.retention.retention_enabled:
            return RetentionPolicy()

        def get_retention_policy_for_room_txn(
            txn: LoggingTransaction,
        ) -> List[Dict[str, Optional[int]]]:
            # The room's policy lives in a state event, so join against
            # current_state_events to only pick up the *current* policy.
            txn.execute(
                """
                SELECT min_lifetime, max_lifetime FROM room_retention
                INNER JOIN current_state_events USING (event_id, room_id)
                WHERE room_id = ?;
                """,
                (room_id,),
            )

            return self.db_pool.cursor_to_dict(txn)

        ret = await self.db_pool.runInteraction(
            "get_retention_policy_for_room",
            get_retention_policy_for_room_txn,
        )

        # If we don't know this room ID, ret will be None, in this case return the default
        # policy.
        if not ret:
            return RetentionPolicy(
                min_lifetime=self.config.retention.retention_default_min_lifetime,
                max_lifetime=self.config.retention.retention_default_max_lifetime,
            )

        # The query returns at most one current retention state event per room,
        # so only the first row is consulted.
        min_lifetime = ret[0]["min_lifetime"]
        max_lifetime = ret[0]["max_lifetime"]

        # If one of the room's policy's attributes isn't defined, use the matching
        # attribute from the default policy.
        # The default values will be None if no default policy has been defined, or if one
        # of the attributes is missing from the default policy.
        if min_lifetime is None:
            min_lifetime = self.config.retention.retention_default_min_lifetime

        if max_lifetime is None:
            max_lifetime = self.config.retention.retention_default_max_lifetime

        return RetentionPolicy(
            min_lifetime=min_lifetime,
            max_lifetime=max_lifetime,
        )
2020-09-01 06:39:04 -06:00
|
|
|
async def get_media_mxcs_in_room(self, room_id: str) -> Tuple[List[str], List[str]]:
|
2020-01-08 06:20:43 -07:00
|
|
|
"""Retrieves all the local and remote media MXC URIs in a given room
|
|
|
|
|
|
|
|
Args:
|
2020-09-01 06:39:04 -06:00
|
|
|
room_id
|
2020-01-08 06:20:43 -07:00
|
|
|
|
|
|
|
Returns:
|
2020-09-01 06:39:04 -06:00
|
|
|
The local and remote media as a lists of the media IDs.
|
2020-01-08 06:20:43 -07:00
|
|
|
"""
|
|
|
|
|
2021-12-15 11:00:48 -07:00
|
|
|
def _get_media_mxcs_in_room_txn(
|
|
|
|
txn: LoggingTransaction,
|
|
|
|
) -> Tuple[List[str], List[str]]:
|
2020-01-08 06:20:43 -07:00
|
|
|
local_mxcs, remote_mxcs = self._get_media_mxcs_in_room_txn(txn, room_id)
|
|
|
|
local_media_mxcs = []
|
|
|
|
remote_media_mxcs = []
|
|
|
|
|
|
|
|
# Convert the IDs to MXC URIs
|
|
|
|
for media_id in local_mxcs:
|
|
|
|
local_media_mxcs.append("mxc://%s/%s" % (self.hs.hostname, media_id))
|
|
|
|
for hostname, media_id in remote_mxcs:
|
|
|
|
remote_media_mxcs.append("mxc://%s/%s" % (hostname, media_id))
|
|
|
|
|
|
|
|
return local_media_mxcs, remote_media_mxcs
|
|
|
|
|
2020-09-01 06:39:04 -06:00
|
|
|
return await self.db_pool.runInteraction(
|
2020-01-08 06:20:43 -07:00
|
|
|
"get_media_ids_in_room", _get_media_mxcs_in_room_txn
|
|
|
|
)
|
|
|
|
|
2020-09-01 06:39:04 -06:00
|
|
|
async def quarantine_media_ids_in_room(
|
|
|
|
self, room_id: str, quarantined_by: str
|
|
|
|
) -> int:
|
2020-01-08 06:20:43 -07:00
|
|
|
"""For a room loops through all events with media and quarantines
|
|
|
|
the associated media
|
|
|
|
"""
|
|
|
|
|
2020-01-13 11:10:43 -07:00
|
|
|
logger.info("Quarantining media in room: %s", room_id)
|
|
|
|
|
2021-12-15 11:00:48 -07:00
|
|
|
def _quarantine_media_in_room_txn(txn: LoggingTransaction) -> int:
|
2020-01-08 06:20:43 -07:00
|
|
|
local_mxcs, remote_mxcs = self._get_media_mxcs_in_room_txn(txn, room_id)
|
2020-06-22 06:04:14 -06:00
|
|
|
return self._quarantine_media_txn(
|
|
|
|
txn, local_mxcs, remote_mxcs, quarantined_by
|
2020-01-08 06:20:43 -07:00
|
|
|
)
|
|
|
|
|
2020-09-01 06:39:04 -06:00
|
|
|
return await self.db_pool.runInteraction(
|
2020-01-08 06:20:43 -07:00
|
|
|
"quarantine_media_in_room", _quarantine_media_in_room_txn
|
|
|
|
)
|
|
|
|
|
2021-12-15 11:00:48 -07:00
|
|
|
    def _get_media_mxcs_in_room_txn(
        self, txn: LoggingTransaction, room_id: str
    ) -> Tuple[List[str], List[Tuple[str, str]]]:
        """Retrieves all the local and remote media MXC URIs in a given room

        Returns:
            A pair of lists: local media as bare media IDs, and remote media
            as (hostname, media ID) tuples.
        """
        # Page through the room's events newest-first in batches of 100,
        # using stream_ordering as a keyset-pagination cursor (the
        # %(where_clause)s placeholder is filled in on subsequent pages).
        sql = """
            SELECT stream_ordering, json FROM events
            JOIN event_json USING (room_id, event_id)
            WHERE room_id = ?
                %(where_clause)s
                AND contains_url = ? AND outlier = ?
            ORDER BY stream_ordering DESC
            LIMIT ?
        """
        txn.execute(sql % {"where_clause": ""}, (room_id, True, False, 100))

        local_media_mxcs = []
        remote_media_mxcs = []

        while True:
            # next_token stays None if the last query returned no rows,
            # which is how we detect the end of the room's history.
            next_token = None
            for stream_ordering, content_json in txn:
                next_token = stream_ordering
                event_json = db_to_json(content_json)
                content = event_json["content"]
                content_url = content.get("url")
                # `info` comes from remote event content, so defensively check
                # it is actually a dict before reading the thumbnail URL.
                info = content.get("info")
                if isinstance(info, dict):
                    thumbnail_url = info.get("thumbnail_url")
                else:
                    thumbnail_url = None

                for url in (content_url, thumbnail_url):
                    if not url:
                        continue
                    matches = MXC_REGEX.match(url)
                    if matches:
                        hostname = matches.group(1)
                        media_id = matches.group(2)
                        # Media served by this homeserver is "local";
                        # everything else is tracked with its origin server.
                        if hostname == self.hs.hostname:
                            local_media_mxcs.append(media_id)
                        else:
                            remote_media_mxcs.append((hostname, media_id))

            if next_token is None:
                # We've gone through the whole room, so we're finished.
                break

            # Fetch the next (older) page of events.
            txn.execute(
                sql % {"where_clause": "AND stream_ordering < ?"},
                (room_id, next_token, True, False, 100),
            )

        return local_media_mxcs, remote_media_mxcs
|
2020-09-01 06:39:04 -06:00
|
|
|
async def quarantine_media_by_id(
|
2020-01-13 11:10:43 -07:00
|
|
|
self,
|
|
|
|
server_name: str,
|
|
|
|
media_id: str,
|
2021-06-02 11:50:35 -06:00
|
|
|
quarantined_by: Optional[str],
|
2020-09-01 06:39:04 -06:00
|
|
|
) -> int:
|
2021-06-02 11:50:35 -06:00
|
|
|
"""quarantines or unquarantines a single local or remote media id
|
2020-01-13 11:10:43 -07:00
|
|
|
|
|
|
|
Args:
|
|
|
|
server_name: The name of the server that holds this media
|
|
|
|
media_id: The ID of the media to be quarantined
|
|
|
|
quarantined_by: The user ID that initiated the quarantine request
|
2021-06-02 11:50:35 -06:00
|
|
|
If it is `None` media will be removed from quarantine
|
2020-01-13 11:10:43 -07:00
|
|
|
"""
|
|
|
|
logger.info("Quarantining media: %s/%s", server_name, media_id)
|
2021-09-13 11:07:12 -06:00
|
|
|
is_local = server_name == self.config.server.server_name
|
2020-01-13 11:10:43 -07:00
|
|
|
|
2021-12-15 11:00:48 -07:00
|
|
|
def _quarantine_media_by_id_txn(txn: LoggingTransaction) -> int:
|
2020-01-13 11:10:43 -07:00
|
|
|
local_mxcs = [media_id] if is_local else []
|
|
|
|
remote_mxcs = [(server_name, media_id)] if not is_local else []
|
|
|
|
|
|
|
|
return self._quarantine_media_txn(
|
|
|
|
txn, local_mxcs, remote_mxcs, quarantined_by
|
|
|
|
)
|
|
|
|
|
2020-09-01 06:39:04 -06:00
|
|
|
return await self.db_pool.runInteraction(
|
2020-01-13 11:10:43 -07:00
|
|
|
"quarantine_media_by_user", _quarantine_media_by_id_txn
|
|
|
|
)
|
|
|
|
|
2020-09-01 06:39:04 -06:00
|
|
|
async def quarantine_media_ids_by_user(
|
|
|
|
self, user_id: str, quarantined_by: str
|
|
|
|
) -> int:
|
2020-01-13 11:10:43 -07:00
|
|
|
"""quarantines all local media associated with a single user
|
|
|
|
|
|
|
|
Args:
|
|
|
|
user_id: The ID of the user to quarantine media of
|
|
|
|
quarantined_by: The ID of the user who made the quarantine request
|
|
|
|
"""
|
|
|
|
|
2021-12-15 11:00:48 -07:00
|
|
|
def _quarantine_media_by_user_txn(txn: LoggingTransaction) -> int:
|
2020-01-13 11:10:43 -07:00
|
|
|
local_media_ids = self._get_media_ids_by_user_txn(txn, user_id)
|
|
|
|
return self._quarantine_media_txn(txn, local_media_ids, [], quarantined_by)
|
|
|
|
|
2020-09-01 06:39:04 -06:00
|
|
|
return await self.db_pool.runInteraction(
|
2020-01-13 11:10:43 -07:00
|
|
|
"quarantine_media_by_user", _quarantine_media_by_user_txn
|
|
|
|
)
|
|
|
|
|
2021-12-15 11:00:48 -07:00
|
|
|
    def _get_media_ids_by_user_txn(
        self, txn: LoggingTransaction, user_id: str, filter_quarantined: bool = True
    ) -> List[str]:
        """Retrieves local media IDs by a given user

        Args:
            txn (cursor)
            user_id: The ID of the user to retrieve media IDs of
            filter_quarantined: If True, exclude media that is already
                quarantined.

        Returns:
            A list of the user's local media IDs. (Remote media the user has
            referenced is not included; see the TODO below.)
        """
        # Local media
        sql = """
            SELECT media_id
            FROM local_media_repository
            WHERE user_id = ?
            """
        if filter_quarantined:
            # Safe to append without a space: the triple-quoted SQL above ends
            # with a newline plus indentation.
            sql += "AND quarantined_by IS NULL"
        txn.execute(sql, (user_id,))

        local_media_ids = [row[0] for row in txn]

        # TODO: Figure out all remote media a user has referenced in a message

        return local_media_ids
|
|
|
|
    def _quarantine_media_txn(
        self,
        txn: LoggingTransaction,
        local_mxcs: List[str],
        remote_mxcs: List[Tuple[str, str]],
        quarantined_by: Optional[str],
    ) -> int:
        """Quarantine and unquarantine local and remote media items

        Args:
            txn (cursor)
            local_mxcs: A list of local mxc URLs
            remote_mxcs: A list of (remote server, media id) tuples representing
                remote mxc URLs
            quarantined_by: The ID of the user who initiated the quarantine request
                If it is `None` media will be removed from quarantine
        Returns:
            The total number of media items quarantined
        """

        # Update all the tables to set the quarantined_by flag
        sql = """
            UPDATE local_media_repository
            SET quarantined_by = ?
            WHERE media_id = ?
        """

        # set quarantine
        if quarantined_by is not None:
            # When quarantining, skip media explicitly marked as safe.
            # (Appending without a space is fine: the triple-quoted SQL above
            # ends with a newline plus indentation.)
            sql += "AND safe_from_quarantine = ?"
            txn.executemany(
                sql, [(quarantined_by, media_id, False) for media_id in local_mxcs]
            )
        # remove from quarantine
        else:
            # quarantined_by is None here, so this clears the flag.
            txn.executemany(
                sql, [(quarantined_by, media_id) for media_id in local_mxcs]
            )

        # Note that a rowcount of -1 can be used to indicate no rows were affected.
        total_media_quarantined = txn.rowcount if txn.rowcount > 0 else 0

        txn.executemany(
            """
            UPDATE remote_media_cache
            SET quarantined_by = ?
            WHERE media_origin = ? AND media_id = ?
            """,
            ((quarantined_by, origin, media_id) for origin, media_id in remote_mxcs),
        )
        # NOTE(review): rowcount after executemany is driver-dependent per
        # PEP 249 — presumably the drivers in use report a cumulative count;
        # confirm before relying on the exact total.
        total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0

        return total_media_quarantined
|
2020-10-13 06:20:32 -06:00
|
|
|
    async def get_rooms_for_retention_period_in_range(
        self, min_ms: Optional[int], max_ms: Optional[int], include_null: bool = False
    ) -> Dict[str, RetentionPolicy]:
        """Retrieves all of the rooms within the given retention range.

        Optionally includes the rooms which don't have a retention policy.

        Args:
            min_ms: Duration in milliseconds that define the lower limit of
                the range to handle (exclusive). If None, doesn't set a lower limit.
            max_ms: Duration in milliseconds that define the upper limit of
                the range to handle (inclusive). If None, doesn't set an upper limit.
            include_null: Whether to include rooms which retention policy is NULL
                in the returned set.

        Returns:
            The rooms within this range, along with their retention
            policy. The key is "room_id", and maps to the `RetentionPolicy`
            associated with this room ID (whose "min_lifetime" and
            "max_lifetime" attributes may each be None).
        """

        def get_rooms_for_retention_period_in_range_txn(
            txn: LoggingTransaction,
        ) -> Dict[str, RetentionPolicy]:
            # Build the WHERE clause incrementally from whichever bounds were
            # provided; note only max_lifetime is compared against the range.
            range_conditions = []
            args = []

            if min_ms is not None:
                range_conditions.append("max_lifetime > ?")
                args.append(min_ms)

            if max_ms is not None:
                range_conditions.append("max_lifetime <= ?")
                args.append(max_ms)

            # Do a first query which will retrieve the rooms that have a retention policy
            # in their current state.
            sql = """
                SELECT room_id, min_lifetime, max_lifetime FROM room_retention
                INNER JOIN current_state_events USING (event_id, room_id)
                """

            if len(range_conditions):
                sql += " WHERE (" + " AND ".join(range_conditions) + ")"

                # NULL policies only match when a WHERE clause exists; without
                # range conditions the unfiltered query already returns them.
                if include_null:
                    sql += " OR max_lifetime IS NULL"

            txn.execute(sql, args)

            rows = self.db_pool.cursor_to_dict(txn)
            rooms_dict = {}

            for row in rows:
                rooms_dict[row["room_id"]] = RetentionPolicy(
                    min_lifetime=row["min_lifetime"],
                    max_lifetime=row["max_lifetime"],
                )

            if include_null:
                # If required, do a second query that retrieves all of the rooms we know
                # of so we can handle rooms with no retention policy.
                sql = "SELECT DISTINCT room_id FROM current_state_events"

                txn.execute(sql)

                rows = self.db_pool.cursor_to_dict(txn)

                # If a room isn't already in the dict (i.e. it doesn't have a retention
                # policy in its state), add it with a null policy.
                for row in rows:
                    if row["room_id"] not in rooms_dict:
                        rooms_dict[row["room_id"]] = RetentionPolicy()

            return rooms_dict

        return await self.db_pool.runInteraction(
            "get_rooms_for_retention_period_in_range",
            get_rooms_for_retention_period_in_range_txn,
        )
|
2022-10-03 07:13:11 -06:00
|
|
|
@cached(iterable=True)
|
2022-09-23 06:44:03 -06:00
|
|
|
async def get_partial_state_servers_at_join(self, room_id: str) -> Sequence[str]:
|
|
|
|
"""Gets the list of servers in a partial state room at the time we joined it.
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
The `servers_in_room` list from the `/send_join` response for partial state
|
|
|
|
rooms. May not be accurate or complete, as it comes from a remote
|
|
|
|
homeserver.
|
|
|
|
An empty list for full state rooms.
|
|
|
|
"""
|
|
|
|
return await self.db_pool.simple_select_onecol(
|
|
|
|
"partial_state_rooms_servers",
|
|
|
|
keyvalues={"room_id": room_id},
|
|
|
|
retcol="server_name",
|
|
|
|
desc="get_partial_state_servers_at_join",
|
|
|
|
)
|
|
|
|
|
2022-10-18 05:33:18 -06:00
|
|
|
    async def get_partial_state_room_resync_info(
        self,
    ) -> Mapping[str, PartialStateResyncInfo]:
        """Get all rooms containing events with partial state, and the information
        needed to restart a "resync" of those rooms.

        Returns:
            A dictionary of rooms with partial state, with room IDs as keys and
            `PartialStateResyncInfo` (the server we joined via, plus the list of
            servers in the room) as values.
        """
        room_servers: Dict[str, PartialStateResyncInfo] = {}

        # First, one entry per partial-state room, carrying the server we
        # joined via.
        rows = await self.db_pool.simple_select_list(
            table="partial_state_rooms",
            keyvalues={},
            retcols=("room_id", "joined_via"),
            desc="get_server_which_served_partial_join",
        )

        for row in rows:
            room_id = row["room_id"]
            joined_via = row["joined_via"]
            room_servers[room_id] = PartialStateResyncInfo(joined_via=joined_via)

        # Then fill in each room's server list from the companion table.
        rows = await self.db_pool.simple_select_list(
            "partial_state_rooms_servers",
            keyvalues=None,
            retcols=("room_id", "server_name"),
            desc="get_partial_state_rooms",
        )

        for row in rows:
            room_id = row["room_id"]
            server_name = row["server_name"]
            entry = room_servers.get(room_id)
            if entry is None:
                # There is a foreign key constraint which enforces that every room_id in
                # partial_state_rooms_servers appears in partial_state_rooms. So we
                # expect `entry` to be non-null. (This reasoning fails if we've
                # partial-joined between the two SELECTs, but this is unlikely to happen
                # in practice.)
                continue
            entry.servers_in_room.append(server_name)

        return room_servers
|
2022-09-01 09:07:01 -06:00
|
|
|
@cached()
|
2022-06-01 09:02:53 -06:00
|
|
|
async def is_partial_state_room(self, room_id: str) -> bool:
|
|
|
|
"""Checks if this room has partial state.
|
|
|
|
|
|
|
|
Returns true if this is a "partial-state" room, which means that the state
|
|
|
|
at events in the room, and `current_state_events`, may not yet be
|
|
|
|
complete.
|
|
|
|
"""
|
|
|
|
|
|
|
|
entry = await self.db_pool.simple_select_one_onecol(
|
|
|
|
table="partial_state_rooms",
|
|
|
|
keyvalues={"room_id": room_id},
|
|
|
|
retcol="room_id",
|
|
|
|
allow_none=True,
|
|
|
|
desc="is_partial_state_room",
|
|
|
|
)
|
|
|
|
|
|
|
|
return entry is not None
|
|
|
|
|
2022-09-28 16:22:35 -06:00
|
|
|
async def get_join_event_id_and_device_lists_stream_id_for_partial_state(
|
|
|
|
self, room_id: str
|
|
|
|
) -> Tuple[str, int]:
|
|
|
|
"""Get the event ID of the initial join that started the partial
|
|
|
|
join, and the device list stream ID at the point we started the partial
|
|
|
|
join.
|
|
|
|
"""
|
|
|
|
|
|
|
|
result = await self.db_pool.simple_select_one(
|
|
|
|
table="partial_state_rooms",
|
|
|
|
keyvalues={"room_id": room_id},
|
|
|
|
retcols=("join_event_id", "device_lists_stream_id"),
|
|
|
|
desc="get_join_event_id_for_partial_state",
|
|
|
|
)
|
|
|
|
return result["join_event_id"], result["device_lists_stream_id"]
|
|
|
|
|
2022-12-05 06:07:55 -07:00
|
|
|
def get_un_partial_stated_rooms_token(self) -> int:
|
|
|
|
# TODO(faster_joins, multiple writers): This is inappropriate if there
|
|
|
|
# are multiple writers because workers that don't write often will
|
|
|
|
# hold all readers up.
|
|
|
|
# (See `MultiWriterIdGenerator.get_persisted_upto_position` for an
|
|
|
|
# explanation.)
|
|
|
|
return self._un_partial_stated_rooms_stream_id_gen.get_current_token()
|
|
|
|
|
|
|
|
    async def get_un_partial_stated_rooms_from_stream(
        self, instance_name: str, last_id: int, current_id: int, limit: int
    ) -> Tuple[List[Tuple[int, Tuple[str]]], int, bool]:
        """Get updates for caches replication stream.

        Args:
            instance_name: The writer we want to fetch updates from. Unused
                here since there is only ever one writer.
            last_id: The token to fetch updates from. Exclusive.
            current_id: The token to fetch updates up to. Inclusive.
            limit: The requested limit for the number of rows to return. The
                function may return more or fewer rows.

        Returns:
            A tuple consisting of: the updates, a token to use to fetch
            subsequent updates, and whether we returned fewer rows than exists
            between the requested tokens due to the limit.

            The token returned can be used in a subsequent call to this
            function to get further updates.

            The updates are a list of 2-tuples of stream ID and the row data
        """

        # Fast path: nothing could have changed between the two tokens.
        if last_id == current_id:
            return [], current_id, False

        def get_un_partial_stated_rooms_from_stream_txn(
            txn: LoggingTransaction,
        ) -> Tuple[List[Tuple[int, Tuple[str]]], int, bool]:
            sql = """
                SELECT stream_id, room_id
                FROM un_partial_stated_room_stream
                WHERE ? < stream_id AND stream_id <= ? AND instance_name = ?
                ORDER BY stream_id ASC
                LIMIT ?
            """
            txn.execute(sql, (last_id, current_id, instance_name, limit))
            # Each update row is (stream_id, (room_id,)).
            updates = [(row[0], (row[1],)) for row in txn]
            limited = False
            upto_token = current_id
            if len(updates) >= limit:
                # We hit the limit, so report the last stream ID we actually
                # returned; the caller should fetch again from there.
                upto_token = updates[-1][0]
                limited = True

            return updates, upto_token, limited

        return await self.db_pool.runInteraction(
            "get_un_partial_stated_rooms_from_stream",
            get_un_partial_stated_rooms_from_stream_txn,
        )
|
2018-03-01 04:39:45 -07:00
|
|
|
|
2021-07-12 10:22:54 -06:00
|
|
|
class _BackgroundUpdates:
    """Names of the background updates handled by `RoomBackgroundUpdateStore`.

    Each value is the update's identifier as registered with
    `db_pool.updates.register_background_update_handler`.
    """

    # Handled by _remove_tombstoned_rooms_from_directory. ("TOMESTONED" is a
    # historic typo in the attribute name; the value is spelled correctly.)
    REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory"
    # Handled by _background_add_rooms_room_version_column.
    ADD_ROOMS_ROOM_VERSION_COLUMN = "add_rooms_room_version_column"
    # Handled by _background_populate_room_depth_min_depth2.
    POPULATE_ROOM_DEPTH_MIN_DEPTH2 = "populate_room_depth_min_depth2"
    # Handled by _background_replace_room_depth_min_depth.
    REPLACE_ROOM_DEPTH_MIN_DEPTH = "replace_room_depth_min_depth"
    # Handled by _background_populate_rooms_creator_column.
    POPULATE_ROOMS_CREATOR_COLUMN = "populate_rooms_creator_column"
    # Handled by _background_add_room_type_column.
    ADD_ROOM_TYPE_COLUMN = "add_room_type_column"
|
|
|
|
|
|
|
|
|
# Statements run (in order) to swap the new `min_depth2` column in place of
# `room_depth.min_depth`: drop the trigger/function that kept `min_depth2` in
# sync, then drop the old column and rename the new one over it.
# NOTE(review): `DROP TRIGGER ... ON` / `DROP FUNCTION` is PostgreSQL syntax —
# presumably SQLite is handled separately; confirm before reusing elsewhere.
_REPLACE_ROOM_DEPTH_SQL_COMMANDS = (
    "DROP TRIGGER populate_min_depth2_trigger ON room_depth",
    "DROP FUNCTION populate_min_depth2()",
    "ALTER TABLE room_depth DROP COLUMN min_depth",
    "ALTER TABLE room_depth RENAME COLUMN min_depth2 TO min_depth",
)
|
2020-01-07 07:18:43 -07:00
|
|
|
|
2021-07-12 10:22:54 -06:00
|
|
|
class RoomBackgroundUpdateStore(SQLBaseStore):
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        """Register this store's background updates with the update manager."""
        super().__init__(database, db_conn, hs)

        # Note: registered under a literal name rather than a
        # `_BackgroundUpdates` constant, unlike the updates below.
        self.db_pool.updates.register_background_update_handler(
            "insert_room_retention",
            self._background_insert_retention,
        )

        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.REMOVE_TOMESTONED_ROOMS_BG_UPDATE,
            self._remove_tombstoned_rooms_from_directory,
        )

        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.ADD_ROOMS_ROOM_VERSION_COLUMN,
            self._background_add_rooms_room_version_column,
        )

        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.ADD_ROOM_TYPE_COLUMN,
            self._background_add_room_type_column,
        )

        # BG updates to change the type of room_depth.min_depth
        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.POPULATE_ROOM_DEPTH_MIN_DEPTH2,
            self._background_populate_room_depth_min_depth2,
        )
        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.REPLACE_ROOM_DEPTH_MIN_DEPTH,
            self._background_replace_room_depth_min_depth,
        )

        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN,
            self._background_populate_rooms_creator_column,
        )
|
2021-12-15 11:00:48 -07:00
|
|
|
    async def _background_insert_retention(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Retrieves a list of all rooms within a range and inserts an entry for each of
        them into the room_retention table.
        NULLs the property's columns if missing from the retention event in the room's
        state (or NULLs all of them if there's no retention event in the room's state),
        so that we fall back to the server's retention policy.

        Args:
            progress: The saved progress of this background update; "room_id"
                is the last room ID processed by the previous batch.
            batch_size: Maximum number of rooms to process in this batch.

        Returns:
            The number of items processed (used by the update manager for
            pacing).
        """

        last_room = progress.get("room_id", "")

        def _background_insert_retention_txn(txn: LoggingTransaction) -> bool:
            # Fetch the next batch of rooms (ordered by room ID so progress
            # can resume), joining each room's current retention state event.
            txn.execute(
                """
                SELECT state.room_id, state.event_id, events.json
                FROM current_state_events as state
                LEFT JOIN event_json AS events ON (state.event_id = events.event_id)
                WHERE state.room_id > ? AND state.type = '%s'
                ORDER BY state.room_id ASC
                LIMIT ?;
                """
                % EventTypes.Retention,
                (last_room, batch_size),
            )

            rows = self.db_pool.cursor_to_dict(txn)

            # Returning True signals that the background update is complete.
            if not rows:
                return True

            for row in rows:
                # The LEFT JOIN means `json` is NULL when the event body is
                # missing; fall back to an empty policy so both lifetimes
                # insert as NULL.
                if not row["json"]:
                    retention_policy = {}
                else:
                    ev = db_to_json(row["json"])
                    retention_policy = ev["content"]

                self.db_pool.simple_insert_txn(
                    txn=txn,
                    table="room_retention",
                    values={
                        "room_id": row["room_id"],
                        "event_id": row["event_id"],
                        "min_lifetime": retention_policy.get("min_lifetime"),
                        "max_lifetime": retention_policy.get("max_lifetime"),
                    },
                )

            logger.info("Inserted %d rows into room_retention", len(rows))

            # Persist how far we got so the next batch resumes after this room.
            self.db_pool.updates._background_update_progress_txn(
                txn, "insert_room_retention", {"room_id": rows[-1]["room_id"]}
            )

            # A short batch means we've run out of rooms to process.
            if batch_size > len(rows):
                return True
            else:
                return False

        end = await self.db_pool.runInteraction(
            "insert_room_retention",
            _background_insert_retention_txn,
        )

        if end:
            await self.db_pool.updates._end_background_update("insert_room_retention")

        return batch_size
|
|
2020-01-27 07:30:57 -07:00
|
|
|
async def _background_add_rooms_room_version_column(
|
2021-12-15 11:00:48 -07:00
|
|
|
self, progress: JsonDict, batch_size: int
|
|
|
|
) -> int:
|
2021-02-12 09:01:48 -07:00
|
|
|
"""Background update to go and add room version information to `rooms`
|
2020-01-27 07:30:57 -07:00
|
|
|
table from `current_state_events` table.
|
|
|
|
"""
|
|
|
|
|
|
|
|
last_room_id = progress.get("room_id", "")
|
|
|
|
|
2021-12-15 11:00:48 -07:00
|
|
|
def _background_add_rooms_room_version_column_txn(
|
|
|
|
txn: LoggingTransaction,
|
|
|
|
) -> bool:
|
2020-01-27 07:30:57 -07:00
|
|
|
sql = """
|
|
|
|
SELECT room_id, json FROM current_state_events
|
|
|
|
INNER JOIN event_json USING (room_id, event_id)
|
|
|
|
WHERE room_id > ? AND type = 'm.room.create' AND state_key = ''
|
|
|
|
ORDER BY room_id
|
|
|
|
LIMIT ?
|
|
|
|
"""
|
|
|
|
|
|
|
|
txn.execute(sql, (last_room_id, batch_size))
|
|
|
|
|
|
|
|
updates = []
|
|
|
|
for room_id, event_json in txn:
|
2020-07-16 09:32:19 -06:00
|
|
|
event_dict = db_to_json(event_json)
|
2020-01-27 07:30:57 -07:00
|
|
|
room_version_id = event_dict.get("content", {}).get(
|
|
|
|
"room_version", RoomVersions.V1.identifier
|
|
|
|
)
|
|
|
|
|
|
|
|
creator = event_dict.get("content").get("creator")
|
|
|
|
|
|
|
|
updates.append((room_id, creator, room_version_id))
|
|
|
|
|
|
|
|
if not updates:
|
|
|
|
return True
|
|
|
|
|
|
|
|
new_last_room_id = ""
|
|
|
|
for room_id, creator, room_version_id in updates:
|
|
|
|
# We upsert here just in case we don't already have a row,
|
|
|
|
# mainly for paranoia as much badness would happen if we don't
|
|
|
|
# insert the row and then try and get the room version for the
|
|
|
|
# room.
|
2020-08-05 14:38:57 -06:00
|
|
|
self.db_pool.simple_upsert_txn(
|
2020-01-27 07:30:57 -07:00
|
|
|
txn,
|
|
|
|
table="rooms",
|
|
|
|
keyvalues={"room_id": room_id},
|
|
|
|
values={"room_version": room_version_id},
|
|
|
|
insertion_values={"is_public": False, "creator": creator},
|
|
|
|
)
|
|
|
|
new_last_room_id = room_id
|
|
|
|
|
2020-08-05 14:38:57 -06:00
|
|
|
self.db_pool.updates._background_update_progress_txn(
|
2021-07-12 10:22:54 -06:00
|
|
|
txn,
|
|
|
|
_BackgroundUpdates.ADD_ROOMS_ROOM_VERSION_COLUMN,
|
|
|
|
{"room_id": new_last_room_id},
|
2020-01-27 07:30:57 -07:00
|
|
|
)
|
|
|
|
|
|
|
|
return False
|
|
|
|
|
2020-08-05 14:38:57 -06:00
|
|
|
end = await self.db_pool.runInteraction(
|
2020-01-27 07:30:57 -07:00
|
|
|
"_background_add_rooms_room_version_column",
|
|
|
|
_background_add_rooms_room_version_column_txn,
|
|
|
|
)
|
|
|
|
|
|
|
|
if end:
|
2020-08-05 14:38:57 -06:00
|
|
|
await self.db_pool.updates._end_background_update(
|
2021-07-12 10:22:54 -06:00
|
|
|
_BackgroundUpdates.ADD_ROOMS_ROOM_VERSION_COLUMN
|
2020-01-27 07:30:57 -07:00
|
|
|
)
|
|
|
|
|
|
|
|
return batch_size
|
|
|
|
|
2020-01-07 07:18:43 -07:00
|
|
|
    async def _remove_tombstoned_rooms_from_directory(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Removes any rooms with tombstone events from the room directory

        Nowadays this is handled by the room upgrade handler, but we may have some
        that got left behind

        Args:
            progress: Resume point; `"room_id"` is the last room processed.
            batch_size: Maximum number of rooms to process per batch.

        Returns:
            The number of rooms processed in this batch (0 once complete).
        """

        last_room = progress.get("room_id", "")

        def _get_rooms(txn: LoggingTransaction) -> List[str]:
            # Find still-public rooms that have a tombstone state event,
            # paginating by room ID so the update can resume where it left off.
            txn.execute(
                """
                SELECT room_id
                FROM rooms r
                INNER JOIN current_state_events cse USING (room_id)
                WHERE room_id > ? AND r.is_public
                AND cse.type = '%s' AND cse.state_key = ''
                ORDER BY room_id ASC
                LIMIT ?;
                """
                % EventTypes.Tombstone,
                (last_room, batch_size),
            )

            return [row[0] for row in txn]

        rooms = await self.db_pool.runInteraction(
            "get_tombstoned_directory_rooms", _get_rooms
        )

        if not rooms:
            # No matching rooms left: the background update is finished.
            await self.db_pool.updates._end_background_update(
                _BackgroundUpdates.REMOVE_TOMESTONED_ROOMS_BG_UPDATE
            )
            return 0

        for room_id in rooms:
            logger.info("Removing tombstoned room %s from the directory", room_id)
            await self.set_room_is_public(room_id, False)

        await self.db_pool.updates._background_update_progress(
            _BackgroundUpdates.REMOVE_TOMESTONED_ROOMS_BG_UPDATE, {"room_id": rooms[-1]}
        )

        return len(rooms)
|
|
|
|
    @abstractmethod
    def set_room_is_public(self, room_id: str, is_public: bool) -> Awaitable[None]:
        """Update whether the given room appears in the public room directory.

        Abstract on this store; see the comments below for why.
        """
        # this will need to be implemented if a background update is performed with
        # existing (tombstoned, public) rooms in the database.
        #
        # It's overridden by RoomStore for the synapse master.
        raise NotImplementedError()
|
2021-01-11 09:09:22 -07:00
|
|
|
    async def has_auth_chain_index(self, room_id: str) -> bool:
        """Check if the room has (or can have) a chain cover index.

        Defaults to True if we don't have an entry in `rooms` table nor any
        events for the room.

        Args:
            room_id: The room to check.

        Returns:
            True if the room's `has_auth_chain_index` flag is set, or if the
            room is entirely unknown (no `rooms` row and no events).
        """

        has_auth_chain_index = await self.db_pool.simple_select_one_onecol(
            table="rooms",
            keyvalues={"room_id": room_id},
            retcol="has_auth_chain_index",
            desc="has_auth_chain_index",
            allow_none=True,
        )

        if has_auth_chain_index:
            return True

        # It's possible that we already have events for the room in our DB
        # without a corresponding room entry. If we do then we don't want to
        # mark the room as having an auth chain cover index.
        max_ordering = await self.db_pool.simple_select_one_onecol(
            table="events",
            keyvalues={"room_id": room_id},
            retcol="MAX(stream_ordering)",
            allow_none=True,
            desc="has_auth_chain_index_fallback",
        )

        # MAX(...) is NULL (-> None) iff the room has no events at all.
        return max_ordering is None
|
|
2021-07-12 10:22:54 -06:00
|
|
|
    async def _background_populate_room_depth_min_depth2(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Populate room_depth.min_depth2

        This is to deal with the fact that min_depth was initially created as a
        32-bit integer field.

        Args:
            progress: Resume point; `"last_room"` is the last room processed.
            batch_size: Maximum number of rows to update per transaction.

        Returns:
            The number of rows updated (0 once the update has completed).
        """

        def process(txn: LoggingTransaction) -> int:
            last_room = progress.get("last_room", "")
            # Copy min_depth into min_depth2 for the next `batch_size` rooms,
            # using RETURNING so we can see which rooms were touched.
            txn.execute(
                """
                UPDATE room_depth SET min_depth2=min_depth
                WHERE room_id IN (
                   SELECT room_id FROM room_depth WHERE room_id > ?
                   ORDER BY room_id LIMIT ?
                )
                RETURNING room_id;
                """,
                (last_room, batch_size),
            )
            row_count = txn.rowcount
            if row_count == 0:
                return 0
            # The RETURNING clause yields the updated room IDs; the largest one
            # is the new resume point.
            last_room = max(row[0] for row in txn)
            logger.info("populated room_depth up to %s", last_room)

            self.db_pool.updates._background_update_progress_txn(
                txn,
                _BackgroundUpdates.POPULATE_ROOM_DEPTH_MIN_DEPTH2,
                {"last_room": last_room},
            )
            return row_count

        result = await self.db_pool.runInteraction(
            "_background_populate_min_depth2", process
        )

        if result != 0:
            return result

        await self.db_pool.updates._end_background_update(
            _BackgroundUpdates.POPULATE_ROOM_DEPTH_MIN_DEPTH2
        )
        return 0
|
|
|
|
async def _background_replace_room_depth_min_depth(
|
|
|
|
self, progress: JsonDict, batch_size: int
|
|
|
|
) -> int:
|
|
|
|
"""Drop the old 'min_depth' column and rename 'min_depth2' into its place."""
|
|
|
|
|
|
|
|
def process(txn: Cursor) -> None:
|
|
|
|
for sql in _REPLACE_ROOM_DEPTH_SQL_COMMANDS:
|
|
|
|
logger.info("completing room_depth migration: %s", sql)
|
|
|
|
txn.execute(sql)
|
|
|
|
|
|
|
|
await self.db_pool.runInteraction("_background_replace_room_depth", process)
|
|
|
|
|
|
|
|
await self.db_pool.updates._end_background_update(
|
|
|
|
_BackgroundUpdates.REPLACE_ROOM_DEPTH_MIN_DEPTH,
|
|
|
|
)
|
|
|
|
|
|
|
|
return 0
|
|
|
|
|
2021-09-01 09:27:58 -06:00
|
|
|
async def _background_populate_rooms_creator_column(
|
2021-12-15 11:00:48 -07:00
|
|
|
self, progress: JsonDict, batch_size: int
|
|
|
|
) -> int:
|
2021-09-01 09:27:58 -06:00
|
|
|
"""Background update to go and add creator information to `rooms`
|
|
|
|
table from `current_state_events` table.
|
|
|
|
"""
|
|
|
|
|
|
|
|
last_room_id = progress.get("room_id", "")
|
|
|
|
|
2021-12-15 11:00:48 -07:00
|
|
|
def _background_populate_rooms_creator_column_txn(
|
|
|
|
txn: LoggingTransaction,
|
|
|
|
) -> bool:
|
2021-09-01 09:27:58 -06:00
|
|
|
sql = """
|
|
|
|
SELECT room_id, json FROM event_json
|
|
|
|
INNER JOIN rooms AS room USING (room_id)
|
|
|
|
INNER JOIN current_state_events AS state_event USING (room_id, event_id)
|
|
|
|
WHERE room_id > ? AND (room.creator IS NULL OR room.creator = '') AND state_event.type = 'm.room.create' AND state_event.state_key = ''
|
|
|
|
ORDER BY room_id
|
|
|
|
LIMIT ?
|
|
|
|
"""
|
|
|
|
|
|
|
|
txn.execute(sql, (last_room_id, batch_size))
|
|
|
|
room_id_to_create_event_results = txn.fetchall()
|
|
|
|
|
|
|
|
new_last_room_id = ""
|
|
|
|
for room_id, event_json in room_id_to_create_event_results:
|
|
|
|
event_dict = db_to_json(event_json)
|
|
|
|
|
|
|
|
creator = event_dict.get("content").get(EventContentFields.ROOM_CREATOR)
|
|
|
|
|
|
|
|
self.db_pool.simple_update_txn(
|
|
|
|
txn,
|
|
|
|
table="rooms",
|
|
|
|
keyvalues={"room_id": room_id},
|
|
|
|
updatevalues={"creator": creator},
|
|
|
|
)
|
|
|
|
new_last_room_id = room_id
|
|
|
|
|
|
|
|
if new_last_room_id == "":
|
|
|
|
return True
|
|
|
|
|
|
|
|
self.db_pool.updates._background_update_progress_txn(
|
|
|
|
txn,
|
|
|
|
_BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN,
|
|
|
|
{"room_id": new_last_room_id},
|
|
|
|
)
|
|
|
|
|
|
|
|
return False
|
|
|
|
|
|
|
|
end = await self.db_pool.runInteraction(
|
|
|
|
"_background_populate_rooms_creator_column",
|
|
|
|
_background_populate_rooms_creator_column_txn,
|
|
|
|
)
|
|
|
|
|
|
|
|
if end:
|
|
|
|
await self.db_pool.updates._end_background_update(
|
|
|
|
_BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN
|
|
|
|
)
|
|
|
|
|
|
|
|
return batch_size
|
|
|
|
|
2022-06-29 11:12:45 -06:00
|
|
|
    async def _background_add_room_type_column(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Background update to go and add room_type information to `room_stats_state`
        table from `event_json` table.

        Args:
            progress: Resume point; `"room_id"` is the last room processed.
            batch_size: Maximum number of rooms to process per transaction.

        Returns:
            The number of rooms requested for this batch.
        """

        last_room_id = progress.get("room_id", "")

        def _background_add_room_type_column_txn(
            txn: LoggingTransaction,
        ) -> bool:
            # Fetch the create event for each room after the resume point.
            sql = """
                SELECT state.room_id, json FROM event_json
                INNER JOIN current_state_events AS state USING (event_id)
                WHERE state.room_id > ? AND type = 'm.room.create'
                ORDER BY state.room_id
                LIMIT ?
            """

            txn.execute(sql, (last_room_id, batch_size))
            room_id_to_create_event_results = txn.fetchall()

            new_last_room_id = None
            for room_id, event_json in room_id_to_create_event_results:
                event_dict = db_to_json(event_json)

                room_type = event_dict.get("content", {}).get(
                    EventContentFields.ROOM_TYPE, None
                )
                # Only write back well-formed (string) room types; anything
                # else is skipped but still advances the resume point.
                if isinstance(room_type, str):
                    self.db_pool.simple_update_txn(
                        txn,
                        table="room_stats_state",
                        keyvalues={"room_id": room_id},
                        updatevalues={"room_type": room_type},
                    )

                new_last_room_id = room_id

            if new_last_room_id is None:
                # No rows processed: the update is complete.
                return True

            self.db_pool.updates._background_update_progress_txn(
                txn,
                _BackgroundUpdates.ADD_ROOM_TYPE_COLUMN,
                {"room_id": new_last_room_id},
            )

            return False

        end = await self.db_pool.runInteraction(
            "_background_add_room_type_column",
            _background_add_room_type_column_txn,
        )

        if end:
            await self.db_pool.updates._end_background_update(
                _BackgroundUpdates.ADD_ROOM_TYPE_COLUMN
            )

        return batch_size
|
2019-12-04 10:57:35 -07:00
|
|
|
|
2021-12-15 11:00:48 -07:00
|
|
|
class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
|
2021-12-13 10:05:00 -07:00
|
|
|
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        # ID generator for rows in the `event_reports` table.
        self._event_reports_id_gen = IdGenerator(db_conn, "event_reports", "id")

        # Name of this worker instance; recorded on stream rows we write.
        self._instance_name = hs.get_instance_name()
|
2021-09-01 09:27:58 -06:00
|
|
|
    async def upsert_room_on_join(
        self, room_id: str, room_version: RoomVersion, state_events: List[EventBase]
    ) -> None:
        """Ensure that the room is stored in the table

        Called when we join a room over federation, and overwrites any room version
        currently in the table.

        Args:
            room_id: The room being joined.
            room_version: The version of the room, stored in `rooms.room_version`.
            state_events: The room's current state; must contain the create event.

        Raises:
            StoreError: (400) if the state has no create event, or the create
                event has no (string) creator.
        """
        # It's possible that we already have events for the room in our DB
        # without a corresponding room entry. If we do then we don't want to
        # mark the room as having an auth chain cover index.
        has_auth_chain_index = await self.has_auth_chain_index(room_id)

        create_event = None
        for e in state_events:
            if (e.type, e.state_key) == (EventTypes.Create, ""):
                create_event = e
                break

        if create_event is None:
            # If the state doesn't have a create event then the room is
            # invalid, and it would fail auth checks anyway.
            raise StoreError(400, "No create event in state")

        room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)

        if not isinstance(room_creator, str):
            # If the create event does not have a creator then the room is
            # invalid, and it would fail auth checks anyway.
            raise StoreError(400, "No creator defined on the create event")

        await self.db_pool.simple_upsert(
            desc="upsert_room_on_join",
            table="rooms",
            keyvalues={"room_id": room_id},
            values={"room_version": room_version.identifier},
            insertion_values={
                "is_public": False,
                "creator": room_creator,
                "has_auth_chain_index": has_auth_chain_index,
            },
        )
|
2022-03-01 05:49:54 -07:00
|
|
|
    async def store_partial_state_room(
        self,
        room_id: str,
        servers: Collection[str],
        device_lists_stream_id: int,
        joined_via: str,
    ) -> None:
        """Mark the given room as containing events with partial state.

        We also store additional data that describes _when_ we first partial-joined this
        room, which helps us to keep other homeservers in sync when we finally fully
        join this room.

        We do not include a `join_event_id` here---we need to wait for the join event
        to be persisted first.

        Args:
            room_id: the ID of the room
            servers: other servers known to be in the room
            device_lists_stream_id: the device_lists stream ID at the time when we first
                joined the room.
            joined_via: the server name we requested a partial join from.
        """
        await self.db_pool.runInteraction(
            "store_partial_state_room",
            self._store_partial_state_room_txn,
            room_id,
            servers,
            device_lists_stream_id,
            joined_via,
        )
|
|
|
|
    def _store_partial_state_room_txn(
        self,
        txn: LoggingTransaction,
        room_id: str,
        servers: Collection[str],
        device_lists_stream_id: int,
        joined_via: str,
    ) -> None:
        """Transaction body for `store_partial_state_room`.

        Inserts the partial-state bookkeeping rows and invalidates the caches
        that track a room's partial-state status.
        """
        DatabasePool.simple_insert_txn(
            txn,
            table="partial_state_rooms",
            values={
                "room_id": room_id,
                "device_lists_stream_id": device_lists_stream_id,
                # To be updated later once the join event is persisted.
                "join_event_id": None,
                "joined_via": joined_via,
            },
        )
        # One row per server known to be in the room at join time.
        DatabasePool.simple_insert_many_txn(
            txn,
            table="partial_state_rooms_servers",
            keys=("room_id", "server_name"),
            values=((room_id, s) for s in servers),
        )
        self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,))
        self._invalidate_cache_and_stream(
            txn, self.get_partial_state_servers_at_join, (room_id,)
        )
|
|
2022-09-27 10:26:35 -06:00
|
|
|
async def write_partial_state_rooms_join_event_id(
|
|
|
|
self,
|
|
|
|
room_id: str,
|
|
|
|
join_event_id: str,
|
|
|
|
) -> None:
|
|
|
|
"""Record the join event which resulted from a partial join.
|
|
|
|
|
|
|
|
We do this separately to `store_partial_state_room` because we need to wait for
|
|
|
|
the join event to be persisted. Otherwise we violate a foreign key constraint.
|
|
|
|
"""
|
|
|
|
await self.db_pool.runInteraction(
|
|
|
|
"write_partial_state_rooms_join_event_id",
|
|
|
|
self._write_partial_state_rooms_join_event_id,
|
|
|
|
room_id,
|
|
|
|
join_event_id,
|
|
|
|
)
|
|
|
|
|
|
|
|
    def _write_partial_state_rooms_join_event_id(
        self,
        txn: LoggingTransaction,
        room_id: str,
        join_event_id: str,
    ) -> None:
        """Transaction body for `write_partial_state_rooms_join_event_id`:
        fill in the previously-NULL `join_event_id` for the room.
        """
        DatabasePool.simple_update_txn(
            txn,
            table="partial_state_rooms",
            keyvalues={"room_id": room_id},
            updatevalues={"join_event_id": join_event_id},
        )
|
2020-11-13 09:24:04 -07:00
|
|
|
    async def maybe_store_room_on_outlier_membership(
        self, room_id: str, room_version: RoomVersion
    ) -> None:
        """
        When we receive an invite or any other event over federation that may relate to a room
        we are not in, store the version of the room if we don't already know the room version.

        Args:
            room_id: The room the event relates to.
            room_version: The room's version, stored only if no row exists yet.
        """
        # It's possible that we already have events for the room in our DB
        # without a corresponding room entry. If we do then we don't want to
        # mark the room as having an auth chain cover index.
        has_auth_chain_index = await self.has_auth_chain_index(room_id)

        # `values={}` means an existing row is left untouched; the
        # insertion_values only apply when the row is newly created.
        await self.db_pool.simple_upsert(
            desc="maybe_store_room_on_outlier_membership",
            table="rooms",
            keyvalues={"room_id": room_id},
            values={},
            insertion_values={
                "room_version": room_version.identifier,
                "is_public": False,
                # We don't worry about setting the `creator` here because
                # we don't process any messages in a room while a user is
                # invited (only after the join).
                "creator": "",
                "has_auth_chain_index": has_auth_chain_index,
            },
        )
|
2021-08-17 07:02:50 -06:00
|
|
|
async def set_room_is_public(self, room_id: str, is_public: bool) -> None:
|
|
|
|
await self.db_pool.simple_update_one(
|
|
|
|
table="rooms",
|
|
|
|
keyvalues={"room_id": room_id},
|
|
|
|
updatevalues={"is_public": is_public},
|
|
|
|
desc="set_room_is_public",
|
|
|
|
)
|
2016-09-14 10:01:02 -06:00
|
|
|
|
2016-12-12 07:28:15 -07:00
|
|
|
self.hs.get_notifier().on_new_replication_data()
|
2016-03-21 08:03:20 -06:00
|
|
|
|
2020-07-30 05:20:41 -06:00
|
|
|
    async def set_room_is_public_appservice(
        self, room_id: str, appservice_id: str, network_id: str, is_public: bool
    ) -> None:
        """Edit the appservice/network specific public room list.

        Each appservice can have a number of published room lists associated
        with them, keyed off of an appservice defined `network_id`, which
        basically represents a single instance of a bridge to a third party
        network.

        Args:
            room_id: The room to publish/unpublish.
            appservice_id: The appservice owning the room list.
            network_id: The appservice-defined network the list belongs to.
            is_public: Whether to publish or unpublish the room from the list.
        """

        if is_public:
            # Upsert so repeated publishes are idempotent.
            await self.db_pool.simple_upsert(
                table="appservice_room_list",
                keyvalues={
                    "appservice_id": appservice_id,
                    "network_id": network_id,
                    "room_id": room_id,
                },
                values={},
                insertion_values={
                    "appservice_id": appservice_id,
                    "network_id": network_id,
                    "room_id": room_id,
                },
                desc="set_room_is_public_appservice_true",
            )
        else:
            await self.db_pool.simple_delete(
                table="appservice_room_list",
                keyvalues={
                    "appservice_id": appservice_id,
                    "network_id": network_id,
                    "room_id": room_id,
                },
                desc="set_room_is_public_appservice_false",
            )

        # Let replication/listeners know the directory contents changed.
        self.hs.get_notifier().on_new_replication_data()
|
|
2020-08-27 11:38:41 -06:00
|
|
|
    async def add_event_report(
        self,
        room_id: str,
        event_id: str,
        user_id: str,
        reason: Optional[str],
        content: JsonDict,
        received_ts: int,
    ) -> None:
        """Store a report of an event made by a user.

        Args:
            room_id: Room the reported event belongs to.
            event_id: The reported event.
            user_id: The user making the report.
            reason: Optional free-text reason supplied by the reporter.
            content: The raw report content, stored JSON-encoded.
            received_ts: When the report was received, in milliseconds.
        """
        next_id = self._event_reports_id_gen.get_next()
        await self.db_pool.simple_insert(
            table="event_reports",
            values={
                "id": next_id,
                "received_ts": received_ts,
                "room_id": room_id,
                "event_id": event_id,
                "user_id": user_id,
                "reason": reason,
                "content": json_encoder.encode(content),
            },
            desc="add_event_report",
        )
|
2020-10-26 12:16:37 -06:00
|
|
|
async def get_event_report(self, report_id: int) -> Optional[Dict[str, Any]]:
|
|
|
|
"""Retrieve an event report
|
|
|
|
|
|
|
|
Args:
|
|
|
|
report_id: ID of reported event in database
|
|
|
|
Returns:
|
2022-11-16 08:25:24 -07:00
|
|
|
JSON dict of information from an event report or None if the
|
|
|
|
report does not exist.
|
2020-10-26 12:16:37 -06:00
|
|
|
"""
|
|
|
|
|
2021-12-15 11:00:48 -07:00
|
|
|
def _get_event_report_txn(
|
|
|
|
txn: LoggingTransaction, report_id: int
|
|
|
|
) -> Optional[Dict[str, Any]]:
|
2020-10-26 12:16:37 -06:00
|
|
|
|
|
|
|
sql = """
|
|
|
|
SELECT
|
|
|
|
er.id,
|
|
|
|
er.received_ts,
|
|
|
|
er.room_id,
|
|
|
|
er.event_id,
|
|
|
|
er.user_id,
|
|
|
|
er.content,
|
|
|
|
events.sender,
|
|
|
|
room_stats_state.canonical_alias,
|
|
|
|
room_stats_state.name,
|
|
|
|
event_json.json AS event_json
|
|
|
|
FROM event_reports AS er
|
|
|
|
LEFT JOIN events
|
|
|
|
ON events.event_id = er.event_id
|
|
|
|
JOIN event_json
|
|
|
|
ON event_json.event_id = er.event_id
|
|
|
|
JOIN room_stats_state
|
|
|
|
ON room_stats_state.room_id = er.room_id
|
|
|
|
WHERE er.id = ?
|
|
|
|
"""
|
|
|
|
|
|
|
|
txn.execute(sql, [report_id])
|
|
|
|
row = txn.fetchone()
|
|
|
|
|
|
|
|
if not row:
|
|
|
|
return None
|
|
|
|
|
|
|
|
event_report = {
|
|
|
|
"id": row[0],
|
|
|
|
"received_ts": row[1],
|
|
|
|
"room_id": row[2],
|
|
|
|
"event_id": row[3],
|
|
|
|
"user_id": row[4],
|
|
|
|
"score": db_to_json(row[5]).get("score"),
|
|
|
|
"reason": db_to_json(row[5]).get("reason"),
|
|
|
|
"sender": row[6],
|
|
|
|
"canonical_alias": row[7],
|
|
|
|
"name": row[8],
|
|
|
|
"event_json": db_to_json(row[9]),
|
|
|
|
}
|
|
|
|
|
|
|
|
return event_report
|
|
|
|
|
|
|
|
return await self.db_pool.runInteraction(
|
|
|
|
"get_event_report", _get_event_report_txn, report_id
|
|
|
|
)
|
|
|
|
|
2020-09-22 11:15:04 -06:00
|
|
|
    async def get_event_reports_paginate(
        self,
        start: int,
        limit: int,
        direction: str = "b",
        user_id: Optional[str] = None,
        room_id: Optional[str] = None,
    ) -> Tuple[List[Dict[str, Any]], int]:
        """Retrieve a paginated list of event reports

        Args:
            start: event offset to begin the query from
            limit: number of rows to retrieve
            direction: Whether to fetch the most recent first (`"b"`) or the
                oldest first (`"f"`)
            user_id: search for user_id. Ignored if user_id is None
            room_id: search for room_id. Ignored if room_id is None
        Returns:
            Tuple of:
                json list of event reports
                total number of event reports matching the filter criteria
        """

        def _get_event_reports_paginate_txn(
            txn: LoggingTransaction,
        ) -> Tuple[List[Dict[str, Any]], int]:
            filters = []
            args: List[object] = []

            # Substring match on user/room IDs when provided.
            if user_id:
                filters.append("er.user_id LIKE ?")
                args.extend(["%" + user_id + "%"])
            if room_id:
                filters.append("er.room_id LIKE ?")
                args.extend(["%" + room_id + "%"])

            if direction == "b":
                order = "DESC"
            else:
                order = "ASC"

            where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else ""

            # We join on room_stats_state despite not using any columns from it
            # because the join can influence the number of rows returned;
            # e.g. a room that doesn't have state, maybe because it was deleted.
            # The query returning the total count should be consistent with
            # the query returning the results.
            sql = """
                SELECT COUNT(*) as total_event_reports
                FROM event_reports AS er
                JOIN room_stats_state ON room_stats_state.room_id = er.room_id
                {}
            """.format(
                where_clause
            )
            txn.execute(sql, args)
            count = cast(Tuple[int], txn.fetchone())[0]

            sql = """
                SELECT
                    er.id,
                    er.received_ts,
                    er.room_id,
                    er.event_id,
                    er.user_id,
                    er.content,
                    events.sender,
                    room_stats_state.canonical_alias,
                    room_stats_state.name
                FROM event_reports AS er
                LEFT JOIN events
                    ON events.event_id = er.event_id
                JOIN room_stats_state
                    ON room_stats_state.room_id = er.room_id
                {where_clause}
                ORDER BY er.received_ts {order}
                LIMIT ?
                OFFSET ?
            """.format(
                where_clause=where_clause,
                order=order,
            )

            args += [limit, start]
            txn.execute(sql, args)

            event_reports = []
            for row in txn:
                try:
                    # Reports with unparseable content are logged and skipped
                    # rather than failing the whole page.
                    s = db_to_json(row[5]).get("score")
                    r = db_to_json(row[5]).get("reason")
                except Exception:
                    logger.error("Unable to parse json from event_reports: %s", row[0])
                    continue
                event_reports.append(
                    {
                        "id": row[0],
                        "received_ts": row[1],
                        "room_id": row[2],
                        "event_id": row[3],
                        "user_id": row[4],
                        "score": s,
                        "reason": r,
                        "sender": row[6],
                        "canonical_alias": row[7],
                        "name": row[8],
                    }
                )

            return event_reports, count

        return await self.db_pool.runInteraction(
            "get_event_reports_paginate", _get_event_reports_paginate_txn
        )
|
2020-07-30 05:20:41 -06:00
|
|
|
    async def block_room(self, room_id: str, user_id: str) -> None:
        """Marks the room as blocked.

        Can be called multiple times (though we'll only track the last user to
        block this room).

        Can be called on a room unknown to this homeserver.

        Args:
            room_id: Room to block
            user_id: Who blocked it
        """
        # Upsert so repeated blocks are idempotent; `insertion_values` means the
        # original blocker is kept if the row already exists.
        await self.db_pool.simple_upsert(
            table="blocked_rooms",
            keyvalues={"room_id": room_id},
            values={},
            insertion_values={"user_id": user_id},
            desc="block_room",
        )
        # Invalidate `is_room_blocked` here and on other workers.
        await self.db_pool.runInteraction(
            "block_room_invalidation",
            self._invalidate_cache_and_stream,
            self.is_room_blocked,
            (room_id,),
        )
|
|
|
|
async def unblock_room(self, room_id: str) -> None:
|
|
|
|
"""Remove the room from blocking list.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
room_id: Room to unblock
|
|
|
|
"""
|
|
|
|
await self.db_pool.simple_delete(
|
|
|
|
table="blocked_rooms",
|
|
|
|
keyvalues={"room_id": room_id},
|
|
|
|
desc="unblock_room",
|
|
|
|
)
|
|
|
|
await self.db_pool.runInteraction(
|
|
|
|
"block_room_invalidation",
|
|
|
|
self._invalidate_cache_and_stream,
|
|
|
|
self.is_room_blocked,
|
|
|
|
(room_id,),
|
|
|
|
)
|
2022-12-05 06:07:55 -07:00
|
|
|
|
|
|
|
async def clear_partial_state_room(self, room_id: str) -> bool:
    """Clears the partial state flag for a room.

    Args:
        room_id: The room whose partial state flag is to be cleared.

    Returns:
        `True` if the partial state flag has been cleared successfully.

        `False` if the partial state flag could not be cleared because the room
        still contains events with partial state.
    """
    try:
        # Allocate a stream ID first so the un-partial-stated notification
        # is written within the same transaction as the cleanup.
        async with self._un_partial_stated_rooms_stream_id_gen.get_next() as stream_id:
            await self.db_pool.runInteraction(
                "clear_partial_state_room",
                self._clear_partial_state_room_txn,
                room_id,
                stream_id,
            )
    except self.db_pool.engine.module.IntegrityError as err:
        # Assume that any `IntegrityError`s are due to partial state events.
        logger.info(
            "Exception while clearing lazy partial-state-room %s, retrying: %s",
            room_id,
            err,
        )
        return False
    else:
        return True
|
|
|
|
|
|
|
|
def _clear_partial_state_room_txn(
    self,
    txn: LoggingTransaction,
    room_id: str,
    un_partial_state_room_stream_id: int,
) -> None:
    """Transaction body for `clear_partial_state_room`.

    Deletes the room's partial-state bookkeeping rows, invalidates the
    related caches, publishes the change on the un-partial-stated room
    stream, and prunes pending remote device list updates that are no
    longer needed by any partially-joined room.

    NOTE(review): the caller treats any `IntegrityError` raised here as
    "room still has partial-state events" — presumably via a foreign-key
    constraint on `partial_state_rooms`; confirm against the schema.
    """
    # Delete the per-server rows before the room row itself.
    DatabasePool.simple_delete_txn(
        txn,
        table="partial_state_rooms_servers",
        keyvalues={"room_id": room_id},
    )
    DatabasePool.simple_delete_one_txn(
        txn,
        table="partial_state_rooms",
        keyvalues={"room_id": room_id},
    )

    # Drop the cached partial-state lookups for this room on all workers.
    for cached_fn in (
        self.is_partial_state_room,
        self.get_partial_state_servers_at_join,
    ):
        self._invalidate_cache_and_stream(txn, cached_fn, (room_id,))

    # Record the transition on the un-partial-stated room stream.
    DatabasePool.simple_insert_txn(
        txn,
        "un_partial_stated_room_stream",
        {
            "stream_id": un_partial_state_room_stream_id,
            "instance_name": self._instance_name,
            "room_id": room_id,
        },
    )

    # We now delete anything from `device_lists_remote_pending` with a
    # stream ID less than the minimum
    # `partial_state_rooms.device_lists_stream_id`, as we no longer need them.
    min_device_lists_stream_id = DatabasePool.simple_select_one_onecol_txn(
        txn,
        table="partial_state_rooms",
        keyvalues={},
        retcol="MIN(device_lists_stream_id)",
        allow_none=True,
    )
    if min_device_lists_stream_id is None:
        # There are no rooms being currently partially joined, so we delete everything.
        txn.execute("DELETE FROM device_lists_remote_pending")
    else:
        txn.execute(
            """
            DELETE FROM device_lists_remote_pending
            WHERE stream_id <= ?
            """,
            (min_device_lists_stream_id,),
        )
|