2014-08-12 08:10:52 -06:00
|
|
|
# -*- coding: utf-8 -*-
|
2016-01-06 21:26:29 -07:00
|
|
|
# Copyright 2014-2016 OpenMarket Ltd
|
2014-08-12 08:10:52 -06:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
2014-08-19 09:40:25 -06:00
|
|
|
""" This module is responsible for getting events from the DB for pagination
|
|
|
|
and event streaming.
|
|
|
|
|
|
|
|
The order it returns events in depends on whether we are streaming forwards or
|
|
|
|
are paginating backwards. We do this because we want to handle out of order
|
|
|
|
messages nicely, while still returning them in the correct order when we
|
|
|
|
paginate backwards.
|
|
|
|
|
|
|
|
This is implemented by keeping two ordering columns: stream_ordering and
|
|
|
|
topological_ordering. Stream ordering is basically insertion/received order
|
2015-10-28 07:45:56 -06:00
|
|
|
(except for events from backfill requests). The topological_ordering is a
|
2014-08-19 09:40:25 -06:00
|
|
|
weak ordering of events based on the pdu graph.
|
|
|
|
|
|
|
|
This means that we have to have two different types of tokens, depending on
|
|
|
|
what sort order was used:
|
|
|
|
- stream tokens are of the form: "s%d", which maps directly to the column
|
|
|
|
- topological tokens: "t%d-%d", where the integers map to the topological
|
|
|
|
and stream ordering columns respectively.
|
|
|
|
"""
|
|
|
|
|
2014-08-14 11:40:50 -06:00
|
|
|
from twisted.internet import defer
|
2014-08-12 20:14:34 -06:00
|
|
|
|
2018-03-01 06:56:03 -07:00
|
|
|
from synapse.storage._base import SQLBaseStore
|
|
|
|
from synapse.storage.events import EventsWorkerStore
|
|
|
|
|
2015-05-11 11:00:33 -06:00
|
|
|
from synapse.types import RoomStreamToken
|
2018-03-01 06:56:03 -07:00
|
|
|
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
2018-04-27 04:29:27 -06:00
|
|
|
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
|
2018-05-21 10:41:10 -06:00
|
|
|
from synapse.storage.chunk_ordered_table import ChunkDBOrderedListStore
|
2018-05-08 09:19:33 -06:00
|
|
|
from synapse.storage.engines import PostgresEngine
|
2014-08-14 11:01:39 -06:00
|
|
|
|
2018-03-01 06:56:03 -07:00
|
|
|
import abc
|
2014-08-12 08:10:52 -06:00
|
|
|
import logging
|
|
|
|
|
2018-04-28 05:57:00 -06:00
|
|
|
from six.moves import range
|
2018-05-09 04:18:23 -06:00
|
|
|
from collections import namedtuple
|
2018-04-28 05:57:00 -06:00
|
|
|
|
2014-08-12 08:10:52 -06:00
|
|
|
|
2014-08-14 11:01:39 -06:00
|
|
|
logger = logging.getLogger(__name__)


# Maximum number of events returned by a single stream request.
MAX_STREAM_SIZE = 1000


# Token type names, used to distinguish "s%d" stream tokens from
# "t%d-%d" topological tokens (see the module docstring).
_STREAM_TOKEN = "stream"
_TOPOLOGICAL_TOKEN = "topological"


# Used as return values for pagination APIs.
# chunk_id/topological_ordering are None for rows fetched purely by
# stream ordering.
_EventDictReturn = namedtuple("_EventDictReturn", (
    "event_id", "chunk_id", "topological_ordering", "stream_ordering",
))
|
|
|
|
|
|
|
|
|
2016-07-05 03:39:13 -06:00
|
|
|
def lower_bound(token, engine, inclusive=False):
    """Build an SQL condition matching rows ordered before `token`.

    Args:
        token (RoomStreamToken): position to compare against.
        engine: database engine, used to pick the best SQL form.
        inclusive (bool): if True, rows exactly at `token` also match.

    Returns:
        str: an SQL boolean expression over the ordering columns.
    """
    cmp = "<=" if inclusive else "<"

    # Pure stream tokens compare on the single stream_ordering column.
    if token.chunk is None:
        return "(%d %s %s)" % (token.stream, cmp, "stream_ordering")

    if isinstance(engine, PostgresEngine):
        # Postgres doesn't optimise ``(x < a) OR (x=a AND y<b)`` as well
        # as it optimises ``(x,y) < (a,b)`` on multicolumn indexes, so use
        # the row-value form there.
        return "(chunk_id = %d AND (%d,%d) %s (%s,%s))" % (
            token.chunk, token.topological, token.stream, cmp,
            "topological_ordering", "stream_ordering",
        )

    # Generic SQL: expanded lexicographic comparison within the chunk.
    return "(chunk_id = %d AND (%d < %s OR (%d = %s AND %d %s %s)))" % (
        token.chunk,
        token.topological, "topological_ordering",
        token.topological, "topological_ordering",
        token.stream, cmp, "stream_ordering",
    )
|
2015-01-13 07:14:21 -07:00
|
|
|
|
2014-08-19 07:19:48 -06:00
|
|
|
|
2016-07-05 03:39:13 -06:00
|
|
|
def upper_bound(token, engine, inclusive=True):
    """Build an SQL condition matching rows ordered after `token`.

    Args:
        token (RoomStreamToken): position to compare against.
        engine: database engine, used to pick the best SQL form.
        inclusive (bool): if True, rows exactly at `token` also match.

    Returns:
        str: an SQL boolean expression over the ordering columns.
    """
    cmp = ">=" if inclusive else ">"

    # Pure stream tokens compare on the single stream_ordering column.
    if token.chunk is None:
        return "(%d %s %s)" % (token.stream, cmp, "stream_ordering")

    if isinstance(engine, PostgresEngine):
        # Postgres doesn't optimise ``(x > a) OR (x=a AND y>b)`` as well
        # as it optimises ``(x,y) > (a,b)`` on multicolumn indexes, so use
        # the row-value form there.
        return "(chunk_id = %d AND (%d,%d) %s (%s,%s))" % (
            token.chunk, token.topological, token.stream, cmp,
            "topological_ordering", "stream_ordering",
        )

    # Generic SQL: expanded lexicographic comparison within the chunk.
    return "(chunk_id = %d AND (%d > %s OR (%d = %s AND %d %s %s)))" % (
        token.chunk,
        token.topological, "topological_ordering",
        token.topological, "topological_ordering",
        token.stream, cmp, "stream_ordering",
    )
|
2014-08-19 07:19:48 -06:00
|
|
|
|
|
|
|
|
2016-07-14 08:35:27 -06:00
|
|
|
def filter_to_clause(event_filter):
    """Convert an event Filter into an SQL WHERE fragment and its args.

    Args:
        event_filter (Filter|None): filter to convert; falsy gives no-op.

    Returns:
        tuple[str, list]: ("clause AND clause...", parameter values). The
        clause string is empty when the filter is falsy.
    """
    # NB: This may create SQL clauses that don't optimise well (and we don't
    # have indices on all possible clauses). E.g. it may create
    # "room_id == X AND room_id != X", which postgres doesn't optimise.

    if not event_filter:
        return "", []

    clauses = []
    args = []

    def accept_any(column, values):
        # OR together one equality check per accepted value.
        clauses.append(
            "(%s)" % " OR ".join("%s = ?" % (column,) for _ in values)
        )
        args.extend(values)

    def reject_each(column, values):
        # One standalone inequality clause per rejected value.
        for value in values:
            clauses.append("%s != ?" % (column,))
            args.append(value)

    if event_filter.types:
        accept_any("type", event_filter.types)
    reject_each("type", event_filter.not_types)

    if event_filter.senders:
        accept_any("sender", event_filter.senders)
    reject_each("sender", event_filter.not_senders)

    if event_filter.rooms:
        accept_any("room_id", event_filter.rooms)
    reject_each("room_id", event_filter.not_rooms)

    if event_filter.contains_url:
        clauses.append("contains_url = ?")
        args.append(event_filter.contains_url)

    return " AND ".join(clauses), args
|
|
|
|
|
|
|
|
|
2018-03-01 06:56:03 -07:00
|
|
|
class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
|
2018-03-01 09:20:57 -07:00
|
|
|
"""This is an abstract base class where subclasses must implement
|
|
|
|
`get_room_max_stream_ordering` and `get_room_min_stream_ordering`
|
|
|
|
which can be called in the initializer.
|
|
|
|
"""
|
|
|
|
|
2018-03-01 06:56:03 -07:00
|
|
|
__metaclass__ = abc.ABCMeta
|
2015-02-25 08:00:59 -07:00
|
|
|
|
2018-03-01 06:56:03 -07:00
|
|
|
def __init__(self, db_conn, hs):
    """Set up the stream-change caches used to cheaply answer "has this
    room / this user's membership changed since stream position X".

    Args:
        db_conn: database connection, used to prefill the caches.
        hs (HomeServer)
    """
    super(StreamWorkerStore, self).__init__(db_conn, hs)

    events_max = self.get_room_max_stream_ordering()
    # Prefill the per-room cache from the events table so recent-change
    # queries don't need to hit the database on startup.
    event_cache_prefill, min_event_val = self._get_cache_dict(
        db_conn, "events",
        entity_column="room_id",
        stream_column="stream_ordering",
        max_value=events_max,
    )
    self._events_stream_cache = StreamChangeCache(
        "EventsRoomStreamChangeCache", min_event_val,
        prefilled_cache=event_cache_prefill,
    )
    # Membership cache starts empty at the current max stream position.
    self._membership_stream_cache = StreamChangeCache(
        "MembershipStreamChangeCache", events_max,
    )

    # Stream position at startup; recorded for later comparisons.
    self._stream_order_on_start = self.get_room_max_stream_ordering()
|
2015-02-25 08:00:59 -07:00
|
|
|
|
2018-03-01 09:20:57 -07:00
|
|
|
@abc.abstractmethod
def get_room_max_stream_ordering(self):
    """Return the current maximum stream ordering (implemented by subclasses)."""
    raise NotImplementedError()
|
|
|
|
|
|
|
|
@abc.abstractmethod
def get_room_min_stream_ordering(self):
    """Return the current minimum stream ordering (implemented by subclasses)."""
    raise NotImplementedError()
|
|
|
|
|
2016-01-27 02:54:30 -07:00
|
|
|
@defer.inlineCallbacks
def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0,
                                     order='DESC'):
    """Get new events in stream ordering for each of the given rooms.

    Args:
        room_ids (iterable[str])
        from_key (str): stream token to fetch from.
        to_key (str): stream token to fetch up to.
        limit (int): maximum number of events per room.
        order (str): "DESC" or "ASC"; which end of the range to return
            when limited (passed through to
            `get_room_events_stream_for_room`).

    Returns:
        Deferred[dict[str, tuple]]: map from room id to the
        (events, token) result of `get_room_events_stream_for_room`.
    """
    from_id = RoomStreamToken.parse_stream_token(from_key).stream

    # Drop rooms the cache says definitely haven't changed since from_id.
    room_ids = yield self._events_stream_cache.get_entities_changed(
        room_ids, from_id
    )

    if not room_ids:
        defer.returnValue({})

    results = {}
    room_ids = list(room_ids)
    # Fetch in batches of 20 rooms, querying rooms within a batch in
    # parallel.
    for rm_ids in (room_ids[i:i + 20] for i in range(0, len(room_ids), 20)):
        res = yield make_deferred_yieldable(defer.gatherResults([
            run_in_background(
                self.get_room_events_stream_for_room,
                room_id, from_key, to_key, limit, order=order,
            )
            for room_id in rm_ids
        ], consumeErrors=True))
        results.update(dict(zip(rm_ids, res)))

    defer.returnValue(results)
|
|
|
|
|
2017-02-01 10:33:16 -07:00
|
|
|
def get_rooms_that_changed(self, room_ids, from_key):
    """Given a list of rooms and a token, return rooms where there may have
    been changes.

    Args:
        room_ids (list)
        from_key (str): The room_key portion of a StreamToken

    Returns:
        set[str]: the subset of `room_ids` that may have changed (this is
        a cache check only, so may include false positives).
    """
    from_key = RoomStreamToken.parse_stream_token(from_key).stream
    return set(
        room_id for room_id in room_ids
        if self._events_stream_cache.has_entity_changed(room_id, from_key)
    )
|
|
|
|
|
2016-01-27 02:54:30 -07:00
|
|
|
@defer.inlineCallbacks
def get_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0,
                                    order='DESC'):
    """Get new room events in stream ordering since `from_key`.

    Args:
        room_id (str)
        from_key (str): Token from which no events are returned before
        to_key (str): Token from which no events are returned after. (This
            is typically the current stream token)
        limit (int): Maximum number of events to return
        order (str): Either "DESC" or "ASC". Determines which events are
            returned when the result is limited. If "DESC" then the most
            recent `limit` events are returned, otherwise returns the
            oldest `limit` events.

    Returns:
        Deferred[tuple[list[FrozenEvent], str]]: Returns the list of
        events (in ascending order) and the token from the start of
        the chunk of events returned.
    """
    if from_key == to_key:
        # Empty range: nothing can have happened.
        defer.returnValue(([], from_key))

    from_id = RoomStreamToken.parse_stream_token(from_key).stream
    to_id = RoomStreamToken.parse_stream_token(to_key).stream

    # Cheap cache check before hitting the database.
    has_changed = yield self._events_stream_cache.has_entity_changed(
        room_id, from_id
    )

    if not has_changed:
        defer.returnValue(([], from_key))

    def f(txn):
        sql = (
            "SELECT event_id, stream_ordering FROM events WHERE"
            " room_id = ?"
            " AND not outlier"
            " AND stream_ordering > ? AND stream_ordering <= ?"
            " ORDER BY stream_ordering %s LIMIT ?"
        ) % (order,)
        txn.execute(sql, (room_id, from_id, to_id, limit))

        # Stream-ordered rows: no chunk/topological info.
        rows = [_EventDictReturn(row[0], None, None, row[1]) for row in txn]
        return rows

    rows = yield self.runInteraction("get_room_events_stream_for_room", f)

    ret = yield self._get_events(
        [r.event_id for r in rows],
        get_prev_content=True
    )

    self._set_before_and_after(ret, rows, topo_order=from_id is None)

    # Callers always get ascending order, regardless of the SQL order.
    if order.lower() == "desc":
        ret.reverse()

    if rows:
        key = "s%d" % min(r.stream_ordering for r in rows)
    else:
        # Assume we didn't get anything because there was nothing to
        # get.
        key = from_key

    defer.returnValue((ret, key))
|
|
|
|
|
|
|
|
@defer.inlineCallbacks
def get_membership_changes_for_user(self, user_id, from_key, to_key):
    """Get the user's membership events between two stream tokens.

    Args:
        user_id (str)
        from_key (str): stream token to fetch from (exclusive).
        to_key (str): stream token to fetch up to (inclusive).

    Returns:
        Deferred[list[FrozenEvent]]: membership events in ascending
        stream order.
    """
    from_id = RoomStreamToken.parse_stream_token(from_key).stream
    to_id = RoomStreamToken.parse_stream_token(to_key).stream

    if from_key == to_key:
        defer.returnValue([])

    # Cheap cache check before hitting the database.
    if from_id:
        has_changed = self._membership_stream_cache.has_entity_changed(
            user_id, int(from_id)
        )
        if not has_changed:
            defer.returnValue([])

    def f(txn):
        sql = (
            "SELECT m.event_id, stream_ordering FROM events AS e,"
            " room_memberships AS m"
            " WHERE e.event_id = m.event_id"
            " AND m.user_id = ?"
            " AND e.stream_ordering > ? AND e.stream_ordering <= ?"
            " ORDER BY e.stream_ordering ASC"
        )
        txn.execute(sql, (user_id, from_id, to_id,))

        # Stream-ordered rows: no chunk/topological info.
        rows = [_EventDictReturn(row[0], None, None, row[1]) for row in txn]

        return rows

    rows = yield self.runInteraction("get_membership_changes_for_user", f)

    ret = yield self._get_events(
        [r.event_id for r in rows],
        get_prev_content=True
    )

    self._set_before_and_after(ret, rows, topo_order=False)

    defer.returnValue(ret)
|
2016-01-27 02:54:30 -07:00
|
|
|
|
2016-03-23 05:42:50 -06:00
|
|
|
@defer.inlineCallbacks
def get_recent_events_for_room(self, room_id, limit, end_token):
    """Get the most recent events in the room in topological ordering.

    Args:
        room_id (str)
        limit (int)
        end_token (str): The stream token representing now.

    Returns:
        Deferred[tuple[list[FrozenEvent], str]]: Returns a list of
        events and a token pointing to the start of the returned
        events.
        The events returned are in ascending order.
    """
    # Fetch row metadata first, then hydrate the full events.
    rows, token = yield self.get_recent_event_ids_for_room(
        room_id, limit, end_token,
    )

    logger.debug("stream before")
    events = yield self._get_events(
        [r.event_id for r in rows],
        get_prev_content=True
    )
    logger.debug("stream after")

    self._set_before_and_after(events, rows)

    defer.returnValue((events, token))
|
2014-08-15 06:58:28 -06:00
|
|
|
|
2018-05-09 03:53:29 -06:00
|
|
|
@defer.inlineCallbacks
def get_recent_event_ids_for_room(self, room_id, limit, end_token):
    """Get the most recent events in the room in topological ordering.

    Args:
        room_id (str)
        limit (int)
        end_token (str): The stream token representing now.

    Returns:
        Deferred[tuple[list[_EventDictReturn], str]]: Returns a list of
        _EventDictReturn and a token pointing to the start of the returned
        events.
        The events returned are in ascending order.
    """
    # Allow a zero limit here, and no-op.
    if limit == 0:
        defer.returnValue(([], end_token))

    end_token = RoomStreamToken.parse(end_token)

    # Paginate backwards from `end_token`; the third return value (the
    # list of chunks iterated) is not needed here.
    rows, token, _ = yield self.runInteraction(
        "get_recent_event_ids_for_room", self._paginate_room_events_txn,
        room_id, from_token=end_token, limit=limit,
    )

    # We want to return the results in ascending order.
    rows.reverse()

    defer.returnValue((rows, token))
|
2014-08-15 06:58:28 -06:00
|
|
|
|
2018-03-01 16:20:54 -07:00
|
|
|
def get_room_event_after_stream_ordering(self, room_id, stream_ordering):
    """Gets details of the first event in a room at or after a stream ordering

    Args:
        room_id (str):
        stream_ordering (int):

    Returns:
        Deferred[(int, int, str)]:
            (stream ordering, topological ordering, event_id)
    """
    def _f(txn):
        # Take the earliest non-outlier event at/after the position.
        txn.execute(
            "SELECT stream_ordering, topological_ordering, event_id"
            " FROM events"
            " WHERE room_id = ? AND stream_ordering >= ?"
            " AND NOT outlier"
            " ORDER BY stream_ordering"
            " LIMIT 1",
            (room_id, stream_ordering, ),
        )
        return txn.fetchone()

    return self.runInteraction(
        "get_room_event_after_stream_ordering", _f,
    )
|
|
|
|
|
2015-04-09 04:41:36 -06:00
|
|
|
@defer.inlineCallbacks
def get_room_events_max_id(self, room_id=None):
    """Returns the current token for rooms stream.

    By default, it returns the current global stream token. Specifying a
    `room_id` causes it to return the current room specific topological
    token.

    Raises:
        Exception: if a `room_id` is given and the server has no forward
            extremity for it (i.e. is not in the room).
    """
    if room_id is None:
        token = yield self.get_room_max_stream_ordering()
        defer.returnValue(str(RoomStreamToken(None, None, token)))
    else:
        token = yield self.runInteraction(
            "get_room_events_max_id", self._get_topological_token_for_room_txn,
            room_id,
        )
        if not token:
            raise Exception("Server not in room")
        defer.returnValue(str(token))
|
2015-05-12 03:28:10 -06:00
|
|
|
|
2015-09-09 06:25:22 -06:00
|
|
|
def get_stream_token_for_event(self, event_id):
    """The stream token for an event
    Args:
        event_id(str): The id of the event to look up a stream token for.
    Raises:
        StoreError if the event wasn't in the database.
    Returns:
        A deferred "s%d" stream token.
    """
    def _to_token(stream_ordering):
        # Stream-only token: no chunk or topological component.
        return str(RoomStreamToken(None, None, stream_ordering))

    d = self._simple_select_one_onecol(
        table="events",
        keyvalues={"event_id": event_id},
        retcol="stream_ordering",
    )
    return d.addCallback(_to_token)
|
2015-09-09 10:31:09 -06:00
|
|
|
|
|
|
|
def get_topological_token_for_event(self, event_id):
    """The stream token for an event
    Args:
        event_id(str): The id of the event to look up a stream token for.
    Raises:
        StoreError if the event wasn't in the database.
    Returns:
        A deferred topological token.
    """
    def _to_token(row):
        return str(RoomStreamToken(
            row["chunk_id"],
            row["topological_ordering"],
            row["stream_ordering"],
        ))

    d = self._simple_select_one(
        table="events",
        keyvalues={"event_id": event_id},
        retcols=("stream_ordering", "topological_ordering", "chunk_id"),
        desc="get_topological_token_for_event",
    )
    return d.addCallback(_to_token)
|
|
|
|
|
|
|
|
def _get_topological_token_for_room_txn(self, txn, room_id):
|
|
|
|
sql = """
|
|
|
|
SELECT chunk_id, topological_ordering, stream_ordering
|
|
|
|
FROM events
|
|
|
|
NATURAL JOIN event_forward_extremities
|
|
|
|
WHERE room_id = ?
|
|
|
|
ORDER BY stream_ordering DESC
|
|
|
|
LIMIT 1
|
|
|
|
"""
|
|
|
|
txn.execute(sql, (room_id,))
|
|
|
|
row = txn.fetchone()
|
|
|
|
if row:
|
|
|
|
c, t, s = row
|
|
|
|
return RoomStreamToken(c, t, s)
|
|
|
|
return None
|
2015-09-09 06:25:22 -06:00
|
|
|
|
2016-07-05 08:30:25 -06:00
|
|
|
def get_max_topological_token(self, room_id, stream_key):
    """Return the max topological_ordering of events in the room that come
    before `stream_key`, or 0 if there are none.

    Args:
        room_id (str)
        stream_key (int): stream ordering to look before.

    Returns:
        Deferred[int]
    """
    query = (
        "SELECT max(topological_ordering) FROM events"
        " WHERE room_id = ? AND stream_ordering < ?"
    )
    d = self._execute(
        "get_max_topological_token", None,
        query, room_id, stream_key,
    )
    d.addCallback(
        lambda r: r[0][0] if r else 0
    )
    return d
|
|
|
|
|
2016-10-24 06:35:51 -06:00
|
|
|
def _get_max_topological_txn(self, txn, room_id):
|
2015-05-12 03:28:10 -06:00
|
|
|
txn.execute(
|
|
|
|
"SELECT MAX(topological_ordering) FROM events"
|
2016-10-24 06:35:51 -06:00
|
|
|
" WHERE room_id = ?",
|
|
|
|
(room_id,)
|
2015-05-12 03:28:10 -06:00
|
|
|
)
|
|
|
|
|
|
|
|
rows = txn.fetchall()
|
|
|
|
return rows[0][0] if rows else 0
|
2014-08-26 07:31:48 -06:00
|
|
|
|
2015-01-30 04:32:35 -07:00
|
|
|
@staticmethod
def _set_before_and_after(events, rows, topo_order=True):
    """Inserts ordering information to events' internal metadata from
    the DB rows.

    Args:
        events (list[FrozenEvent])
        rows (list[_EventDictReturn])
        topo_order (bool): Whether the events were ordered topologically
            or by stream ordering. If true then all rows should have a non
            null topological_ordering.
    """
    for event, row in zip(events, rows):
        meta = event.internal_metadata
        meta.stream_ordering = row.stream_ordering

        if topo_order:
            meta.before = str(RoomStreamToken(
                row.chunk_id, row.topological_ordering,
                row.stream_ordering - 1,
            ))
            meta.after = str(RoomStreamToken(
                row.chunk_id, row.topological_ordering,
                row.stream_ordering,
            ))
        else:
            # Stream-ordered rows have no chunk/topological component.
            meta.before = str(RoomStreamToken(None, None, row.stream_ordering - 1))
            meta.after = str(RoomStreamToken(None, None, row.stream_ordering))
|
2015-10-28 07:45:56 -06:00
|
|
|
|
|
|
|
@defer.inlineCallbacks
def get_events_around(self, room_id, event_id, before_limit, after_limit):
    """Retrieve events and pagination tokens around a given event in a
    room.

    Args:
        room_id (str)
        event_id (str)
        before_limit (int)
        after_limit (int)

    Returns:
        dict: with keys "events_before", "events_after" (lists of
        FrozenEvent) and "start", "end" (pagination tokens).
    """
    # Fetch the surrounding event ids and tokens in one transaction.
    results = yield self.runInteraction(
        "get_events_around", self._get_events_around_txn,
        room_id, event_id, before_limit, after_limit
    )

    events_before = yield self._get_events(
        [e for e in results["before"]["event_ids"]],
        get_prev_content=True
    )

    events_after = yield self._get_events(
        [e for e in results["after"]["event_ids"]],
        get_prev_content=True
    )

    defer.returnValue({
        "events_before": events_before,
        "events_after": events_after,
        "start": results["before"]["token"],
        "end": results["after"]["token"],
    })
|
|
|
|
|
|
|
|
def _get_events_around_txn(self, txn, room_id, event_id, before_limit, after_limit):
    """Retrieves event_ids and pagination tokens around a given event in a
    room.

    Args:
        txn: database transaction.
        room_id (str)
        event_id (str)
        before_limit (int)
        after_limit (int)

    Returns:
        dict: {"before": {"event_ids", "token"},
               "after": {"event_ids", "token"}}
    """
    # Look up where the target event sits in the ordering.
    results = self._simple_select_one_txn(
        txn,
        "events",
        keyvalues={
            "event_id": event_id,
            "room_id": room_id,
        },
        retcols=["stream_ordering", "topological_ordering", "chunk_id"],
    )

    # Paginating backwards includes the event at the token, but paginating
    # forward doesn't.
    before_token = RoomStreamToken(
        results["chunk_id"],
        results["topological_ordering"],
        results["stream_ordering"] - 1,
    )

    after_token = RoomStreamToken(
        results["chunk_id"],
        results["topological_ordering"],
        results["stream_ordering"],
    )

    # Third return value (chunks iterated) is unused here.
    rows, start_token, _ = self._paginate_room_events_txn(
        txn, room_id, before_token, direction='b', limit=before_limit,
    )
    events_before = [r.event_id for r in rows]

    rows, end_token, _ = self._paginate_room_events_txn(
        txn, room_id, after_token, direction='f', limit=after_limit,
    )
    events_after = [r.event_id for r in rows]

    return {
        "before": {
            "event_ids": events_before,
            "token": start_token,
        },
        "after": {
            "event_ids": events_after,
            "token": end_token,
        },
    }
|
2016-11-17 08:46:44 -07:00
|
|
|
|
|
|
|
@defer.inlineCallbacks
def get_all_new_events_stream(self, from_id, current_id, limit):
    """Get all new events between two stream positions.

    Args:
        from_id (int): stream position to fetch from (exclusive).
        current_id (int): stream position to fetch up to (inclusive).
        limit (int): maximum number of events to return.

    Returns:
        Deferred[tuple[int, list[FrozenEvent]]]: the stream position
        actually reached (may be below `current_id` if the limit was
        hit) and the events, in ascending stream order.
    """

    def get_all_new_events_stream_txn(txn):
        sql = (
            "SELECT e.stream_ordering, e.event_id"
            " FROM events AS e"
            " WHERE"
            " ? < e.stream_ordering AND e.stream_ordering <= ?"
            " ORDER BY e.stream_ordering ASC"
            " LIMIT ?"
        )

        txn.execute(sql, (from_id, current_id, limit))
        rows = txn.fetchall()

        # If we hit the limit we only got as far as the last row.
        upper_bound = current_id
        if len(rows) == limit:
            upper_bound = rows[-1][0]

        return upper_bound, [row[1] for row in rows]

    upper_bound, event_ids = yield self.runInteraction(
        "get_all_new_events_stream", get_all_new_events_stream_txn,
    )

    events = yield self._get_events(event_ids)

    defer.returnValue((upper_bound, events))
|
2016-11-21 04:28:37 -07:00
|
|
|
|
|
|
|
def get_federation_out_pos(self, typ):
|
|
|
|
return self._simple_select_one_onecol(
|
|
|
|
table="federation_stream_position",
|
|
|
|
retcol="stream_id",
|
|
|
|
keyvalues={"type": typ},
|
|
|
|
desc="get_federation_out_pos"
|
|
|
|
)
|
|
|
|
|
|
|
|
def update_federation_out_pos(self, typ, stream_id):
|
|
|
|
return self._simple_update_one(
|
|
|
|
table="federation_stream_position",
|
|
|
|
keyvalues={"type": typ},
|
|
|
|
updatevalues={"stream_id": stream_id},
|
|
|
|
desc="update_federation_out_pos",
|
|
|
|
)
|
2017-03-13 03:50:10 -06:00
|
|
|
|
|
|
|
def has_room_changed_since(self, room_id, stream_id):
    """Return whether the room may have changed since `stream_id`
    (cache check only, so may give false positives)."""
    return self._events_stream_cache.has_entity_changed(room_id, stream_id)
|
2018-03-01 06:56:03 -07:00
|
|
|
|
2018-05-09 02:55:19 -06:00
|
|
|
def _paginate_room_events_txn(self, txn, room_id, from_token, to_token=None,
                              direction='b', limit=-1, event_filter=None):
    """Returns list of events before or after a given token.

    Args:
        txn: database transaction object.
        room_id (str)
        from_token (RoomStreamToken): The token used to stream from
        to_token (RoomStreamToken|None): A token which if given limits the
            results to only those before
        direction(char): Either 'b' or 'f' to indicate whether we are
            paginating forwards or backwards from `from_key`.
        limit (int): The maximum number of events to return.
        event_filter (Filter|None): If provided filters the events to
            those that match the filter.

    Returns:
        tuple[list[_EventDictReturn], str, list[int]]: Returns
        the results as a list of _EventDictReturn, a token that points to
        the end of the result set, and a list of chunks iterated over.
    """

    # NOTE(review): the default limit=-1 fails the assert below; callers
    # are presumably expected to always pass a non-negative limit — TODO
    # confirm against callers.
    limit = int(limit)  # Sometimes we are passed a string from somewhere
    assert limit >= 0

    # There are two modes of fetching events: by stream order or by
    # topological order. This is determined by whether the from_token is a
    # stream or topological token. If stream then we can simply do a select
    # ordered by stream_ordering column. If topological, then we need to
    # fetch events from one chunk at a time until we hit the limit.

    # Tokens really represent positions between elements, but we use
    # the convention of pointing to the event before the gap. Hence
    # we have a bit of asymmetry when it comes to equalities.
    args = [False, room_id]  # outlier = False, plus the room filter
    if direction == 'b':
        order = "DESC"
        bounds = upper_bound(
            from_token, self.database_engine
        )
        if to_token:
            bounds = "%s AND %s" % (bounds, lower_bound(
                to_token, self.database_engine
            ))
    else:
        order = "ASC"
        bounds = lower_bound(
            from_token, self.database_engine
        )
        if to_token:
            bounds = "%s AND %s" % (bounds, upper_bound(
                to_token, self.database_engine
            ))

    filter_clause, filter_args = filter_to_clause(event_filter)

    if filter_clause:
        bounds += " AND " + filter_clause
        args.extend(filter_args)

    args.append(limit)

    # First query: fetch events bounded by the tokens, in the requested
    # direction, up to the limit.
    sql = (
        "SELECT event_id, chunk_id, topological_ordering, stream_ordering"
        " FROM events"
        " WHERE outlier = ? AND room_id = ? AND %(bounds)s"
        " ORDER BY topological_ordering %(order)s,"
        " stream_ordering %(order)s LIMIT ?"
    ) % {
        "bounds": bounds,
        "order": order,
    }

    txn.execute(sql, args)

    rows = [_EventDictReturn(*row) for row in txn]

    # If we are paginating topologically and we haven't hit the limit on
    # number of events then we need to fetch events from the previous or
    # next chunk.

    iterated_chunks = []

    chunk_id = None
    if from_token.chunk:  # FIXME: may be topological but no chunk.
        if rows:
            # Continue from the chunk of the last row we fetched.
            chunk_id = rows[-1].chunk_id
            iterated_chunks = [r.chunk_id for r in rows]
        else:
            chunk_id = from_token.chunk
            iterated_chunks = [chunk_id]

    table = ChunkDBOrderedListStore(
        txn, room_id, self.clock,
    )

    if filter_clause:
        filter_clause = "AND " + filter_clause

    # Query used for each subsequent chunk we walk to.
    # NOTE(review): this query is not constrained by chunk_id, so every
    # iteration appears to fetch the same rows from the whole room —
    # verify whether a "AND chunk_id = ?" clause is missing here.
    sql = (
        "SELECT event_id, chunk_id, topological_ordering, stream_ordering"
        " FROM events"
        " WHERE outlier = ? AND room_id = ? %(filter_clause)s"
        " ORDER BY topological_ordering %(order)s,"
        " stream_ordering %(order)s LIMIT ?"
    ) % {
        "filter_clause": filter_clause,
        "order": order,
    }

    args = [False, room_id] + filter_args + [limit]

    # Walk the linked list of chunks (backwards or forwards depending on
    # direction) until we have enough rows or run out of chunks.
    while chunk_id and (limit <= 0 or len(rows) < limit):
        if chunk_id not in iterated_chunks:
            iterated_chunks.append(chunk_id)

        if direction == 'b':
            chunk_id = table.get_prev(chunk_id)
        else:
            chunk_id = table.get_next(chunk_id)

        if chunk_id is None:
            break

        txn.execute(sql, args)
        new_rows = [_EventDictReturn(*row) for row in txn]
        rows.extend(new_rows)

    # We may have inserted more rows than necessary in the loop above
    rows = rows[:limit]

    if rows:
        # Build the next-page token from the last row returned.
        chunk = rows[-1].chunk_id
        topo = rows[-1].topological_ordering
        toke = rows[-1].stream_ordering
        if direction == 'b':
            # Tokens are positions between events.
            # This token points *after* the last event in the chunk.
            # We need it to point to the event before it in the chunk
            # when we are going backwards so we subtract one from the
            # stream part.
            toke -= 1
        next_token = RoomStreamToken(chunk, topo, toke)
    else:
        # TODO (erikj): We should work out what to do here instead.
        next_token = to_token if to_token else from_token

    return rows, str(next_token), iterated_chunks,
|
2018-03-01 06:56:03 -07:00
|
|
|
|
2018-05-08 08:45:38 -06:00
|
|
|
@defer.inlineCallbacks
def paginate_room_events(self, room_id, from_key, to_key=None,
                         direction='b', limit=-1, event_filter=None):
    """Returns list of events before or after a given token.

    Args:
        room_id (str)
        from_key (str): The token used to stream from
        to_key (str|None): A token which if given limits the results to
            only those before
        direction(char): Either 'b' or 'f' to indicate whether we are
            paginating forwards or backwards from `from_key`.
        limit (int): The maximum number of events to return. Zero or less
            means no limit.
        event_filter (Filter|None): If provided filters the events to
            those that match the filter.

    Returns:
        Deferred[tuple[list[dict], str, list[str]]]: Returns the results
        as a list of dicts, a token that points to the end of the result
        set, and a list of backwards extremities. The dicts have the keys
        "event_id", "topological_ordering" and "stream_ordering".
    """

    # NOTE(review): "Zero or less means no limit" above conflicts with
    # _paginate_room_events_txn, which asserts limit >= 0 — confirm the
    # intended contract.

    # Parse the string tokens into RoomStreamToken before entering the txn.
    from_key = RoomStreamToken.parse(from_key)
    if to_key:
        to_key = RoomStreamToken.parse(to_key)

    def _do_paginate_room_events(txn):
        rows, token, chunks = self._paginate_room_events_txn(
            txn, room_id, from_key, to_key, direction, limit, event_filter,
        )

        # We now fetch the extremities by fetching the extremities for
        # each chunk we iterated over.
        extremities = []
        seen = set()
        for chunk_id in chunks:
            if chunk_id in seen:
                continue
            seen.add(chunk_id)

            event_ids = self._simple_select_onecol_txn(
                txn,
                table="chunk_backwards_extremities",
                keyvalues={"chunk_id": chunk_id},
                retcol="event_id"
            )

            # Preserve order while de-duplicating across chunks.
            extremities.extend(e for e in event_ids if e not in extremities)

        return rows, token, extremities

    rows, token, extremities = yield self.runInteraction(
        "paginate_room_events", _do_paginate_room_events,
    )

    # Hydrate the row IDs into full events (including previous content,
    # used for state event diffs).
    events = yield self._get_events(
        [r.event_id for r in rows],
        get_prev_content=True
    )

    # Annotate each event with the tokens either side of it.
    self._set_before_and_after(events, rows)

    defer.returnValue((events, token, extremities))
|
2018-05-08 08:45:38 -06:00
|
|
|
|
2018-05-21 10:43:03 -06:00
|
|
|
def clamp_token_before(self, room_id, token_str, clamp_to_token_str):
    """For a given room returns the given token if its before
    clamp_to, otherwise returns clamp_to.

    Args:
        room_id (str)
        token_str (str)
        clamp_to_token_str(str): Must be topological token

    Returns:
        Deferred[str]: whichever of the two input token strings is
        earlier in the room's ordering.
    """

    token = RoomStreamToken.parse(token_str)
    clamp_to_token = RoomStreamToken.parse(clamp_to_token_str)

    def clamp_token_before_txn(txn, token):
        # If we're given a stream ordering, convert to topological token
        # by looking up the event at that stream position.
        if not token.chunk:
            row = self._simple_select_one_txn(
                txn,
                table="events",
                keyvalues={
                    "stream_ordering": token.stream,
                },
                retcols=("chunk_id", "topological_ordering", "stream_ordering",),
            )
            token = RoomStreamToken(*row)

        # If both tokens have chunk_ids, we can use that.
        if token.chunk and clamp_to_token.chunk:
            # Same chunk: topological_ordering within a chunk decides.
            if token.chunk == clamp_to_token.chunk:
                if token.topological < clamp_to_token.topological:
                    return token_str
                else:
                    return clamp_to_token_str

            # Different chunks: use the chunk linked-list ordering.
            table = ChunkDBOrderedListStore(
                txn, room_id, self.clock,
            )

            if table.is_before(token.chunk, clamp_to_token.chunk):
                return token_str
            else:
                return clamp_to_token_str

        # Ok, so we're dealing with events that haven't been chunked yet,
        # lets just cheat and fallback to depth.

        token_depth = self._simple_select_one_onecol_txn(
            txn,
            table="events",
            keyvalues={
                "stream_ordering": token.stream,
            },
            retcol="depth",
        )

        clamp_depth = self._simple_select_one_onecol_txn(
            txn,
            table="events",
            keyvalues={
                "stream_ordering": clamp_to_token.stream,
            },
            retcol="depth",
        )

        if token_depth < clamp_depth:
            return token_str
        else:
            return clamp_to_token_str

    return self.runInteraction(
        "clamp_token_before", clamp_token_before_txn, token
    )
|
|
|
|
|
2018-05-08 08:45:38 -06:00
|
|
|
|
|
|
|
class StreamStore(StreamWorkerStore):
    # Master-process store: exposes the live positions of the two event
    # stream ID generators.

    def get_room_max_stream_ordering(self):
        """Return the current maximum stream ordering (forward stream)."""
        return self._stream_id_gen.get_current_token()

    def get_room_min_stream_ordering(self):
        """Return the current minimum stream ordering (backfill stream)."""
        return self._backfill_id_gen.get_current_token()
|