2014-08-12 08:10:52 -06:00
|
|
|
# -*- coding: utf-8 -*-
|
2016-01-06 21:26:29 -07:00
|
|
|
# Copyright 2014-2016 OpenMarket Ltd
|
2014-08-12 08:10:52 -06:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
2014-08-19 09:40:25 -06:00
|
|
|
""" This module is responsible for getting events from the DB for pagination
|
|
|
|
and event streaming.
|
|
|
|
|
|
|
|
The order it returns events in depends on whether we are streaming forwards or
|
|
|
|
are paginating backwards. We do this because we want to handle out of order
|
|
|
|
messages nicely, while still returning them in the correct order when we
|
|
|
|
paginate backwards.
|
|
|
|
|
|
|
|
This is implemented by keeping two ordering columns: stream_ordering and
|
|
|
|
topological_ordering. Stream ordering is basically insertion/received order
|
2015-10-28 07:45:56 -06:00
|
|
|
(except for events from backfill requests). The topological_ordering is a
|
2014-08-19 09:40:25 -06:00
|
|
|
weak ordering of events based on the pdu graph.
|
|
|
|
|
|
|
|
This means that we have to have two different types of tokens, depending on
|
|
|
|
what sort order was used:
|
|
|
|
- stream tokens are of the form: "s%d", which maps directly to the column
|
|
|
|
    - topological tokens: "t%d-%d", where the integers map to the topological
|
|
|
|
and stream ordering columns respectively.
|
|
|
|
"""
|
|
|
|
|
2014-08-14 11:40:50 -06:00
|
|
|
from twisted.internet import defer
|
2014-08-12 20:14:34 -06:00
|
|
|
|
2015-08-11 10:59:32 -06:00
|
|
|
from ._base import SQLBaseStore
|
2016-03-23 05:42:50 -06:00
|
|
|
from synapse.util.caches.descriptors import cached
|
2015-02-26 10:21:17 -07:00
|
|
|
from synapse.api.constants import EventTypes
|
2015-05-11 11:00:33 -06:00
|
|
|
from synapse.types import RoomStreamToken
|
2016-02-04 03:22:44 -07:00
|
|
|
from synapse.util.logcontext import preserve_fn
|
2014-08-14 11:01:39 -06:00
|
|
|
|
2014-08-12 08:10:52 -06:00
|
|
|
import logging
|
|
|
|
|
|
|
|
|
2014-08-14 11:01:39 -06:00
|
|
|
logger = logging.getLogger(__name__)
|
2014-08-12 08:10:52 -06:00
|
|
|
|
|
|
|
|
2014-08-14 11:01:39 -06:00
|
|
|
# Hard upper bound on the number of rows any single stream query will return.
MAX_STREAM_SIZE = 1000
|
2014-08-12 08:10:52 -06:00
|
|
|
|
|
|
|
|
2014-08-19 07:19:48 -06:00
|
|
|
# Names for the two token orderings described in the module docstring:
# "stream" tokens ("s%d") order by insertion, "topological" tokens
# ("t%d-%d") order by the event graph then insertion.
_STREAM_TOKEN = "stream"
_TOPOLOGICAL_TOKEN = "topological"
|
|
|
|
|
|
|
|
|
2015-05-11 11:00:33 -06:00
|
|
|
def lower_bound(token):
    """Build an SQL boolean clause matching rows strictly after `token`.

    Args:
        token (RoomStreamToken): the position to compare against. If its
            topological part is None only the stream ordering is compared.

    Returns:
        str: an SQL fragment over the `topological_ordering` and
        `stream_ordering` columns.
    """
    if token.topological is None:
        # Pure stream token: only the stream_ordering column matters.
        return "({} < stream_ordering)".format(token.stream)
    # Topological token: compare (topological, stream) lexicographically.
    return (
        "({topo} < topological_ordering"
        " OR ({topo} = topological_ordering"
        " AND {stream} < stream_ordering))"
    ).format(topo=token.topological, stream=token.stream)
|
2015-01-13 07:14:21 -07:00
|
|
|
|
2014-08-19 07:19:48 -06:00
|
|
|
|
2015-05-11 11:00:33 -06:00
|
|
|
def upper_bound(token):
    """Build an SQL boolean clause matching rows at or before `token`.

    Args:
        token (RoomStreamToken): the position to compare against. If its
            topological part is None only the stream ordering is compared.

    Returns:
        str: an SQL fragment over the `topological_ordering` and
        `stream_ordering` columns.
    """
    if token.topological is None:
        # Pure stream token: only the stream_ordering column matters.
        return "({} >= stream_ordering)".format(token.stream)
    # Topological token: compare (topological, stream) lexicographically.
    return (
        "({topo} > topological_ordering"
        " OR ({topo} = topological_ordering"
        " AND {stream} >= stream_ordering))"
    ).format(topo=token.topological, stream=token.stream)
|
2014-08-19 07:19:48 -06:00
|
|
|
|
|
|
|
|
2014-08-14 11:01:39 -06:00
|
|
|
class StreamStore(SQLBaseStore):
|
2015-02-26 09:23:01 -07:00
|
|
|
@defer.inlineCallbacks
|
2015-02-25 08:00:59 -07:00
|
|
|
def get_appservice_room_stream(self, service, from_key, to_key, limit=0):
|
|
|
|
# NB this lives here instead of appservice.py so we can reuse the
|
|
|
|
# 'private' StreamToken class in this file.
|
|
|
|
if limit:
|
|
|
|
limit = max(limit, MAX_STREAM_SIZE)
|
|
|
|
else:
|
|
|
|
limit = MAX_STREAM_SIZE
|
|
|
|
|
|
|
|
# From and to keys should be integers from ordering.
|
2015-05-11 11:00:33 -06:00
|
|
|
from_id = RoomStreamToken.parse_stream_token(from_key)
|
|
|
|
to_id = RoomStreamToken.parse_stream_token(to_key)
|
2015-02-25 08:00:59 -07:00
|
|
|
|
|
|
|
if from_key == to_key:
|
2015-02-26 09:23:01 -07:00
|
|
|
defer.returnValue(([], to_key))
|
|
|
|
return
|
|
|
|
|
|
|
|
# select all the events between from/to with a sensible limit
|
|
|
|
sql = (
|
2015-03-02 03:41:35 -07:00
|
|
|
"SELECT e.event_id, e.room_id, e.type, s.state_key, "
|
2015-04-30 11:32:03 -06:00
|
|
|
"e.stream_ordering FROM events AS e "
|
|
|
|
"LEFT JOIN state_events as s ON "
|
2015-03-02 03:41:35 -07:00
|
|
|
"e.event_id = s.event_id "
|
2015-02-26 09:23:01 -07:00
|
|
|
"WHERE e.stream_ordering > ? AND e.stream_ordering <= ? "
|
|
|
|
"ORDER BY stream_ordering ASC LIMIT %(limit)d "
|
|
|
|
) % {
|
|
|
|
"limit": limit
|
|
|
|
}
|
|
|
|
|
|
|
|
def f(txn):
|
2015-03-02 04:20:51 -07:00
|
|
|
# pull out all the events between the tokens
|
2015-02-26 09:23:01 -07:00
|
|
|
txn.execute(sql, (from_id.stream, to_id.stream,))
|
|
|
|
rows = self.cursor_to_dict(txn)
|
|
|
|
|
2015-03-02 04:20:51 -07:00
|
|
|
# Logic:
|
|
|
|
# - We want ALL events which match the AS room_id regex
|
|
|
|
# - We want ALL events which match the rooms represented by the AS
|
|
|
|
# room_alias regex
|
|
|
|
# - We want ALL events for rooms that AS users have joined.
|
|
|
|
# This is currently supported via get_app_service_rooms (which is
|
|
|
|
# used for the Notifier listener rooms). We can't reasonably make a
|
|
|
|
# SQL query for these room IDs, so we'll pull all the events between
|
|
|
|
# from/to and filter in python.
|
|
|
|
rooms_for_as = self._get_app_service_rooms_txn(txn, service)
|
|
|
|
room_ids_for_as = [r.room_id for r in rooms_for_as]
|
|
|
|
|
|
|
|
def app_service_interested(row):
|
|
|
|
if row["room_id"] in room_ids_for_as:
|
|
|
|
return True
|
|
|
|
|
|
|
|
if row["type"] == EventTypes.Member:
|
|
|
|
if service.is_interested_in_user(row.get("state_key")):
|
|
|
|
return True
|
|
|
|
return False
|
|
|
|
|
2015-02-26 09:23:01 -07:00
|
|
|
ret = self._get_events_txn(
|
|
|
|
txn,
|
|
|
|
# apply the filter on the room id list
|
|
|
|
[
|
|
|
|
r["event_id"] for r in rows
|
2015-02-26 10:21:17 -07:00
|
|
|
if app_service_interested(r)
|
2015-02-26 09:23:01 -07:00
|
|
|
],
|
|
|
|
get_prev_content=True
|
|
|
|
)
|
|
|
|
|
|
|
|
self._set_before_and_after(ret, rows)
|
|
|
|
|
|
|
|
if rows:
|
2015-03-02 02:53:00 -07:00
|
|
|
key = "s%d" % max(r["stream_ordering"] for r in rows)
|
2015-02-26 09:23:01 -07:00
|
|
|
else:
|
|
|
|
# Assume we didn't get anything because there was nothing to
|
|
|
|
# get.
|
|
|
|
key = to_key
|
|
|
|
|
|
|
|
return ret, key
|
2015-02-25 08:00:59 -07:00
|
|
|
|
2015-02-26 09:23:01 -07:00
|
|
|
results = yield self.runInteraction("get_appservice_room_stream", f)
|
|
|
|
defer.returnValue(results)
|
2015-02-25 08:00:59 -07:00
|
|
|
|
2016-01-27 02:54:30 -07:00
|
|
|
@defer.inlineCallbacks
|
2016-02-11 03:07:27 -07:00
|
|
|
def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0,
|
|
|
|
order='DESC'):
|
2016-01-27 02:54:30 -07:00
|
|
|
from_id = RoomStreamToken.parse_stream_token(from_key).stream
|
|
|
|
|
2016-01-28 09:37:41 -07:00
|
|
|
room_ids = yield self._events_stream_cache.get_entities_changed(
|
2016-01-28 08:02:37 -07:00
|
|
|
room_ids, from_id
|
2016-01-27 02:54:30 -07:00
|
|
|
)
|
|
|
|
|
|
|
|
if not room_ids:
|
|
|
|
defer.returnValue({})
|
|
|
|
|
|
|
|
results = {}
|
|
|
|
room_ids = list(room_ids)
|
2016-02-02 10:18:50 -07:00
|
|
|
for rm_ids in (room_ids[i:i + 20] for i in xrange(0, len(room_ids), 20)):
|
2016-01-27 02:54:30 -07:00
|
|
|
res = yield defer.gatherResults([
|
2016-02-04 03:22:44 -07:00
|
|
|
preserve_fn(self.get_room_events_stream_for_room)(
|
2016-02-11 03:07:27 -07:00
|
|
|
room_id, from_key, to_key, limit, order=order,
|
2016-02-04 03:22:44 -07:00
|
|
|
)
|
2016-02-11 08:02:56 -07:00
|
|
|
for room_id in rm_ids
|
2016-01-27 02:54:30 -07:00
|
|
|
])
|
2016-02-04 03:22:44 -07:00
|
|
|
results.update(dict(zip(rm_ids, res)))
|
2016-01-27 02:54:30 -07:00
|
|
|
|
|
|
|
defer.returnValue(results)
|
|
|
|
|
|
|
|
    @defer.inlineCallbacks
    def get_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0,
                                        order='DESC'):
        """Fetch events in one room between two stream tokens.

        Args:
            room_id (str)
            from_key (str|None): "s%d" token to start after (exclusive), or
                None to just take the most recent events up to `to_key`.
            to_key (str): "s%d" token to end at (inclusive).
            limit (int): SQL LIMIT applied to the query.
            order (str): "ASC" or "DESC"; interpolated into the SQL ORDER BY.

        Returns:
            Deferred[(list, str)]: the events (always oldest-first; see the
            reverse() below) and the stream token of the earliest row seen.
        """
        # Note: If from_key is None then we return in topological order. This
        # is because in that case we're using this as a "get the last few messages
        # in a room" function, rather than "get new messages since last sync"
        if from_key is not None:
            from_id = RoomStreamToken.parse_stream_token(from_key).stream
        else:
            from_id = None
        to_id = RoomStreamToken.parse_stream_token(to_key).stream

        if from_key == to_key:
            # Empty window.
            defer.returnValue(([], from_key))

        # NOTE(review): a falsy from_id (0) skips this cache short-circuit;
        # presumably stream orderings start at 1 — confirm.
        if from_id:
            has_changed = yield self._events_stream_cache.has_entity_changed(
                room_id, from_id
            )

            if not has_changed:
                # Cache says nothing in this room since from_id.
                defer.returnValue(([], from_key))

        def f(txn):
            if from_id is not None:
                # Incremental query: stream-ordered window (from_id, to_id].
                sql = (
                    "SELECT event_id, stream_ordering FROM events WHERE"
                    " room_id = ?"
                    " AND not outlier"
                    " AND stream_ordering > ? AND stream_ordering <= ?"
                    " ORDER BY stream_ordering %s LIMIT ?"
                ) % (order,)
                txn.execute(sql, (room_id, from_id, to_id, limit))
            else:
                # Initial query: topologically ordered tail of the room.
                sql = (
                    "SELECT event_id, stream_ordering FROM events WHERE"
                    " room_id = ?"
                    " AND not outlier"
                    " AND stream_ordering <= ?"
                    " ORDER BY topological_ordering %s, stream_ordering %s LIMIT ?"
                ) % (order, order,)
                txn.execute(sql, (room_id, to_id, limit))

            rows = self.cursor_to_dict(txn)

            return rows

        rows = yield self.runInteraction("get_room_events_stream_for_room", f)

        ret = yield self._get_events(
            [r["event_id"] for r in rows],
            get_prev_content=True
        )

        # Attach before/after pagination tokens; topological only when this
        # was an initial (from_key is None) query.
        self._set_before_and_after(ret, rows, topo_order=from_id is None)

        if order.lower() == "desc":
            # Callers always get oldest-first events regardless of the SQL
            # order used to apply the LIMIT.
            ret.reverse()

        if rows:
            # Earliest stream position seen — the token to paginate from.
            key = "s%d" % min(r["stream_ordering"] for r in rows)
        else:
            # Assume we didn't get anything because there was nothing to
            # get.
            key = from_key

        defer.returnValue((ret, key))
|
|
|
|
|
|
|
|
    @defer.inlineCallbacks
    def get_membership_changes_for_user(self, user_id, from_key, to_key):
        """Fetch membership events concerning `user_id` between two tokens.

        Args:
            user_id (str)
            from_key (str|None): "s%d" token to start after (exclusive), or
                None to take everything up to `to_key`.
            to_key (str): "s%d" token to end at (inclusive).

        Returns:
            Deferred[list]: the membership events, in stream order.
        """
        if from_key is not None:
            from_id = RoomStreamToken.parse_stream_token(from_key).stream
        else:
            from_id = None
        to_id = RoomStreamToken.parse_stream_token(to_key).stream

        if from_key == to_key:
            # Empty window.
            defer.returnValue([])

        if from_id:
            # NOTE(review): unlike the events stream cache above this call is
            # not yielded — presumably has_entity_changed here is synchronous;
            # confirm.
            has_changed = self._membership_stream_cache.has_entity_changed(
                user_id, int(from_id)
            )
            if not has_changed:
                defer.returnValue([])

        def f(txn):
            if from_id is not None:
                # Incremental: memberships in (from_id, to_id].
                sql = (
                    "SELECT m.event_id, stream_ordering FROM events AS e,"
                    " room_memberships AS m"
                    " WHERE e.event_id = m.event_id"
                    " AND m.user_id = ?"
                    " AND e.stream_ordering > ? AND e.stream_ordering <= ?"
                    " ORDER BY e.stream_ordering ASC"
                )
                txn.execute(sql, (user_id, from_id, to_id,))
            else:
                # Initial: all memberships up to to_id.
                sql = (
                    "SELECT m.event_id, stream_ordering FROM events AS e,"
                    " room_memberships AS m"
                    " WHERE e.event_id = m.event_id"
                    " AND m.user_id = ?"
                    " AND stream_ordering <= ?"
                    " ORDER BY stream_ordering ASC"
                )
                txn.execute(sql, (user_id, to_id,))
            rows = self.cursor_to_dict(txn)

            return rows

        rows = yield self.runInteraction("get_membership_changes_for_user", f)

        ret = yield self._get_events(
            [r["event_id"] for r in rows],
            get_prev_content=True
        )

        # Stream (not topological) tokens, matching the ORDER BY above.
        self._set_before_and_after(ret, rows, topo_order=False)

        defer.returnValue(ret)
|
2016-01-27 02:54:30 -07:00
|
|
|
|
2015-11-05 07:32:26 -07:00
|
|
|
def get_room_events_stream(
|
|
|
|
self,
|
|
|
|
user_id,
|
|
|
|
from_key,
|
|
|
|
to_key,
|
|
|
|
limit=0,
|
|
|
|
is_guest=False,
|
|
|
|
room_ids=None
|
|
|
|
):
|
|
|
|
room_ids = room_ids or []
|
|
|
|
room_ids = [r for r in room_ids]
|
|
|
|
if is_guest:
|
|
|
|
current_room_membership_sql = (
|
|
|
|
"SELECT c.room_id FROM history_visibility AS h"
|
|
|
|
" INNER JOIN current_state_events AS c"
|
|
|
|
" ON h.event_id = c.event_id"
|
2016-01-27 10:11:04 -07:00
|
|
|
" WHERE c.room_id IN (%s)"
|
|
|
|
" AND h.history_visibility = 'world_readable'" % (
|
2015-11-05 07:32:26 -07:00
|
|
|
",".join(map(lambda _: "?", room_ids))
|
|
|
|
)
|
|
|
|
)
|
|
|
|
current_room_membership_args = room_ids
|
|
|
|
else:
|
|
|
|
current_room_membership_sql = (
|
|
|
|
"SELECT m.room_id FROM room_memberships as m "
|
|
|
|
" INNER JOIN current_state_events as c"
|
|
|
|
" ON m.event_id = c.event_id AND c.state_key = m.user_id"
|
|
|
|
" WHERE m.user_id = ? AND m.membership = 'join'"
|
|
|
|
)
|
|
|
|
current_room_membership_args = [user_id]
|
2014-08-12 08:10:52 -06:00
|
|
|
|
2014-08-20 09:07:20 -06:00
|
|
|
# We also want to get any membership events about that user, e.g.
|
|
|
|
# invites or leave notifications.
|
|
|
|
membership_sql = (
|
2014-08-15 08:28:54 -06:00
|
|
|
"SELECT m.event_id FROM room_memberships as m "
|
2014-08-14 11:40:50 -06:00
|
|
|
"INNER JOIN current_state_events as c ON m.event_id = c.event_id "
|
2014-08-20 09:07:20 -06:00
|
|
|
"WHERE m.user_id = ? "
|
2014-08-12 08:10:52 -06:00
|
|
|
)
|
2015-11-05 07:32:26 -07:00
|
|
|
membership_args = [user_id]
|
2014-08-12 08:10:52 -06:00
|
|
|
|
2014-08-14 11:01:39 -06:00
|
|
|
if limit:
|
|
|
|
limit = max(limit, MAX_STREAM_SIZE)
|
|
|
|
else:
|
2014-08-15 09:04:54 -06:00
|
|
|
limit = MAX_STREAM_SIZE
|
2014-08-14 11:01:39 -06:00
|
|
|
|
2014-08-15 08:53:06 -06:00
|
|
|
# From and to keys should be integers from ordering.
|
2015-05-11 11:00:33 -06:00
|
|
|
from_id = RoomStreamToken.parse_stream_token(from_key)
|
|
|
|
to_id = RoomStreamToken.parse_stream_token(to_key)
|
2014-08-15 08:53:06 -06:00
|
|
|
|
|
|
|
if from_key == to_key:
|
2015-01-06 06:03:23 -07:00
|
|
|
return defer.succeed(([], to_key))
|
2014-08-15 08:53:06 -06:00
|
|
|
|
2014-08-14 11:01:39 -06:00
|
|
|
sql = (
|
2015-01-06 06:03:23 -07:00
|
|
|
"SELECT e.event_id, e.stream_ordering FROM events AS e WHERE "
|
2015-04-14 06:53:20 -06:00
|
|
|
"(e.outlier = ? AND (room_id IN (%(current)s)) OR "
|
2014-08-15 08:28:54 -06:00
|
|
|
"(event_id IN (%(invites)s))) "
|
2014-08-26 11:57:46 -06:00
|
|
|
"AND e.stream_ordering > ? AND e.stream_ordering <= ? "
|
2014-08-19 07:19:48 -06:00
|
|
|
"ORDER BY stream_ordering ASC LIMIT %(limit)d "
|
2014-08-14 11:01:39 -06:00
|
|
|
) % {
|
|
|
|
"current": current_room_membership_sql,
|
2014-08-20 09:07:20 -06:00
|
|
|
"invites": membership_sql,
|
2014-08-19 07:19:48 -06:00
|
|
|
"limit": limit
|
2014-08-14 11:01:39 -06:00
|
|
|
}
|
|
|
|
|
2015-01-06 06:03:23 -07:00
|
|
|
def f(txn):
|
2015-11-05 07:32:26 -07:00
|
|
|
args = ([False] + current_room_membership_args + membership_args +
|
|
|
|
[from_id.stream, to_id.stream])
|
|
|
|
txn.execute(sql, args)
|
2014-08-12 08:10:52 -06:00
|
|
|
|
2015-01-06 06:03:23 -07:00
|
|
|
rows = self.cursor_to_dict(txn)
|
2014-08-15 08:28:54 -06:00
|
|
|
|
2015-01-06 06:03:23 -07:00
|
|
|
ret = self._get_events_txn(
|
|
|
|
txn,
|
|
|
|
[r["event_id"] for r in rows],
|
|
|
|
get_prev_content=True
|
|
|
|
)
|
|
|
|
|
2015-01-30 04:32:35 -07:00
|
|
|
self._set_before_and_after(ret, rows)
|
2015-01-29 07:40:28 -07:00
|
|
|
|
2015-01-06 06:03:23 -07:00
|
|
|
if rows:
|
2015-03-02 02:53:00 -07:00
|
|
|
key = "s%d" % max(r["stream_ordering"] for r in rows)
|
2015-01-06 06:03:23 -07:00
|
|
|
else:
|
|
|
|
# Assume we didn't get anything because there was nothing to
|
|
|
|
# get.
|
|
|
|
key = to_key
|
2014-08-15 08:28:54 -06:00
|
|
|
|
2015-01-06 06:03:23 -07:00
|
|
|
return ret, key
|
2014-08-15 08:28:54 -06:00
|
|
|
|
2015-01-06 06:03:23 -07:00
|
|
|
return self.runInteraction("get_room_events_stream", f)
|
2014-08-15 06:58:28 -06:00
|
|
|
|
2015-05-14 06:45:48 -06:00
|
|
|
    @defer.inlineCallbacks
    def paginate_room_events(self, room_id, from_key, to_key=None,
                             direction='b', limit=-1):
        """Paginate events in a room from a given token.

        Args:
            room_id (str)
            from_key (str): token to paginate from.
            to_key (str|None): optional token bounding the far end.
            direction (str): 'b' to go backwards in time, anything else
                forwards.
            limit (int): maximum number of events; <= 0 means no limit.

        Returns:
            Deferred[(list, str)]: the events and the token to continue
            paginating from.
        """
        # Tokens really represent positions between elements, but we use
        # the convention of pointing to the event before the gap. Hence
        # we have a bit of asymmetry when it comes to equalities.
        args = [False, room_id]
        if direction == 'b':
            # Going backwards: newest-first, starting at or before from_key.
            order = "DESC"
            bounds = upper_bound(RoomStreamToken.parse(from_key))
            if to_key:
                bounds = "%s AND %s" % (
                    bounds, lower_bound(RoomStreamToken.parse(to_key))
                )
        else:
            # Going forwards: oldest-first, starting after from_key.
            order = "ASC"
            bounds = lower_bound(RoomStreamToken.parse(from_key))
            if to_key:
                bounds = "%s AND %s" % (
                    bounds, upper_bound(RoomStreamToken.parse(to_key))
                )

        if int(limit) > 0:
            args.append(int(limit))
            limit_str = " LIMIT ?"
        else:
            limit_str = ""

        sql = (
            "SELECT * FROM events"
            " WHERE outlier = ? AND room_id = ? AND %(bounds)s"
            " ORDER BY topological_ordering %(order)s,"
            " stream_ordering %(order)s %(limit)s"
        ) % {
            "bounds": bounds,
            "order": order,
            "limit": limit_str
        }

        def f(txn):
            txn.execute(sql, args)

            rows = self.cursor_to_dict(txn)

            if rows:
                # Build the continuation token from the last row returned.
                topo = rows[-1]["topological_ordering"]
                toke = rows[-1]["stream_ordering"]
                if direction == 'b':
                    # Tokens are positions between events.
                    # This token points *after* the last event in the chunk.
                    # We need it to point to the event before it in the chunk
                    # when we are going backwards so we subtract one from the
                    # stream part.
                    toke -= 1
                next_token = str(RoomStreamToken(topo, toke))
            else:
                # TODO (erikj): We should work out what to do here instead.
                next_token = to_key if to_key else from_key

            return rows, next_token,

        rows, token = yield self.runInteraction("paginate_room_events", f)

        events = yield self._get_events(
            [r["event_id"] for r in rows],
            get_prev_content=True
        )

        self._set_before_and_after(events, rows)

        defer.returnValue((events, token))
|
2014-08-19 07:19:48 -06:00
|
|
|
|
2016-03-23 05:42:50 -06:00
|
|
|
@defer.inlineCallbacks
|
2015-08-05 08:06:51 -06:00
|
|
|
def get_recent_events_for_room(self, room_id, limit, end_token, from_token=None):
|
2016-03-23 05:42:50 -06:00
|
|
|
rows, token = yield self.get_recent_event_ids_for_room(
|
|
|
|
room_id, limit, end_token, from_token
|
|
|
|
)
|
|
|
|
|
|
|
|
logger.debug("stream before")
|
|
|
|
events = yield self._get_events(
|
|
|
|
[r["event_id"] for r in rows],
|
|
|
|
get_prev_content=True
|
|
|
|
)
|
|
|
|
logger.debug("stream after")
|
|
|
|
|
|
|
|
self._set_before_and_after(events, rows)
|
|
|
|
|
|
|
|
defer.returnValue((events, token))
|
2014-08-15 06:58:28 -06:00
|
|
|
|
2016-03-23 05:42:50 -06:00
|
|
|
    @cached(num_args=4)
    def get_recent_event_ids_for_room(self, room_id, limit, end_token, from_token=None):
        """Fetch ids of the most recent events in a room, topologically
        ordered, with pagination tokens.

        Cached on all four arguments.

        Args:
            room_id (str)
            limit (int): maximum number of rows to return.
            end_token (str): "s%d" token to end at (inclusive).
            from_token (str|None): "s%d" token to start after (exclusive),
                or None to take the most recent `limit` events.

        Returns:
            Deferred[(list, (str, str))]: row dicts with stream_ordering,
            topological_ordering and event_id, oldest-first, plus a
            (start_token, end_token) string pair for paginating around them.
        """
        end_token = RoomStreamToken.parse_stream_token(end_token)

        if from_token is None:
            sql = (
                "SELECT stream_ordering, topological_ordering, event_id"
                " FROM events"
                " WHERE room_id = ? AND stream_ordering <= ? AND outlier = ?"
                " ORDER BY topological_ordering DESC, stream_ordering DESC"
                " LIMIT ?"
            )
        else:
            from_token = RoomStreamToken.parse_stream_token(from_token)
            sql = (
                "SELECT stream_ordering, topological_ordering, event_id"
                " FROM events"
                " WHERE room_id = ? AND stream_ordering > ?"
                " AND stream_ordering <= ? AND outlier = ?"
                " ORDER BY topological_ordering DESC, stream_ordering DESC"
                " LIMIT ?"
            )

        def get_recent_events_for_room_txn(txn):
            if from_token is None:
                txn.execute(sql, (room_id, end_token.stream, False, limit,))
            else:
                txn.execute(sql, (
                    room_id, from_token.stream, end_token.stream, False, limit
                ))

            rows = self.cursor_to_dict(txn)

            rows.reverse()  # As we selected with reverse ordering

            if rows:
                # Tokens are positions between events.
                # This token points *after* the last event in the chunk.
                # We need it to point to the event before it in the chunk
                # since we are going backwards so we subtract one from the
                # stream part.
                topo = rows[0]["topological_ordering"]
                toke = rows[0]["stream_ordering"] - 1
                start_token = str(RoomStreamToken(topo, toke))

                token = (start_token, str(end_token))
            else:
                # Nothing in range: collapse the window to end_token.
                token = (str(end_token), str(end_token))

            return rows, token

        return self.runInteraction(
            "get_recent_events_for_room", get_recent_events_for_room_txn
        )
|
2014-08-15 06:58:28 -06:00
|
|
|
|
2015-04-09 04:41:36 -06:00
|
|
|
@defer.inlineCallbacks
|
2015-05-12 03:28:10 -06:00
|
|
|
def get_room_events_max_id(self, direction='f'):
|
2016-02-18 09:37:28 -07:00
|
|
|
token = yield self._stream_id_gen.get_max_token()
|
2015-05-12 03:28:10 -06:00
|
|
|
if direction != 'b':
|
|
|
|
defer.returnValue("s%d" % (token,))
|
|
|
|
else:
|
|
|
|
topo = yield self.runInteraction(
|
|
|
|
"_get_max_topological_txn", self._get_max_topological_txn
|
|
|
|
)
|
|
|
|
defer.returnValue("t%d-%d" % (topo, token))
|
|
|
|
|
2015-09-09 06:25:22 -06:00
|
|
|
def get_stream_token_for_event(self, event_id):
|
|
|
|
"""The stream token for an event
|
|
|
|
Args:
|
|
|
|
event_id(str): The id of the event to look up a stream token for.
|
|
|
|
Raises:
|
|
|
|
StoreError if the event wasn't in the database.
|
|
|
|
Returns:
|
|
|
|
A deferred "s%d" stream token.
|
|
|
|
"""
|
|
|
|
return self._simple_select_one_onecol(
|
|
|
|
table="events",
|
|
|
|
keyvalues={"event_id": event_id},
|
|
|
|
retcol="stream_ordering",
|
2015-09-09 10:31:09 -06:00
|
|
|
).addCallback(lambda row: "s%d" % (row,))
|
|
|
|
|
|
|
|
    def get_topological_token_for_event(self, event_id):
        """The topological token for an event
        Args:
            event_id(str): The id of the event to look up a stream token for.
        Raises:
            StoreError if the event wasn't in the database.
        Returns:
            A deferred "t%d-%d" topological token.
        """
        return self._simple_select_one(
            table="events",
            keyvalues={"event_id": event_id},
            retcols=("stream_ordering", "topological_ordering"),
            desc="get_topological_token_for_event",
        ).addCallback(lambda row: "t%d-%d" % (
            row["topological_ordering"], row["stream_ordering"],)
        )
|
2015-09-09 06:25:22 -06:00
|
|
|
|
2016-01-28 04:52:34 -07:00
|
|
|
def get_max_topological_token_for_stream_and_room(self, room_id, stream_key):
|
|
|
|
sql = (
|
|
|
|
"SELECT max(topological_ordering) FROM events"
|
|
|
|
" WHERE room_id = ? AND stream_ordering < ?"
|
|
|
|
)
|
|
|
|
return self._execute(
|
|
|
|
"get_max_topological_token_for_stream_and_room", None,
|
|
|
|
sql, room_id, stream_key,
|
|
|
|
).addCallback(
|
|
|
|
lambda r: r[0][0] if r else 0
|
|
|
|
)
|
|
|
|
|
2015-05-12 03:28:10 -06:00
|
|
|
def _get_max_topological_txn(self, txn):
|
|
|
|
txn.execute(
|
|
|
|
"SELECT MAX(topological_ordering) FROM events"
|
|
|
|
" WHERE outlier = ?",
|
|
|
|
(False,)
|
|
|
|
)
|
|
|
|
|
|
|
|
rows = txn.fetchall()
|
|
|
|
return rows[0][0] if rows else 0
|
2014-08-26 07:31:48 -06:00
|
|
|
|
2015-01-30 04:32:35 -07:00
|
|
|
@staticmethod
|
2016-01-28 04:34:17 -07:00
|
|
|
def _set_before_and_after(events, rows, topo_order=True):
|
2015-01-30 04:32:35 -07:00
|
|
|
for event, row in zip(events, rows):
|
|
|
|
stream = row["stream_ordering"]
|
2016-01-28 04:34:17 -07:00
|
|
|
if topo_order:
|
|
|
|
topo = event.depth
|
|
|
|
else:
|
|
|
|
topo = None
|
2015-01-30 04:32:35 -07:00
|
|
|
internal = event.internal_metadata
|
2015-05-11 11:00:33 -06:00
|
|
|
internal.before = str(RoomStreamToken(topo, stream - 1))
|
|
|
|
internal.after = str(RoomStreamToken(topo, stream))
|
2016-02-01 09:32:46 -07:00
|
|
|
internal.order = (
|
|
|
|
int(topo) if topo else 0,
|
|
|
|
int(stream),
|
|
|
|
)
|
2015-10-28 07:45:56 -06:00
|
|
|
|
|
|
|
@defer.inlineCallbacks
|
|
|
|
def get_events_around(self, room_id, event_id, before_limit, after_limit):
|
2015-10-28 08:05:50 -06:00
|
|
|
"""Retrieve events and pagination tokens around a given event in a
|
|
|
|
room.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
room_id (str)
|
|
|
|
event_id (str)
|
|
|
|
before_limit (int)
|
|
|
|
after_limit (int)
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
dict
|
|
|
|
"""
|
|
|
|
|
2015-10-28 07:45:56 -06:00
|
|
|
results = yield self.runInteraction(
|
|
|
|
"get_events_around", self._get_events_around_txn,
|
|
|
|
room_id, event_id, before_limit, after_limit
|
|
|
|
)
|
|
|
|
|
|
|
|
events_before = yield self._get_events(
|
|
|
|
[e for e in results["before"]["event_ids"]],
|
|
|
|
get_prev_content=True
|
|
|
|
)
|
|
|
|
|
|
|
|
events_after = yield self._get_events(
|
|
|
|
[e for e in results["after"]["event_ids"]],
|
|
|
|
get_prev_content=True
|
|
|
|
)
|
|
|
|
|
|
|
|
defer.returnValue({
|
|
|
|
"events_before": events_before,
|
|
|
|
"events_after": events_after,
|
|
|
|
"start": results["before"]["token"],
|
|
|
|
"end": results["after"]["token"],
|
|
|
|
})
|
|
|
|
|
|
|
|
    def _get_events_around_txn(self, txn, room_id, event_id, before_limit, after_limit):
        """Retrieves event_ids and pagination tokens around a given event in a
        room.

        Args:
            txn: the database transaction to run queries on.
            room_id (str)
            event_id (str): the anchor event; raises (via
                _simple_select_one_txn) if it is not in the database.
            before_limit (int): max number of earlier event ids to return.
            after_limit (int): max number of later event ids to return.

        Returns:
            dict: {"before": {"event_ids", "token"},
                   "after": {"event_ids", "token"}} where "before" ids are
            newest-first and "after" ids oldest-first, per the ORDER BYs.
        """

        # Look up the anchor event's position in both orderings.
        results = self._simple_select_one_txn(
            txn,
            "events",
            keyvalues={
                "event_id": event_id,
                "room_id": room_id,
            },
            retcols=["stream_ordering", "topological_ordering"],
        )

        stream_ordering = results["stream_ordering"]
        topological_ordering = results["topological_ordering"]

        # Events strictly before the anchor in (topological, stream) order.
        query_before = (
            "SELECT topological_ordering, stream_ordering, event_id FROM events"
            " WHERE room_id = ? AND (topological_ordering < ?"
            " OR (topological_ordering = ? AND stream_ordering < ?))"
            " ORDER BY topological_ordering DESC, stream_ordering DESC"
            " LIMIT ?"
        )

        # Events strictly after the anchor in (topological, stream) order.
        query_after = (
            "SELECT topological_ordering, stream_ordering, event_id FROM events"
            " WHERE room_id = ? AND (topological_ordering > ?"
            " OR (topological_ordering = ? AND stream_ordering > ?))"
            " ORDER BY topological_ordering ASC, stream_ordering ASC"
            " LIMIT ?"
        )

        txn.execute(
            query_before,
            (
                room_id, topological_ordering, topological_ordering,
                stream_ordering, before_limit,
            )
        )

        rows = self.cursor_to_dict(txn)
        events_before = [r["event_id"] for r in rows]

        if rows:
            # Token just before the oldest row returned; the stream part is
            # decremented because tokens point between events.
            start_token = str(RoomStreamToken(
                rows[0]["topological_ordering"],
                rows[0]["stream_ordering"] - 1,
            ))
        else:
            # Nothing before: anchor the token just before the event itself.
            start_token = str(RoomStreamToken(
                topological_ordering,
                stream_ordering - 1,
            ))

        txn.execute(
            query_after,
            (
                room_id, topological_ordering, topological_ordering,
                stream_ordering, after_limit,
            )
        )

        rows = self.cursor_to_dict(txn)
        events_after = [r["event_id"] for r in rows]

        if rows:
            # Token at the newest row returned.
            end_token = str(RoomStreamToken(
                rows[-1]["topological_ordering"],
                rows[-1]["stream_ordering"],
            ))
        else:
            # Nothing after: anchor the token at the event itself.
            end_token = str(RoomStreamToken(
                topological_ordering,
                stream_ordering,
            ))

        return {
            "before": {
                "event_ids": events_before,
                "token": start_token,
            },
            "after": {
                "event_ids": events_after,
                "token": end_token,
            },
        }
|