Support pagination for tokens without chunk part

This commit is contained in:
Erik Johnston 2018-05-23 16:12:32 +01:00
parent 70639b07ec
commit f9f6a6e0c1
1 changed files with 30 additions and 7 deletions

View File

@ -728,6 +728,30 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
assert int(limit) >= 0
# For backwards compatibility we need to check if the token has a
# topological part but no chunk part. If that's the case we can use the
# stream part to generate an appropriate topological token.
if from_token.chunk is None and from_token.topological is not None:
res = self._simple_select_one_txn(
txn,
table="events",
keyvalues={
"stream_ordering": from_token.stream,
},
retcols=(
"chunk_id",
"topological_ordering",
"stream_ordering",
),
allow_none=True,
)
if res and res["chunk_id"] is not None:
from_token = RoomStreamToken(
res["chunk_id"],
res["topological_ordering"],
res["stream_ordering"],
)
# Tokens really represent positions between elements, but we use
# the convention of pointing to the event before the gap. Hence
# we have a bit of asymmetry when it comes to equalities.
@ -778,13 +802,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
  iterated_chunks = []
  chunk_id = None
- if from_token.chunk:  # FIXME: may be topological but no chunk.
-     if rows:
-         chunk_id = rows[-1].chunk_id
-         iterated_chunks = [r.chunk_id for r in rows]
-     else:
-         chunk_id = from_token.chunk
-         iterated_chunks = [chunk_id]
+ if rows:
+     chunk_id = rows[-1].chunk_id
+     iterated_chunks = [r.chunk_id for r in rows]
+ elif from_token.chunk:
+     chunk_id = from_token.chunk
+     iterated_chunks = [chunk_id]
  table = ChunkDBOrderedListStore(
      txn, room_id, self.clock,