# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from collections import namedtuple

from six import iteritems, itervalues
from six.moves import range

from twisted.internet import defer

from synapse.storage.background_updates import BackgroundUpdateStore
from synapse.storage.engines import PostgresEngine
from synapse.util.caches import get_cache_factor_for, intern_string
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.dictionary_cache import DictionaryCache
from synapse.util.stringutils import to_ascii

from ._base import SQLBaseStore

logger = logging.getLogger(__name__)


MAX_STATE_DELTA_HOPS = 100


class _GetStateGroupDelta(namedtuple("_GetStateGroupDelta", ("prev_group", "delta_ids"))):
    """Return type of get_state_group_delta that implements __len__, which lets
    us use the iterable flag when caching
    """
    __slots__ = []

    def __len__(self):
        return len(self.delta_ids) if self.delta_ids else 0
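

# Note (assumed behaviour of the caching layer): @cached(..., iterable=True)
# is understood to size cache entries by len(value), so the __len__ above
# lets a cached delta be costed by its number of entries rather than
# counting as a single unit.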


class StateGroupWorkerStore(SQLBaseStore):
    """The parts of StateGroupStore that can be called from workers.
    """

    STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
    STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
    CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"

    def __init__(self, db_conn, hs):
        super(StateGroupWorkerStore, self).__init__(db_conn, hs)

        self._state_group_cache = DictionaryCache(
            "*stateGroupCache*", 500000 * get_cache_factor_for("stateGroupCache")
        )

    @cached(max_entries=100000, iterable=True)
    def get_current_state_ids(self, room_id):
        """Get the current state event ids for a room based on the
        current_state_events table.

        Args:
            room_id (str)

        Returns:
            deferred: dict of (type, state_key) -> event_id
        """
        def _get_current_state_ids_txn(txn):
            txn.execute(
                """SELECT type, state_key, event_id FROM current_state_events
                WHERE room_id = ?
                """,
                (room_id,)
            )

            return {
                (intern_string(r[0]), intern_string(r[1])): to_ascii(r[2]) for r in txn
            }

        return self.runInteraction(
            "get_current_state_ids",
            _get_current_state_ids_txn,
        )

    @cached(max_entries=10000, iterable=True)
    def get_state_group_delta(self, state_group):
        """Given a state group try to return a previous group and a delta between
        the old and the new.

        Returns:
            (prev_group, delta_ids), where both may be None.
        """
        def _get_state_group_delta_txn(txn):
            prev_group = self._simple_select_one_onecol_txn(
                txn,
                table="state_group_edges",
                keyvalues={
                    "state_group": state_group,
                },
                retcol="prev_state_group",
                allow_none=True,
            )

            if not prev_group:
                return _GetStateGroupDelta(None, None)

            delta_ids = self._simple_select_list_txn(
                txn,
                table="state_groups_state",
                keyvalues={
                    "state_group": state_group,
                },
                retcols=("type", "state_key", "event_id",)
            )

            return _GetStateGroupDelta(prev_group, {
                (row["type"], row["state_key"]): row["event_id"]
                for row in delta_ids
            })
        return self.runInteraction(
            "get_state_group_delta",
            _get_state_group_delta_txn,
        )

    @defer.inlineCallbacks
    def get_state_groups_ids(self, room_id, event_ids):
        if not event_ids:
            defer.returnValue({})

        event_to_groups = yield self._get_state_group_for_events(
            event_ids,
        )

        groups = set(itervalues(event_to_groups))
        group_to_state = yield self._get_state_for_groups(groups)

        defer.returnValue(group_to_state)

    @defer.inlineCallbacks
    def get_state_ids_for_group(self, state_group):
        """Get the state IDs for the given state group

        Args:
            state_group (int)

        Returns:
            Deferred[dict]: Resolves to a map of (type, state_key) -> event_id
        """
        group_to_state = yield self._get_state_for_groups((state_group,))

        defer.returnValue(group_to_state[state_group])

    @defer.inlineCallbacks
    def get_state_groups(self, room_id, event_ids):
        """ Get the state groups for the given list of event_ids

        The return value is a dict mapping group names to lists of events.
        """
        if not event_ids:
            defer.returnValue({})

        group_to_ids = yield self.get_state_groups_ids(room_id, event_ids)

        state_event_map = yield self.get_events(
            [
                ev_id for group_ids in itervalues(group_to_ids)
                for ev_id in itervalues(group_ids)
            ],
            get_prev_content=False
        )

        defer.returnValue({
            group: [
                state_event_map[v] for v in itervalues(event_id_map)
                if v in state_event_map
            ]
            for group, event_id_map in iteritems(group_to_ids)
        })

    @defer.inlineCallbacks
    def _get_state_groups_from_groups(self, groups, types):
        """Returns a dictionary state_group -> (dict of (type, state_key) -> event id)
        """
        results = {}

        chunks = [groups[i:i + 100] for i in range(0, len(groups), 100)]
        for chunk in chunks:
            res = yield self.runInteraction(
                "_get_state_groups_from_groups",
                self._get_state_groups_from_groups_txn, chunk, types,
            )
            results.update(res)

        defer.returnValue(results)

    def _get_state_groups_from_groups_txn(self, txn, groups, types=None):
        results = {group: {} for group in groups}

        if types is not None:
            types = list(set(types))  # deduplicate types list

        if isinstance(self.database_engine, PostgresEngine):
            # Temporarily disable sequential scans in this transaction. This is
            # a temporary hack until we can add the right indices in
            txn.execute("SET LOCAL enable_seqscan=off")

            # The below query walks the state_group tree so that the "state"
            # table includes all state_groups in the tree. It then joins
            # against `state_groups_state` to fetch the latest state.
            # It assumes that previous state groups are always numerically
            # lesser.
            # The PARTITION is used to get the event_id in the greatest state
            # group for the given type, state_key.
            # This may return multiple rows per (type, state_key), but last_value
            # should be the same.
            sql = ("""
                WITH RECURSIVE state(state_group) AS (
                    VALUES(?::bigint)
                    UNION ALL
                    SELECT prev_state_group FROM state_group_edges e, state s
                    WHERE s.state_group = e.state_group
                )
                SELECT type, state_key, last_value(event_id) OVER (
                    PARTITION BY type, state_key ORDER BY state_group ASC
                    ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
                ) AS event_id FROM state_groups_state
                WHERE state_group IN (
                    SELECT state_group FROM state
                )
                %s
            """)

            # Turns out that postgres doesn't like doing a list of OR's and
            # is about 1000x slower, so we just issue a query for each specific
            # type separately.
            if types:
                clause_to_args = [
                    (
                        "AND type = ? AND state_key = ?",
                        (etype, state_key)
                    ) if state_key is not None else (
                        "AND type = ?",
                        (etype,)
                    )
                    for etype, state_key in types
                ]
            else:
                # If types is None we fetch all the state, and so just use an
                # empty where clause with no extra args.
                clause_to_args = [("", [])]

            for where_clause, where_args in clause_to_args:
                for group in groups:
                    args = [group]
                    args.extend(where_args)

                    txn.execute(sql % (where_clause,), args)
                    for row in txn:
                        typ, state_key, event_id = row
                        key = (typ, state_key)
                        results[group][key] = event_id
        else:
            where_args = []
            where_clauses = []
            wildcard_types = False
            if types is not None:
                for typ in types:
                    if typ[1] is None:
                        where_clauses.append("(type = ?)")
                        where_args.append(typ[0])
                        wildcard_types = True
                    else:
                        where_clauses.append("(type = ? AND state_key = ?)")
                        where_args.extend([typ[0], typ[1]])
                where_clause = "AND (%s)" % (" OR ".join(where_clauses))
            else:
                where_clause = ""

            # We don't use WITH RECURSIVE on sqlite3 as there are distributions
            # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
            for group in groups:
                next_group = group

                while next_group:
                    # We did this before by getting the list of group ids, and
                    # then passing that list to sqlite to get latest event for
                    # each (type, state_key). However, that was terribly slow
                    # without the right indices (which we can't add until
                    # after we finish deduping state, which requires this func)
                    args = [next_group]
                    if types:
                        args.extend(where_args)

                    txn.execute(
                        "SELECT type, state_key, event_id FROM state_groups_state"
                        " WHERE state_group = ? %s" % (where_clause,),
                        args
                    )
                    results[group].update(
                        ((typ, state_key), event_id)
                        for typ, state_key, event_id in txn
                        if (typ, state_key) not in results[group]
                    )

                    # If the number of entries in the (type,state_key)->event_id dict
                    # matches the number of (type,state_keys) types we were searching
                    # for, then we must have found them all, so no need to go walk
                    # further down the tree... UNLESS our types filter contained
                    # wildcards (i.e. Nones) in which case we have to do an exhaustive
                    # search
                    if (
                        types is not None and
                        not wildcard_types and
                        len(results[group]) == len(types)
                    ):
                        break

                    next_group = self._simple_select_one_onecol_txn(
                        txn,
                        table="state_group_edges",
                        keyvalues={"state_group": next_group},
                        retcol="prev_state_group",
                        allow_none=True,
                    )

        return results

    @defer.inlineCallbacks
    def get_state_for_events(self, event_ids, types):
        """Given a list of event_ids and type tuples, return the state dict
        for each event. The state dicts will only have the type/state_keys
        that are in the `types` list.

        Args:
            event_ids (list)
            types (list): List of (type, state_key) tuples which are used to
                filter the state fetched. `state_key` may be None, which matches
                any `state_key`

        Returns:
            deferred: A dict from event_id to state dict, where each state
            dict is a mapping from (type, state_key) -> state_event
        """
        event_to_groups = yield self._get_state_group_for_events(
            event_ids,
        )

        groups = set(itervalues(event_to_groups))
        group_to_state = yield self._get_state_for_groups(groups, types)

        state_event_map = yield self.get_events(
            [ev_id for sd in itervalues(group_to_state) for ev_id in itervalues(sd)],
            get_prev_content=False
        )

        event_to_state = {
            event_id: {
                k: state_event_map[v]
                for k, v in iteritems(group_to_state[group])
                if v in state_event_map
            }
            for event_id, group in iteritems(event_to_groups)
        }

        defer.returnValue({event: event_to_state[event] for event in event_ids})

    @defer.inlineCallbacks
    def get_state_ids_for_events(self, event_ids, types=None):
        """
        Get the state dicts corresponding to a list of events

        Args:
            event_ids(list(str)): events whose state should be returned
            types(list[(str, str)]|None): List of (type, state_key) tuples
                which are used to filter the state fetched. May be None, which
                matches any key

        Returns:
            A deferred dict from event_id -> (type, state_key) -> event_id
        """
        event_to_groups = yield self._get_state_group_for_events(
            event_ids,
        )

        groups = set(itervalues(event_to_groups))
        group_to_state = yield self._get_state_for_groups(groups, types)

        event_to_state = {
            event_id: group_to_state[group]
            for event_id, group in iteritems(event_to_groups)
        }

        defer.returnValue({event: event_to_state[event] for event in event_ids})

    @defer.inlineCallbacks
    def get_state_for_event(self, event_id, types=None):
        """
        Get the state dict corresponding to a particular event

        Args:
            event_id(str): event whose state should be returned
            types(list[(str, str)]|None): List of (type, state_key) tuples
                which are used to filter the state fetched. May be None, which
                matches any key

        Returns:
            A deferred dict from (type, state_key) -> state_event
        """
        state_map = yield self.get_state_for_events([event_id], types)
        defer.returnValue(state_map[event_id])

    @defer.inlineCallbacks
    def get_state_ids_for_event(self, event_id, types=None):
        """
        Get the state dict corresponding to a particular event

        Args:
            event_id(str): event whose state should be returned
            types(list[(str, str)]|None): List of (type, state_key) tuples
                which are used to filter the state fetched. May be None, which
                matches any key

        Returns:
            A deferred dict from (type, state_key) -> event_id
        """
        state_map = yield self.get_state_ids_for_events([event_id], types)
        defer.returnValue(state_map[event_id])
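
    # Example usage of the getters above (illustrative, hypothetical IDs):
    #
    #     state_ids = yield self.get_state_ids_for_event(
    #         "$event:example.com",
    #         types=[("m.room.member", None), ("m.room.name", "")],
    #     )
    #     # -> {("m.room.member", "@alice:example.com"): "$join:example.com",
    #     #     ("m.room.name", ""): "$name:example.com"}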

    @cached(max_entries=50000)
    def _get_state_group_for_event(self, event_id):
        return self._simple_select_one_onecol(
            table="event_to_state_groups",
            keyvalues={
                "event_id": event_id,
            },
            retcol="state_group",
            allow_none=True,
            desc="_get_state_group_for_event",
        )

    @cachedList(cached_method_name="_get_state_group_for_event",
                list_name="event_ids", num_args=1, inlineCallbacks=True)
    def _get_state_group_for_events(self, event_ids):
        """Returns mapping event_id -> state_group
        """
        rows = yield self._simple_select_many_batch(
            table="event_to_state_groups",
            column="event_id",
            iterable=event_ids,
            keyvalues={},
            retcols=("event_id", "state_group",),
            desc="_get_state_group_for_events",
        )

        defer.returnValue({row["event_id"]: row["state_group"] for row in rows})

    def _get_some_state_from_cache(self, group, types):
        """Checks if group is in cache. See `_get_state_for_groups`

        Returns 3-tuple (`state_dict`, `missing_types`, `got_all`).
        `missing_types` is the list of types that aren't in the cache for that
        group. `got_all` is a bool indicating if we successfully retrieved all
        requested state from the cache; if False we need to query the DB for the
        missing state.

        Args:
            group: The state group to lookup
            types (list): List of 2-tuples of the form (`type`, `state_key`),
                where a `state_key` of `None` matches all state_keys for the
                `type`.
        """
        is_all, known_absent, state_dict_ids = self._state_group_cache.get(group)

        type_to_key = {}
        missing_types = set()

        for typ, state_key in types:
            key = (typ, state_key)
            if state_key is None:
                type_to_key[typ] = None
                missing_types.add(key)
            else:
                if type_to_key.get(typ, object()) is not None:
                    type_to_key.setdefault(typ, set()).add(state_key)

                if key not in state_dict_ids and key not in known_absent:
                    missing_types.add(key)

        sentinel = object()

        def include(typ, state_key):
            valid_state_keys = type_to_key.get(typ, sentinel)
            if valid_state_keys is sentinel:
                return False
            if valid_state_keys is None:
                return True
            if state_key in valid_state_keys:
                return True
            return False

        got_all = is_all or not missing_types

        return {
            k: v for k, v in iteritems(state_dict_ids)
            if include(k[0], k[1])
        }, missing_types, got_all

    def _get_all_state_from_cache(self, group):
        """Checks if group is in cache. See `_get_state_for_groups`

        Returns 2-tuple (`state_dict`, `got_all`). `got_all` is a bool
        indicating if we successfully retrieved all requested state from the
        cache; if False we need to query the DB for the missing state.

        Args:
            group: The state group to lookup
        """
        is_all, _, state_dict_ids = self._state_group_cache.get(group)

        return state_dict_ids, is_all

    @defer.inlineCallbacks
    def _get_state_for_groups(self, groups, types=None):
        """Gets the state at each of a list of state groups, optionally
        filtering by type/state_key

        Args:
            groups (iterable[int]): list of state groups for which we want
                to get the state.
            types (None|iterable[(str, None|str)]):
                indicates the state type/keys required. If None, the whole
                state is fetched and returned.

                Otherwise, each entry should be a `(type, state_key)` tuple to
                include in the response. A `state_key` of None is a wildcard
                meaning that we require all state with that type.

        Returns:
            Deferred[dict[int, dict[(type, state_key), EventBase]]]
                a dictionary mapping from state group to state dictionary.
        """
        if types:
            types = frozenset(types)
        results = {}
        missing_groups = []
        if types is not None:
            for group in set(groups):
                state_dict_ids, _, got_all = self._get_some_state_from_cache(
                    group, types,
                )
                results[group] = state_dict_ids

                if not got_all:
                    missing_groups.append(group)
        else:
            for group in set(groups):
                state_dict_ids, got_all = self._get_all_state_from_cache(
                    group
                )

                results[group] = state_dict_ids

                if not got_all:
                    missing_groups.append(group)

        if missing_groups:
            # Okay, so we have some missing_types, let's fetch them.
            cache_seq_num = self._state_group_cache.sequence

            # the DictionaryCache knows if it has *all* the state, but
            # does not know if it has all of the keys of a particular type,
            # which makes wildcard lookups expensive unless we have a complete
            # cache. Hence, if we are doing a wildcard lookup, populate the
            # cache fully so that we can do an efficient lookup next time.

            if types and any(k is None for (t, k) in types):
                types_to_fetch = None
            else:
                types_to_fetch = types

            group_to_state_dict = yield self._get_state_groups_from_groups(
                missing_groups, types_to_fetch,
            )

            for group, group_state_dict in iteritems(group_to_state_dict):
                state_dict = results[group]

                # update the result, filtering by `types`.
                if types:
                    for k, v in iteritems(group_state_dict):
                        (typ, _) = k
                        if k in types or (typ, None) in types:
                            state_dict[k] = v
                else:
                    state_dict.update(group_state_dict)

                # update the cache with all the things we fetched from the
                # database.
                self._state_group_cache.update(
                    cache_seq_num,
                    key=group,
                    value=group_state_dict,
                    fetched_keys=types_to_fetch,
                )

        defer.returnValue(results)

    def store_state_group(self, event_id, room_id, prev_group, delta_ids,
                          current_state_ids):
        """Store a new set of state, returning a newly assigned state group.

        Args:
            event_id (str): The event ID for which the state was calculated
            room_id (str)
            prev_group (int|None): A previous state group for the room, optional.
            delta_ids (dict|None): The delta between state at `prev_group` and
                `current_state_ids`, if `prev_group` was given. Same format as
                `current_state_ids`.
            current_state_ids (dict): The state to store. Map of (type, state_key)
                to event_id.

        Returns:
            Deferred[int]: The state group ID
        """
        def _store_state_group_txn(txn):
            if current_state_ids is None:
                # AFAIK, this can never happen
                raise Exception("current_state_ids cannot be None")

            state_group = self.database_engine.get_next_state_group_id(txn)

            self._simple_insert_txn(
                txn,
                table="state_groups",
                values={
                    "id": state_group,
                    "room_id": room_id,
                    "event_id": event_id,
                },
            )

            # We persist as a delta if we can, while also ensuring the chain
            # of deltas isn't too long, as otherwise read performance degrades.
            if prev_group:
                is_in_db = self._simple_select_one_onecol_txn(
                    txn,
                    table="state_groups",
                    keyvalues={"id": prev_group},
                    retcol="id",
                    allow_none=True,
                )
                if not is_in_db:
                    raise Exception(
                        "Trying to persist state with unpersisted prev_group: %r"
                        % (prev_group,)
                    )

                potential_hops = self._count_state_group_hops_txn(
                    txn, prev_group
                )
            if prev_group and potential_hops < MAX_STATE_DELTA_HOPS:
                self._simple_insert_txn(
                    txn,
                    table="state_group_edges",
                    values={
                        "state_group": state_group,
                        "prev_state_group": prev_group,
                    },
                )

                self._simple_insert_many_txn(
                    txn,
                    table="state_groups_state",
                    values=[
                        {
                            "state_group": state_group,
                            "room_id": room_id,
                            "type": key[0],
                            "state_key": key[1],
                            "event_id": state_id,
                        }
                        for key, state_id in iteritems(delta_ids)
                    ],
                )
            else:
                self._simple_insert_many_txn(
                    txn,
                    table="state_groups_state",
                    values=[
                        {
                            "state_group": state_group,
                            "room_id": room_id,
                            "type": key[0],
                            "state_key": key[1],
                            "event_id": state_id,
                        }
                        for key, state_id in iteritems(current_state_ids)
                    ],
                )

            # Prefill the state group cache with this group.
            # It's fine to use the sequence like this as the state group map
            # is immutable. (If the map wasn't immutable then this prefill could
            # race with another update)
            txn.call_after(
                self._state_group_cache.update,
                self._state_group_cache.sequence,
                key=state_group,
                value=dict(current_state_ids),
            )

            return state_group

        return self.runInteraction("store_state_group", _store_state_group_txn)

    def _count_state_group_hops_txn(self, txn, state_group):
        """Given a state group, count how many hops there are in the tree.

        This is used to ensure the delta chains don't get too long.
        """
        if isinstance(self.database_engine, PostgresEngine):
            sql = ("""
                WITH RECURSIVE state(state_group) AS (
                    VALUES(?::bigint)
                    UNION ALL
                    SELECT prev_state_group FROM state_group_edges e, state s
                    WHERE s.state_group = e.state_group
                )
                SELECT count(*) FROM state;
            """)

            txn.execute(sql, (state_group,))
            row = txn.fetchone()
            if row and row[0]:
                return row[0]
            else:
                return 0
        else:
            # We don't use WITH RECURSIVE on sqlite3 as there are distributions
            # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
            next_group = state_group
            count = 0

            while next_group:
                next_group = self._simple_select_one_onecol_txn(
                    txn,
                    table="state_group_edges",
                    keyvalues={"state_group": next_group},
                    retcol="prev_state_group",
                    allow_none=True,
                )
                if next_group:
                    count += 1

            return count


class StateStore(StateGroupWorkerStore, BackgroundUpdateStore):
    """ Keeps track of the state at a given event.

    This is done by the concept of `state groups`. Every event is assigned
    a state group (identified by an arbitrary string), which references a
    collection of state events. The current state of an event is then the
    collection of state events referenced by the event's state group.

    Hence, every change in the current state causes a new state group to be
    generated. However, if no change happens (e.g., if we get a message event
    with only one parent), it inherits the state group from its parent.

    There are three tables:
      * `state_groups`: Stores group name, first event within the group and
        room id.
      * `event_to_state_groups`: Maps events to state groups.
      * `state_groups_state`: Maps state group to state events.
    """

    STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
    STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
    CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"

    def __init__(self, db_conn, hs):
        super(StateStore, self).__init__(db_conn, hs)
        self.register_background_update_handler(
            self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME,
            self._background_deduplicate_state,
        )
        self.register_background_update_handler(
            self.STATE_GROUP_INDEX_UPDATE_NAME,
            self._background_index_state,
        )
        self.register_background_index_update(
            self.CURRENT_STATE_INDEX_UPDATE_NAME,
            index_name="current_state_events_member_index",
            table="current_state_events",
            columns=["state_key"],
            where_clause="type='m.room.member'",
        )
    def _store_event_state_mappings_txn(self, txn, events_and_contexts):
        state_groups = {}
        for event, context in events_and_contexts:
            if event.internal_metadata.is_outlier():
                continue

            # if the event was rejected, just give it the same state as its
            # predecessor.
            if context.rejected:
                state_groups[event.event_id] = context.prev_group
                continue

            state_groups[event.event_id] = context.state_group

        self._simple_insert_many_txn(
            txn,
            table="event_to_state_groups",
            values=[
                {
                    "state_group": state_group_id,
                    "event_id": event_id,
                }
                for event_id, state_group_id in iteritems(state_groups)
            ],
        )

        for event_id, state_group_id in iteritems(state_groups):
            txn.call_after(
                self._get_state_group_for_event.prefill,
                (event_id,), state_group_id
            )

    @defer.inlineCallbacks
    def _background_deduplicate_state(self, progress, batch_size):
        """This background update will slowly deduplicate state by re-encoding
        it as deltas.
        """
        last_state_group = progress.get("last_state_group", 0)
        rows_inserted = progress.get("rows_inserted", 0)
        max_group = progress.get("max_group", None)

        BATCH_SIZE_SCALE_FACTOR = 100

        batch_size = max(1, int(batch_size / BATCH_SIZE_SCALE_FACTOR))

        if max_group is None:
            rows = yield self._execute(
                "_background_deduplicate_state", None,
                "SELECT coalesce(max(id), 0) FROM state_groups",
            )
            max_group = rows[0][0]

        def reindex_txn(txn):
            new_last_state_group = last_state_group
            for count in range(batch_size):
                txn.execute(
                    "SELECT id, room_id FROM state_groups"
                    " WHERE ? < id AND id <= ?"
                    " ORDER BY id ASC"
                    " LIMIT 1",
                    (new_last_state_group, max_group,)
                )
                row = txn.fetchone()
                if row:
                    state_group, room_id = row

                if not row or not state_group:
                    return True, count

                txn.execute(
                    "SELECT state_group FROM state_group_edges"
                    " WHERE state_group = ?",
                    (state_group,)
                )

                # If we reach a point where we've already started inserting
                # edges we should stop.
                if txn.fetchall():
                    return True, count

                txn.execute(
                    "SELECT coalesce(max(id), 0) FROM state_groups"
                    " WHERE id < ? AND room_id = ?",
                    (state_group, room_id,)
                )
                prev_group, = txn.fetchone()
                new_last_state_group = state_group

                if prev_group:
                    potential_hops = self._count_state_group_hops_txn(
                        txn, prev_group
                    )
                    if potential_hops >= MAX_STATE_DELTA_HOPS:
                        # We want to ensure chains are at most this long,
                        # otherwise read performance degrades.
                        continue

                    prev_state = self._get_state_groups_from_groups_txn(
                        txn, [prev_group], types=None
                    )
                    prev_state = prev_state[prev_group]

                    curr_state = self._get_state_groups_from_groups_txn(
                        txn, [state_group], types=None
                    )
                    curr_state = curr_state[state_group]

                    if not set(prev_state.keys()) - set(curr_state.keys()):
                        # We can only do a delta if the current state's keys
                        # are a superset of the previous state's keys.

                        delta_state = {
                            key: value for key, value in iteritems(curr_state)
                            if prev_state.get(key, None) != value
                        }

                        self._simple_delete_txn(
                            txn,
                            table="state_group_edges",
                            keyvalues={
                                "state_group": state_group,
                            }
                        )

                        self._simple_insert_txn(
                            txn,
                            table="state_group_edges",
                            values={
                                "state_group": state_group,
                                "prev_state_group": prev_group,
                            }
                        )

                        self._simple_delete_txn(
                            txn,
                            table="state_groups_state",
                            keyvalues={
                                "state_group": state_group,
                            }
                        )

                        self._simple_insert_many_txn(
                            txn,
                            table="state_groups_state",
                            values=[
                                {
                                    "state_group": state_group,
                                    "room_id": room_id,
                                    "type": key[0],
                                    "state_key": key[1],
                                    "event_id": state_id,
                                }
                                for key, state_id in iteritems(delta_state)
                            ],
                        )

            progress = {
                "last_state_group": state_group,
                "rows_inserted": rows_inserted + batch_size,
                "max_group": max_group,
            }

            self._background_update_progress_txn(
                txn, self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, progress
            )

            return False, batch_size

        finished, result = yield self.runInteraction(
            self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, reindex_txn
        )

        if finished:
            yield self._end_background_update(self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME)

        defer.returnValue(result * BATCH_SIZE_SCALE_FACTOR)
    @defer.inlineCallbacks
    def _background_index_state(self, progress, batch_size):
        def reindex_txn(conn):
            conn.rollback()
            if isinstance(self.database_engine, PostgresEngine):
                # postgres insists on autocommit for the index
                conn.set_session(autocommit=True)
                try:
                    txn = conn.cursor()
                    txn.execute(
                        "CREATE INDEX CONCURRENTLY state_groups_state_type_idx"
                        " ON state_groups_state(state_group, type, state_key)"
                    )
                    txn.execute(
                        "DROP INDEX IF EXISTS state_groups_state_id"
                    )
                finally:
                    conn.set_session(autocommit=False)
            else:
                txn = conn.cursor()
                txn.execute(
                    "CREATE INDEX state_groups_state_type_idx"
                    " ON state_groups_state(state_group, type, state_key)"
                )
                txn.execute(
                    "DROP INDEX IF EXISTS state_groups_state_id"
                )

        yield self.runWithConnection(reindex_txn)

        yield self._end_background_update(self.STATE_GROUP_INDEX_UPDATE_NAME)

        defer.returnValue(1)