Merge pull request #65 from matrix-org/get_event_cache
Add an in-memory cache for get_event in the storage layer
commit 40f332e534
@@ -27,6 +27,16 @@ class Config(object):
     def __init__(self, args):
         pass
 
+    @staticmethod
+    def parse_size(string):
+        sizes = {"K": 1024, "M": 1024 * 1024}
+        size = 1
+        suffix = string[-1]
+        if suffix in sizes:
+            string = string[:-1]
+            size = sizes[suffix]
+        return int(string) * size
+
     @staticmethod
     def abspath(file_path):
         return os.path.abspath(file_path) if file_path else file_path
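For reference, parse_size treats a trailing "K" or "M" as a binary multiplier (1024 and 1024 * 1024); anything else is taken as a plain integer count. Below is a standalone restatement with a few worked values; the function body is copied from the hunk above, the sample calls are mine:

    def parse_size(string):
        sizes = {"K": 1024, "M": 1024 * 1024}
        size = 1
        suffix = string[-1]
        if suffix in sizes:
            string = string[:-1]
            size = sizes[suffix]
        return int(string) * size

    print(parse_size("100K"))  # 102400 -- the default --event-cache-size
    print(parse_size("10M"))   # 10485760
    print(parse_size("250"))   # 250; no suffix means a bare count
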
@@ -24,6 +24,7 @@ class DatabaseConfig(Config):
             self.database_path = ":memory:"
         else:
             self.database_path = self.abspath(args.database_path)
+        self.event_cache_size = self.parse_size(args.event_cache_size)
 
     @classmethod
     def add_arguments(cls, parser):
@@ -33,6 +34,10 @@ class DatabaseConfig(Config):
             "-d", "--database-path", default="homeserver.db",
             help="The database name."
         )
+        db_group.add_argument(
+            "--event-cache-size", default="100K",
+            help="Number of events to cache in memory."
+        )
 
     @classmethod
     def generate_config(cls, args, config_dir_path):
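A quick sketch (mine, not part of the commit) of how the new flag reaches DatabaseConfig: argparse turns --event-cache-size into args.event_cache_size, which the constructor then feeds through parse_size:

    import argparse

    parser = argparse.ArgumentParser()
    db_group = parser.add_argument_group("database")
    db_group.add_argument(
        "--event-cache-size", default="100K",
        help="Number of events to cache in memory."
    )

    args = parser.parse_args(["--event-cache-size", "20K"])
    print(args.event_cache_size)  # "20K"; parse_size() then turns this into 20480
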
@@ -164,6 +164,9 @@ class DataStore(RoomMemberStore, RoomStore,
                            stream_ordering=None, is_new_state=True,
                            current_state=None):
 
+        # Remove the any existing cache entries for the event_id
+        self._get_event_cache.pop(event.event_id)
+
         # We purposefully do this first since if we include a `current_state`
         # key, we *want* to update the `current_state_events` table
         if current_state:
@@ -420,6 +423,8 @@ class DataStore(RoomMemberStore, RoomStore,
         )
 
     def _store_redaction(self, txn, event):
+        # invalidate the cache for the redacted event
+        self._get_event_cache.pop(event.redacts)
         txn.execute(
             "INSERT OR IGNORE INTO redactions "
             "(event_id, redacts) VALUES (?,?)",
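Both invalidation sites above call pop unconditionally. That is safe because the new LruCache.pop (added below) takes a default of None rather than raising; this illustrative stand-in shows the same semantics:

    cache = {}  # stand-in for self._get_event_cache

    def invalidate(event_id):
        # Mirrors what LruCache.pop provides: a miss returns the default
        # instead of raising KeyError.
        return cache.pop(event_id, None)

    print(invalidate("$never_seen:example.com"))  # None, no exception
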
@@ -19,6 +19,7 @@ from synapse.events import FrozenEvent
 from synapse.events.utils import prune_event
 from synapse.util.logutils import log_function
 from synapse.util.logcontext import PreserveLoggingContext, LoggingContext
+from synapse.util.lrucache import LruCache
 
 from twisted.internet import defer
 
@@ -128,6 +129,8 @@ class SQLBaseStore(object):
         self._txn_perf_counters = PerformanceCounters()
         self._get_event_counters = PerformanceCounters()
 
+        self._get_event_cache = LruCache(hs.config.event_cache_size)
+
     def start_profiling(self):
         self._previous_loop_ts = self._clock.time_msec()
 
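Rough sizing note (my own arithmetic, not from the commit): the default "100K" means 102400 cached events, so the memory cost depends on how large a parsed event is, which is why the size is configurable rather than hard-coded:

    default_entries = 100 * 1024              # parse_size("100K")
    assumed_bytes_per_event = 2 * 1024        # assumption for illustration only
    print(default_entries * assumed_bytes_per_event // (1024 * 1024))  # ~200 MB
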
@@ -579,6 +582,19 @@ class SQLBaseStore(object):
 
     def _get_event_txn(self, txn, event_id, check_redacted=True,
                        get_prev_content=False, allow_rejected=False):
+
+        start_time = time.time() * 1000
+        update_counter = self._get_event_counters.update
+
+        try:
+            cache = self._get_event_cache.setdefault(event_id, {})
+            # Separate cache entries for each way to invoke _get_event_txn
+            return cache[(check_redacted, get_prev_content, allow_rejected)]
+        except KeyError:
+            pass
+        finally:
+            start_time = update_counter("event_cache", start_time)
+
         sql = (
             "SELECT e.internal_metadata, e.json, r.event_id, rej.reason "
             "FROM event_json as e "
@@ -588,8 +604,6 @@ class SQLBaseStore(object):
             "LIMIT 1 "
         )
 
-        start_time = time.time() * 1000
-
         txn.execute(sql, (event_id,))
 
         res = txn.fetchone()
@@ -599,14 +613,16 @@ class SQLBaseStore(object):
 
         internal_metadata, js, redacted, rejected_reason = res
 
-        self._get_event_counters.update("select_event", start_time)
+        start_time = update_counter("select_event", start_time)
 
         if allow_rejected or not rejected_reason:
-            return self._get_event_from_row_txn(
+            result = self._get_event_from_row_txn(
                 txn, internal_metadata, js, redacted,
                 check_redacted=check_redacted,
                 get_prev_content=get_prev_content,
             )
+            cache[(check_redacted, get_prev_content, allow_rejected)] = result
+            return result
         else:
             return None
 
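The lookup above keeps one small dict per event, keyed by the (check_redacted, get_prev_content, allow_rejected) tuple, so calls made with different flags never see each other's results; the finally clause only updates the timing counter and does not swallow the early return. A minimal illustration of the same flow with a plain dict standing in for the LruCache (the names and the fake DB lookup are mine):

    event_cache = {}  # event_id -> {flags tuple: cached result}

    def get_event(event_id, check_redacted=True, get_prev_content=False,
                  allow_rejected=False):
        key = (check_redacted, get_prev_content, allow_rejected)
        per_event = event_cache.setdefault(event_id, {})
        try:
            return per_event[key]          # cache hit
        except KeyError:
            pass                           # cache miss: fall through to the "DB"
        result = {"event_id": event_id, "flags": key}  # placeholder for the real query
        per_event[key] = result
        return result

    first = get_event("$abc:example.com")
    assert get_event("$abc:example.com") is first                            # served from cache
    assert get_event("$abc:example.com", allow_rejected=True) is not first   # different flags, separate entry
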
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class LruCache(object):
+    """Least-recently-used cache."""
+    # TODO(mjark) Add hit/miss counters
+    # TODO(mjark) Add mutex for linked list for thread safety.
+    def __init__(self, max_size):
+        cache = {}
+        list_root = []
+        list_root[:] = [list_root, list_root, None, None]
+
+        PREV, NEXT, KEY, VALUE = 0, 1, 2, 3
+
+        def add_node(key, value):
+            prev_node = list_root
+            next_node = prev_node[NEXT]
+            node = [prev_node, next_node, key, value]
+            prev_node[NEXT] = node
+            next_node[PREV] = node
+            cache[key] = node
+
+        def move_node_to_front(node):
+            prev_node = node[PREV]
+            next_node = node[NEXT]
+            prev_node[NEXT] = next_node
+            next_node[PREV] = prev_node
+            prev_node = list_root
+            next_node = prev_node[NEXT]
+            node[PREV] = prev_node
+            node[NEXT] = next_node
+            prev_node[NEXT] = node
+            next_node[PREV] = node
+
+        def delete_node(node):
+            prev_node = node[PREV]
+            next_node = node[NEXT]
+            prev_node[NEXT] = next_node
+            next_node[PREV] = prev_node
+            cache.pop(node[KEY], None)
+
+        def cache_get(key, default=None):
+            node = cache.get(key, None)
+            if node is not None:
+                move_node_to_front(node)
+                return node[VALUE]
+            else:
+                return default
+
+        def cache_set(key, value):
+            node = cache.get(key, None)
+            if node is not None:
+                move_node_to_front(node)
+                node[VALUE] = value
+            else:
+                add_node(key, value)
+                if len(cache) > max_size:
+                    delete_node(list_root[PREV])
+
+        def cache_set_default(key, value):
+            node = cache.get(key, None)
+            if node is not None:
+                return node[VALUE]
+            else:
+                add_node(key, value)
+                if len(cache) > max_size:
+                    delete_node(list_root[PREV])
+                return value
+
+        def cache_pop(key, default=None):
+            node = cache.get(key, None)
+            if node:
+                delete_node(node)
+                return node[VALUE]
+            else:
+                return default
+
+        self.sentinel = object()
+        self.get = cache_get
+        self.set = cache_set
+        self.setdefault = cache_set_default
+        self.pop = cache_pop
+
+    def __getitem__(self, key):
+        result = self.get(key, self.sentinel)
+        if result is self.sentinel:
+            raise KeyError()
+        else:
+            return result
+
+    def __setitem__(self, key, value):
+        self.set(key, value)
+
+    def __delitem__(self, key, value):
+        result = self.pop(key, self.sentinel)
+        if result is self.sentinel:
+            raise KeyError()
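The new class stores each entry as a [PREV, NEXT, KEY, VALUE] list linked into a circular list rooted at list_root, so a hit moves the node to the front and eviction always removes list_root[PREV], the least-recently-used node, in O(1). A quick usage check, assuming the module is importable as synapse.util.lrucache (the same path the diffs above import):

    from synapse.util.lrucache import LruCache

    cache = LruCache(2)
    cache["a"] = 1
    cache["b"] = 2
    cache.get("a")               # touching "a" moves it to the front
    cache["c"] = 3               # over capacity: least-recently-used "b" is evicted
    assert cache.get("b") is None
    assert cache["a"] == 1 and cache["c"] == 3
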
@@ -38,8 +38,9 @@ class SQLBaseStoreTestCase(unittest.TestCase):
                 return defer.succeed(func(self.mock_txn, *args, **kwargs))
         self.db_pool.runInteraction = runInteraction
 
-        hs = HomeServer("test",
-                        db_pool=self.db_pool)
+        config = Mock()
+        config.event_cache_size = 1
+        hs = HomeServer("test", db_pool=self.db_pool, config=config)
 
         self.datastore = SQLBaseStore(hs)
 
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from .. import unittest
+
+from synapse.util.lrucache import LruCache
+
+
+class LruCacheTestCase(unittest.TestCase):
+
+    def test_get_set(self):
+        cache = LruCache(1)
+        cache["key"] = "value"
+        self.assertEquals(cache.get("key"), "value")
+        self.assertEquals(cache["key"], "value")
+
+    def test_eviction(self):
+        cache = LruCache(2)
+        cache[1] = 1
+        cache[2] = 2
+
+        self.assertEquals(cache.get(1), 1)
+        self.assertEquals(cache.get(2), 2)
+
+        cache[3] = 3
+
+        self.assertEquals(cache.get(1), None)
+        self.assertEquals(cache.get(2), 2)
+        self.assertEquals(cache.get(3), 3)
+
+    def test_setdefault(self):
+        cache = LruCache(1)
+        self.assertEquals(cache.setdefault("key", 1), 1)
+        self.assertEquals(cache.get("key"), 1)
+        self.assertEquals(cache.setdefault("key", 2), 1)
+        self.assertEquals(cache.get("key"), 1)
+
+    def test_pop(self):
+        cache = LruCache(1)
+        cache["key"] = 1
+        self.assertEquals(cache.pop("key"), 1)
+        self.assertEquals(cache.pop("key"), None)
+
@@ -41,6 +41,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
     if config is None:
        config = Mock()
        config.signing_key = [MockKey()]
+        config.event_cache_size = 1
 
     if datastore is None:
        db_pool = SQLiteMemoryDbPool()