#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
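
# The "synchrotron" is a worker process that serves sync traffic (/sync,
# /events and the initial-sync endpoints) on behalf of the main synapse
# process. It keeps itself up to date by long-polling the master's HTTP
# replication endpoint, and reports the set of currently-syncing users back
# to the master so presence can be tracked centrally.
#
# A minimal worker config, sketched from the options this module reads
# (illustrative values, not a complete reference):
#
#   worker_app: synapse.app.synchrotron
#   worker_replication_url: http://127.0.0.1:9092/_synapse/replication
#   worker_listeners:
#     - type: http
#       port: 8083
#       bind_addresses: ['127.0.0.1']
#       resources:
#         - names: [client]
#   worker_daemonize: false
#   worker_pid_file: /var/run/synchrotron.pid
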
import synapse

from synapse.api.constants import EventTypes, PresenceState
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.handlers.presence import PresenceHandler
from synapse.http.site import SynapseSite
from synapse.http.server import JsonResource
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.rest.client.v2_alpha import sync
from synapse.rest.client.v1 import events
from synapse.rest.client.v1.room import RoomInitialSyncRestServlet
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.server import HomeServer
from synapse.storage.client_ips import ClientIpStore
from synapse.storage.engines import create_engine
from synapse.storage.presence import PresenceStore, UserPresenceState
from synapse.storage.roommember import RoomMemberStore
from synapse.util.async import sleep
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn, \
    PreserveLoggingContext
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.stringutils import random_string
from synapse.util.versionstring import get_version_string

from twisted.internet import reactor, defer
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging
import contextlib
import gc
import ujson as json

logger = logging.getLogger("synapse.app.synchrotron")


class SynchrotronSlavedStore(
    SlavedPushRuleStore,
    SlavedEventStore,
    SlavedReceiptsStore,
    SlavedAccountDataStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedFilteringStore,
    SlavedPresenceStore,
    SlavedDeviceInboxStore,
    SlavedDeviceStore,
    RoomStore,
    BaseSlavedStore,
    ClientIpStore,  # After BaseSlavedStore because the constructor is different
):
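    # These are pulled out of __dict__ rather than looked up as attributes:
    # that bypasses the descriptor protocol, so the class attribute (including
    # its caching wrapper) is copied onto this class as-is and the cached
    # versions of the methods work here just as they do on the main stores.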
    who_forgot_in_room = (
        RoomMemberStore.__dict__["who_forgot_in_room"]
    )

    did_forget = (
        RoomMemberStore.__dict__["did_forget"]
    )

    # XXX: This is a bit broken because we don't persist the accepted list in a
    # way that can be replicated. This means that we don't have a way to
    # invalidate the cache correctly.
    get_presence_list_accepted = PresenceStore.__dict__[
        "get_presence_list_accepted"
    ]
    get_presence_list_observers_accepted = PresenceStore.__dict__[
        "get_presence_list_observers_accepted"
    ]
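

# How often (in milliseconds) to push the set of users currently syncing on
# this worker up to the master process.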
UPDATE_SYNCING_USERS_MS = 10 * 1000


class SynchrotronPresence(object):
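    """A minimal stand-in for the main process's PresenceHandler.

    It counts the active /sync requests for each user and periodically POSTs
    the set of syncing users to the master (which owns presence), while
    keeping its own view of presence state fresh from the replication stream.
    """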

    def __init__(self, hs):
        self.is_mine_id = hs.is_mine_id
        self.http_client = hs.get_simple_http_client()
        self.store = hs.get_datastore()
        self.user_to_num_current_syncs = {}
        self.syncing_users_url = hs.config.worker_replication_url + "/syncing_users"
        self.clock = hs.get_clock()
        self.notifier = hs.get_notifier()

        active_presence = self.store.take_presence_startup_info()
        self.user_to_current_state = {
            state.user_id: state
            for state in active_presence
        }

        self.process_id = random_string(16)
        logger.info("Presence process_id is %r", self.process_id)

        self._sending_sync = False
        self._need_to_send_sync = False
        self.clock.looping_call(
            self._send_syncing_users_regularly,
            UPDATE_SYNCING_USERS_MS,
        )

        reactor.addSystemEventTrigger("before", "shutdown", self._on_shutdown)

    def set_state(self, user, state, ignore_status_msg=False):
        # TODO: How is this supposed to work?
        pass
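
    # Borrow the read-side methods from the main PresenceHandler; `.__func__`
    # unwraps the unbound methods (this is Python 2 code) so they attach to
    # this class as ordinary functions.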
    get_states = PresenceHandler.get_states.__func__
    get_state = PresenceHandler.get_state.__func__
    _get_interested_parties = PresenceHandler._get_interested_parties.__func__
    current_state_for_users = PresenceHandler.current_state_for_users.__func__

    @defer.inlineCallbacks
    def user_syncing(self, user_id, affect_presence):
        if affect_presence:
            curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
            self.user_to_num_current_syncs[user_id] = curr_sync + 1
            prev_states = yield self.current_state_for_users([user_id])
            if prev_states[user_id].state == PresenceState.OFFLINE:
                # TODO: Don't block the sync request on this HTTP hit.
                yield self._send_syncing_users_now()

        def _end():
            # We check that the user_id is in user_to_num_current_syncs because
            # user_to_num_current_syncs may have been cleared if we are
            # shutting down.
            if affect_presence and user_id in self.user_to_num_current_syncs:
                self.user_to_num_current_syncs[user_id] -= 1

        @contextlib.contextmanager
        def _user_syncing():
            try:
                yield
            finally:
                _end()

        defer.returnValue(_user_syncing())

    @defer.inlineCallbacks
    def _on_shutdown(self):
        # When the synchrotron is shut down, tell the master to clear the
        # in-progress syncs for this process.
        self.user_to_num_current_syncs.clear()
        yield self._send_syncing_users_now()

    def _send_syncing_users_regularly(self):
        # Only send an update if we aren't in the middle of sending one.
        if not self._sending_sync:
            preserve_fn(self._send_syncing_users_now)()

    @defer.inlineCallbacks
    def _send_syncing_users_now(self):
        if self._sending_sync:
            # We don't want to race with sending another update.
            # Instead we wait for that update to finish and send another
            # update afterwards.
            self._need_to_send_sync = True
            return

        # Flag that we are sending an update.
        self._sending_sync = True

        yield self.http_client.post_json_get_json(self.syncing_users_url, {
            "process_id": self.process_id,
            "syncing_users": [
                user_id for user_id, count in self.user_to_num_current_syncs.items()
                if count > 0
            ],
        })

        # Unset the flag as we are no longer sending an update.
        self._sending_sync = False
        if self._need_to_send_sync:
            # If something happened while we were sending the update then
            # we might need to send another update.
            # TODO: Check if the update that was sent matches the current state
            # as we only need to send an update if they are different.
            self._need_to_send_sync = False
            yield self._send_syncing_users_now()

    @defer.inlineCallbacks
    def notify_from_replication(self, states, stream_id):
        parties = yield self._get_interested_parties(
            states, calculate_remote_hosts=False
        )
        room_ids_to_states, users_to_states, _ = parties

        self.notifier.on_new_event(
            "presence_key", stream_id, rooms=room_ids_to_states.keys(),
            users=users_to_states.keys()
        )
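
    # Each presence replication row carries the stream position followed by
    # the same fields as a UserPresenceState tuple.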
    @defer.inlineCallbacks
    def process_replication(self, result):
        stream = result.get("presence", {"rows": []})
        states = []
        for row in stream["rows"]:
            (
                position, user_id, state, last_active_ts,
                last_federation_update_ts, last_user_sync_ts, status_msg,
                currently_active
            ) = row
            state = UserPresenceState(
                user_id, state, last_active_ts,
                last_federation_update_ts, last_user_sync_ts, status_msg,
                currently_active
            )
            self.user_to_current_state[user_id] = state
            states.append(state)

        if states and "position" in stream:
            stream_id = int(stream["position"])
            yield self.notify_from_replication(states, stream_id)


class SynchrotronTyping(object):
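    """Mirrors the master's typing notifications as seen over replication.

    The synchrotron never originates typing events; it only tracks the
    serials and per-room typing sets so /sync responses can include them.
    """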

    def __init__(self, hs):
        self._latest_room_serial = 0
        self._room_serials = {}
        self._room_typing = {}

    def stream_positions(self):
        # We must update this typing token from the response of the previous
        # sync. In particular, the stream id may "reset" back to zero/a low
        # value which we *must* use for the next replication request.
        return {"typing": self._latest_room_serial}

    def process_replication(self, result):
        stream = result.get("typing")
        if stream:
            self._latest_room_serial = int(stream["position"])

            for row in stream["rows"]:
                position, room_id, typing_json = row
                typing = json.loads(typing_json)
                self._room_serials[room_id] = position
                self._room_typing[room_id] = typing


class SynchrotronApplicationService(object):
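    """A no-op application service handler: the master is responsible for
    passing events to application services, so the synchrotron ignores them.
    """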
    def notify_interested_services(self, event):
        pass


class SynchrotronServer(HomeServer):
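    """The synchrotron's HomeServer: wires up the slaved datastore, the HTTP
    listeners for sync traffic, and the replication loop against the master.
    """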

    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    sync.register_servlets(self, resource)
                    events.register_servlets(self, resource)
                    InitialSyncRestServlet(self).register(resource)
                    RoomInitialSyncRestServlet(self).register(resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse synchrotron now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])
    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url
        notifier = self.get_notifier()
        presence_handler = self.get_presence_handler()
        typing_handler = self.get_typing_handler()

        def notify_from_stream(
            result, stream_name, stream_key, room=None, user=None
        ):
            stream = result.get(stream_name)
            if stream:
                position_index = stream["field_names"].index("position")
                if room:
                    room_index = stream["field_names"].index(room)
                if user:
                    user_index = stream["field_names"].index(user)

                users = ()
                rooms = ()
                for row in stream["rows"]:
                    position = row[position_index]

                    if user:
                        users = (row[user_index],)

                    if room:
                        rooms = (row[room_index],)

                    notifier.on_new_event(
                        stream_key, position, users=users, rooms=rooms
                    )

        @defer.inlineCallbacks
        def notify_device_list_update(result):
            stream = result.get("device_lists")
            if not stream:
                return

            position_index = stream["field_names"].index("position")
            user_index = stream["field_names"].index("user_id")

            for row in stream["rows"]:
                position = row[position_index]
                user_id = row[user_index]

                room_ids = yield store.get_rooms_for_user(user_id)

                notifier.on_new_event(
                    "device_list_key", position, rooms=room_ids,
                )

        @defer.inlineCallbacks
        def notify(result):
            stream = result.get("events")
            if stream:
                max_position = stream["position"]

                event_map = yield store.get_events([row[1] for row in stream["rows"]])

                for row in stream["rows"]:
                    position = row[0]
                    event_id = row[1]
                    event = event_map.get(event_id, None)
                    if not event:
                        continue

                    extra_users = ()
                    if event.type == EventTypes.Member:
                        extra_users = (event.state_key,)
                    notifier.on_new_room_event(
                        event, position, max_position, extra_users
                    )

            notify_from_stream(
                result, "push_rules", "push_rules_key", user="user_id"
            )
            notify_from_stream(
                result, "user_account_data", "account_data_key", user="user_id"
            )
            notify_from_stream(
                result, "room_account_data", "account_data_key", user="user_id"
            )
            notify_from_stream(
                result, "tag_account_data", "account_data_key", user="user_id"
            )
            notify_from_stream(
                result, "receipts", "receipt_key", room="room_id"
            )
            notify_from_stream(
                result, "typing", "typing_key", room="room_id"
            )
            notify_from_stream(
                result, "to_device", "to_device_key", user="user_id"
            )
            yield notify_device_list_update(result)
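
        # Poll the master in a loop. The timeout makes each request a
        # long-poll, so it should return as soon as there is new data (or
        # after 30s); on error, back off briefly and retry.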
        while True:
            try:
                args = store.stream_positions()
                args.update(typing_handler.stream_positions())
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
                typing_handler.process_replication(result)
                yield presence_handler.process_replication(result)
                yield notify(result)
            except:
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(5)

    def build_presence_handler(self):
        return SynchrotronPresence(self)

    def build_typing_handler(self):
        return SynchrotronTyping(self)


def start(config_options):
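    """Parse the config, build and start a SynchrotronServer, and run the
    reactor (daemonizing first if the config asks for it)."""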
    try:
        config = HomeServerConfig.load_config(
            "Synapse synchrotron", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    assert config.worker_app == "synapse.app.synchrotron"

    setup_logging(config, use_worker_options=True)

    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    ss = SynchrotronServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
        application_service_handler=SynchrotronApplicationService(),
    )

    ss.setup()
    ss.start_listening(config.worker_listeners)

    def run():
        # make sure that we run the reactor with the sentinel log context,
        # otherwise other PreserveLoggingContext instances will get confused
        # and complain when they see the logcontext arbitrarily swapping
        # between the sentinel and `run` logcontexts.
        with PreserveLoggingContext():
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        ss.get_datastore().start_profiling()
        ss.replicate()
        ss.get_state_handler().start_caching()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-synchrotron",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()


if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])