# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This module is responsible for keeping track of presence status of local
and remote users.

The methods that define policy are:
    - PresenceHandler._update_states
    - PresenceHandler._handle_timeouts
    - should_notify
"""

import logging
from contextlib import contextmanager

from six import iteritems, itervalues

from prometheus_client import Counter

from twisted.internet import defer

import synapse.metrics
from synapse.api.constants import EventTypes, Membership, PresenceState
from synapse.api.errors import SynapseError
from synapse.metrics import LaterGauge
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.presence import UserPresenceState
from synapse.types import UserID, get_domain_from_id
from synapse.util.async_helpers import Linearizer
from synapse.util.caches.descriptors import cachedInlineCallbacks
from synapse.util.logcontext import run_in_background
from synapse.util.logutils import log_function
from synapse.util.metrics import Measure
from synapse.util.wheel_timer import WheelTimer

logger = logging.getLogger(__name__)


notified_presence_counter = Counter("synapse_handler_presence_notified_presence", "")
federation_presence_out_counter = Counter(
    "synapse_handler_presence_federation_presence_out", "")
presence_updates_counter = Counter("synapse_handler_presence_presence_updates", "")
timers_fired_counter = Counter("synapse_handler_presence_timers_fired", "")
federation_presence_counter = Counter("synapse_handler_presence_federation_presence", "")
bump_active_time_counter = Counter("synapse_handler_presence_bump_active_time", "")

get_updates_counter = Counter("synapse_handler_presence_get_updates", "", ["type"])

notify_reason_counter = Counter(
    "synapse_handler_presence_notify_reason", "", ["reason"])
state_transition_counter = Counter(
    "synapse_handler_presence_state_transition", "", ["from", "to"]
)


# If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
# "currently_active"
LAST_ACTIVE_GRANULARITY = 60 * 1000

# How long to wait until a new /events or /sync request before assuming
# the client has gone.
SYNC_ONLINE_TIMEOUT = 30 * 1000

# How long to wait before marking the user as idle. Compared against last active
IDLE_TIMER = 5 * 60 * 1000

# How often we expect remote servers to resend us presence.
FEDERATION_TIMEOUT = 30 * 60 * 1000

# How often to resend presence to remote servers
FEDERATION_PING_INTERVAL = 25 * 60 * 1000

# How long we will wait before assuming that the syncs from an external process
# are dead.
EXTERNAL_PROCESS_EXPIRY = 5 * 60 * 1000

assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER
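
# Illustrative sketch (added note, not part of the original module): for a
# local user who reports activity at t=0 and then does nothing further, the
# constants above interact roughly as follows:
#
#   t < LAST_ACTIVE_GRANULARITY (60s)    advertised as "currently_active"
#   t = IDLE_TIMER (5 min)               timed out to "unavailable"
#   last sync + SYNC_ONLINE_TIMEOUT      timed out to "offline"
#   every FEDERATION_PING_INTERVAL       presence is re-sent to remote servers,
#                                        which drop us after FEDERATION_TIMEOUT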


class PresenceHandler(object):

    def __init__(self, hs):
        """

        Args:
            hs (synapse.server.HomeServer):
        """
        self.hs = hs
        self.is_mine = hs.is_mine
        self.is_mine_id = hs.is_mine_id
        self.server_name = hs.hostname
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()
        self.wheel_timer = WheelTimer()
        self.notifier = hs.get_notifier()
        self.federation = hs.get_federation_sender()
        self.state = hs.get_state_handler()

        federation_registry = hs.get_federation_registry()

        federation_registry.register_edu_handler(
            "m.presence", self.incoming_presence
        )

        active_presence = self.store.take_presence_startup_info()

        # A dictionary of the current state of users. This is prefilled with
        # non-offline presence from the DB. We should fetch from the DB if
        # we can't find a user's presence in here.
        self.user_to_current_state = {
            state.user_id: state
            for state in active_presence
        }

        LaterGauge(
            "synapse_handlers_presence_user_to_current_state_size", "", [],
            lambda: len(self.user_to_current_state)
        )

        now = self.clock.time_msec()
        for state in active_presence:
            self.wheel_timer.insert(
                now=now,
                obj=state.user_id,
                then=state.last_active_ts + IDLE_TIMER,
            )
            self.wheel_timer.insert(
                now=now,
                obj=state.user_id,
                then=state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
            )
            if self.is_mine_id(state.user_id):
                self.wheel_timer.insert(
                    now=now,
                    obj=state.user_id,
                    then=state.last_federation_update_ts + FEDERATION_PING_INTERVAL,
                )
            else:
                self.wheel_timer.insert(
                    now=now,
                    obj=state.user_id,
                    then=state.last_federation_update_ts + FEDERATION_TIMEOUT,
                )
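
        # Added note: WheelTimer is an approximate timer queue;
        # insert(now, obj, then) schedules `obj` to be returned by a later
        # fetch(now') once now' >= then, so the prefill above simply re-arms
        # the idle/sync/federation timeouts that would otherwise be lost
        # across a restart.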

        # Set of users who have presence in the `user_to_current_state` that
        # have not yet been persisted
        self.unpersisted_users_changes = set()

        hs.get_reactor().addSystemEventTrigger("before", "shutdown", self._on_shutdown)

        self.serial_to_user = {}
        self._next_serial = 1

        # Keeps track of the number of *ongoing* syncs on this process. While
        # this is non zero a user will never go offline.
        self.user_to_num_current_syncs = {}

        # Keeps track of the number of *ongoing* syncs on other processes.
        # While any sync is ongoing on another process the user will never
        # go offline.
        # Each process has a unique identifier and an update frequency. If
        # no update is received from that process within the update period then
        # we assume that all the sync requests on that process have stopped.
        # Stored as a dict from process_id to set of user_id, and a dict of
        # process_id to millisecond timestamp last updated.
        self.external_process_to_current_syncs = {}
        self.external_process_last_updated_ms = {}
        self.external_sync_linearizer = Linearizer(name="external_sync_linearizer")

        # Start a LoopingCall in 30s that fires every 5s.
        # The initial delay is to allow disconnected clients a chance to
        # reconnect before we treat them as offline.
        self.clock.call_later(
            30,
            self.clock.looping_call,
            self._handle_timeouts,
            5000,
        )

        self.clock.call_later(
            60,
            self.clock.looping_call,
            self._persist_unpersisted_changes,
            60 * 1000,
        )

        LaterGauge("synapse_handlers_presence_wheel_timer_size", "", [],
                   lambda: len(self.wheel_timer))

        # Used to handle sending of presence to newly joined users/servers
        if hs.config.use_presence:
            self.notifier.add_replication_callback(self.notify_new_event)

        # Presence is best effort and quickly heals itself, so lets just always
        # stream from the current state when we restart.
        self._event_pos = self.store.get_current_events_token()
        self._event_processing = False

    @defer.inlineCallbacks
    def _on_shutdown(self):
        """Gets called when shutting down. This lets us persist any updates that
        we haven't yet persisted, e.g. updates that only change some internal
        timers. This allows changes to persist across startup without having to
        persist every single change.

        If this does not run it simply means that some of the timers will fire
        earlier than they should when synapse is restarted. The effect of this
        is some spurious presence changes that will self-correct.
        """
        # If the DB pool has already terminated, don't try updating
        if not self.hs.get_db_pool().running:
            return

        logger.info(
            "Performing _on_shutdown. Persisting %d unpersisted changes",
            len(self.unpersisted_users_changes)
        )

        if self.unpersisted_users_changes:
            yield self.store.update_presence([
                self.user_to_current_state[user_id]
                for user_id in self.unpersisted_users_changes
            ])
        logger.info("Finished _on_shutdown")

    @defer.inlineCallbacks
    def _persist_unpersisted_changes(self):
        """We periodically persist the unpersisted changes, as otherwise they
        may stack up and slow down shutdown times.
        """
        logger.info(
            "Performing _persist_unpersisted_changes. Persisting %d unpersisted changes",
            len(self.unpersisted_users_changes)
        )

        unpersisted = self.unpersisted_users_changes
        self.unpersisted_users_changes = set()

        if unpersisted:
            yield self.store.update_presence([
                self.user_to_current_state[user_id]
                for user_id in unpersisted
            ])

        logger.info("Finished _persist_unpersisted_changes")

    @defer.inlineCallbacks
    def _update_states_and_catch_exception(self, new_states):
        try:
            res = yield self._update_states(new_states)
            defer.returnValue(res)
        except Exception:
            logger.exception("Error updating presence")

    @defer.inlineCallbacks
    def _update_states(self, new_states):
        """Updates presence of users. Sets the appropriate timeouts. Pokes
        the notifier and federation if and only if the changed presence state
        should be sent to clients/servers.
        """
        now = self.clock.time_msec()

        with Measure(self.clock, "presence_update_states"):

            # NOTE: We purposefully don't yield between now and when we've
            # calculated what we want to do with the new states, to avoid races.

            to_notify = {}  # Changes we want to notify everyone about
            to_federation_ping = {}  # These need sending keep-alives

            # Only bother handling the last presence change for each user
            new_states_dict = {}
            for new_state in new_states:
                new_states_dict[new_state.user_id] = new_state
            new_states = new_states_dict.values()

            for new_state in new_states:
                user_id = new_state.user_id

                # Its fine to not hit the database here, as the only thing not in
                # the current state cache are OFFLINE states, where the only field
                # of interest is last_active which is safe enough to assume is 0
                # here.
                prev_state = self.user_to_current_state.get(
                    user_id, UserPresenceState.default(user_id)
                )

                new_state, should_notify, should_ping = handle_update(
                    prev_state, new_state,
                    is_mine=self.is_mine_id(user_id),
                    wheel_timer=self.wheel_timer,
                    now=now
                )

                self.user_to_current_state[user_id] = new_state

                if should_notify:
                    to_notify[user_id] = new_state
                elif should_ping:
                    to_federation_ping[user_id] = new_state

            # TODO: We should probably ensure there are no races hereafter

            presence_updates_counter.inc(len(new_states))

            if to_notify:
                notified_presence_counter.inc(len(to_notify))
                yield self._persist_and_notify(list(to_notify.values()))

            self.unpersisted_users_changes |= set(s.user_id for s in new_states)
            self.unpersisted_users_changes -= set(to_notify.keys())

            to_federation_ping = {
                user_id: state for user_id, state in to_federation_ping.items()
                if user_id not in to_notify
            }
            if to_federation_ping:
                federation_presence_out_counter.inc(len(to_federation_ping))

                self._push_to_remotes(to_federation_ping.values())

    def _handle_timeouts(self):
        """Checks the presence of users that have timed out and updates as
        appropriate.
        """
        logger.info("Handling presence timeouts")
        now = self.clock.time_msec()

        try:
            with Measure(self.clock, "presence_handle_timeouts"):
                # Fetch the list of users that *may* have timed out. Things may have
                # changed since the timeout was set, so we won't necessarily have to
                # take any action.
                users_to_check = set(self.wheel_timer.fetch(now))

                # Check whether the lists of syncing processes from an external
                # process have expired.
                expired_process_ids = [
                    process_id for process_id, last_update
                    in self.external_process_last_updated_ms.items()
                    if now - last_update > EXTERNAL_PROCESS_EXPIRY
                ]
                for process_id in expired_process_ids:
                    users_to_check.update(
                        self.external_process_to_current_syncs.pop(process_id, ())
                    )
                    self.external_process_last_updated_ms.pop(process_id)

                states = [
                    self.user_to_current_state.get(
                        user_id, UserPresenceState.default(user_id)
                    )
                    for user_id in users_to_check
                ]

                timers_fired_counter.inc(len(states))

                changes = handle_timeouts(
                    states,
                    is_mine_fn=self.is_mine_id,
                    syncing_user_ids=self.get_currently_syncing_users(),
                    now=now,
                )

            run_in_background(self._update_states_and_catch_exception, changes)
        except Exception:
            logger.exception("Exception in _handle_timeouts loop")
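
    # Added note: the timeout pipeline above is roughly
    #   looping call -> _handle_timeouts -> wheel_timer.fetch(now)
    #     -> handle_timeouts()/handle_timeout() (pure functions at module level)
    #     -> _update_states() for any resulting changes.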

    @defer.inlineCallbacks
    def bump_presence_active_time(self, user):
        """We've seen the user do something that indicates they're interacting
        with the app.
        """
        # If presence is disabled, no-op
        if not self.hs.config.use_presence:
            return

        user_id = user.to_string()

        bump_active_time_counter.inc()

        prev_state = yield self.current_state_for_user(user_id)

        new_fields = {
            "last_active_ts": self.clock.time_msec(),
        }
        if prev_state.state == PresenceState.UNAVAILABLE:
            new_fields["state"] = PresenceState.ONLINE

        yield self._update_states([prev_state.copy_and_replace(**new_fields)])

    @defer.inlineCallbacks
    def user_syncing(self, user_id, affect_presence=True):
        """Returns a context manager that should surround any stream requests
        from the user.

        This allows us to keep track of who is currently streaming and who isn't
        without having to have timers outside of this module to avoid flickering
        when users disconnect/reconnect.

        Args:
            user_id (str)
            affect_presence (bool): If false this function will be a no-op.
                Useful for streams that are not associated with an actual
                client that is being used by a user.
        """
        # If presence is disabled, don't let this sync affect the user's
        # presence.
        if not self.hs.config.use_presence:
            affect_presence = False

        if affect_presence:
            curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
            self.user_to_num_current_syncs[user_id] = curr_sync + 1

            prev_state = yield self.current_state_for_user(user_id)
            if prev_state.state == PresenceState.OFFLINE:
                # If they're currently offline then bring them online, otherwise
                # just update the last sync times.
                yield self._update_states([prev_state.copy_and_replace(
                    state=PresenceState.ONLINE,
                    last_active_ts=self.clock.time_msec(),
                    last_user_sync_ts=self.clock.time_msec(),
                )])
            else:
                yield self._update_states([prev_state.copy_and_replace(
                    last_user_sync_ts=self.clock.time_msec(),
                )])

        @defer.inlineCallbacks
        def _end():
            try:
                self.user_to_num_current_syncs[user_id] -= 1

                prev_state = yield self.current_state_for_user(user_id)
                yield self._update_states([prev_state.copy_and_replace(
                    last_user_sync_ts=self.clock.time_msec(),
                )])
            except Exception:
                logger.exception("Error updating presence after sync")

        @contextmanager
        def _user_syncing():
            try:
                yield
            finally:
                if affect_presence:
                    run_in_background(_end)

        defer.returnValue(_user_syncing())
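
    # Added illustrative usage (not part of the original module): a request
    # handler serving a long-poll/sync request would typically do
    #
    #   context = yield presence_handler.user_syncing(user_id, affect_presence)
    #   with context:
    #       ...wait for / build the response...
    #
    # so the user is marked online for the duration of the request and the
    # last-synced timestamp is bumped when it finishes.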

    def get_currently_syncing_users(self):
        """Get the set of user ids that are currently syncing on this HS.
        Returns:
            set(str): A set of user_id strings.
        """
        if self.hs.config.use_presence:
            syncing_user_ids = {
                user_id for user_id, count in self.user_to_num_current_syncs.items()
                if count
            }
            for user_ids in self.external_process_to_current_syncs.values():
                syncing_user_ids.update(user_ids)
            return syncing_user_ids
        else:
            return set()

    @defer.inlineCallbacks
    def update_external_syncs_row(self, process_id, user_id, is_syncing, sync_time_msec):
        """Update the syncing users for an external process as a delta.

        Args:
            process_id (str): An identifier for the process the users are
                syncing against. This allows synapse to process updates
                as users start and stop syncing against a given process.
            user_id (str): The user who has started or stopped syncing
            is_syncing (bool): Whether or not the user is now syncing
            sync_time_msec(int): Time in ms when the user was last syncing
        """
        with (yield self.external_sync_linearizer.queue(process_id)):
            prev_state = yield self.current_state_for_user(user_id)

            process_presence = self.external_process_to_current_syncs.setdefault(
                process_id, set()
            )

            updates = []
            if is_syncing and user_id not in process_presence:
                if prev_state.state == PresenceState.OFFLINE:
                    updates.append(prev_state.copy_and_replace(
                        state=PresenceState.ONLINE,
                        last_active_ts=sync_time_msec,
                        last_user_sync_ts=sync_time_msec,
                    ))
                else:
                    updates.append(prev_state.copy_and_replace(
                        last_user_sync_ts=sync_time_msec,
                    ))
                process_presence.add(user_id)
            elif user_id in process_presence:
                updates.append(prev_state.copy_and_replace(
                    last_user_sync_ts=sync_time_msec,
                ))

            if not is_syncing:
                process_presence.discard(user_id)

            if updates:
                yield self._update_states(updates)

            self.external_process_last_updated_ms[process_id] = self.clock.time_msec()

    @defer.inlineCallbacks
    def update_external_syncs_clear(self, process_id):
        """Marks all users that had been marked as syncing by a given process
        as offline.

        Used when the process has stopped/disappeared.
        """
        with (yield self.external_sync_linearizer.queue(process_id)):
            process_presence = self.external_process_to_current_syncs.pop(
                process_id, set()
            )
            prev_states = yield self.current_state_for_users(process_presence)
            time_now_ms = self.clock.time_msec()

            yield self._update_states([
                prev_state.copy_and_replace(
                    last_user_sync_ts=time_now_ms,
                )
                for prev_state in itervalues(prev_states)
            ])
            self.external_process_last_updated_ms.pop(process_id, None)
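
    # Added illustrative sketch: a worker process handling /sync traffic is
    # expected to drive the two methods above roughly like this (the real
    # calls arrive via the replication layer; the names are hypothetical):
    #
    #   yield presence_handler.update_external_syncs_row(
    #       "worker-1", "@alice:example.com", True, clock.time_msec()
    #   )
    #   ...
    #   yield presence_handler.update_external_syncs_clear("worker-1")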

    @defer.inlineCallbacks
    def current_state_for_user(self, user_id):
        """Get the current presence state for a user.
        """
        res = yield self.current_state_for_users([user_id])
        defer.returnValue(res[user_id])

    @defer.inlineCallbacks
    def current_state_for_users(self, user_ids):
        """Get the current presence state for multiple users.

        Returns:
            dict: `user_id` -> `UserPresenceState`
        """
        states = {
            user_id: self.user_to_current_state.get(user_id, None)
            for user_id in user_ids
        }

        missing = [user_id for user_id, state in iteritems(states) if not state]
        if missing:
            # There are things not in our in memory cache. Lets pull them out of
            # the database.
            res = yield self.store.get_presence_for_users(missing)
            states.update(res)

        missing = [user_id for user_id, state in iteritems(states) if not state]
        if missing:
            new = {
                user_id: UserPresenceState.default(user_id)
                for user_id in missing
            }
            states.update(new)
            self.user_to_current_state.update(new)

        defer.returnValue(states)

    @defer.inlineCallbacks
    def _persist_and_notify(self, states):
        """Persist states in the database, poke the notifier and send to
        interested remote servers
        """
        stream_id, max_token = yield self.store.update_presence(states)

        parties = yield get_interested_parties(self.store, states)
        room_ids_to_states, users_to_states = parties

        self.notifier.on_new_event(
            "presence_key", stream_id, rooms=room_ids_to_states.keys(),
            users=[UserID.from_string(u) for u in users_to_states]
        )

        self._push_to_remotes(states)

    @defer.inlineCallbacks
    def notify_for_states(self, state, stream_id):
        parties = yield get_interested_parties(self.store, [state])
        room_ids_to_states, users_to_states = parties

        self.notifier.on_new_event(
            "presence_key", stream_id, rooms=room_ids_to_states.keys(),
            users=[UserID.from_string(u) for u in users_to_states]
        )

    def _push_to_remotes(self, states):
        """Sends state updates to remote servers.

        Args:
            states (list(UserPresenceState))
        """
        self.federation.send_presence(states)

    @defer.inlineCallbacks
    def incoming_presence(self, origin, content):
        """Called when we receive a `m.presence` EDU from a remote server.
        """
        now = self.clock.time_msec()
        updates = []
        for push in content.get("push", []):
            # A "push" contains a list of presence that we are probably interested
            # in.
            # TODO: Actually check if we're interested, rather than blindly
            # accepting presence updates.
            user_id = push.get("user_id", None)
            if not user_id:
                logger.info(
                    "Got presence update from %r with no 'user_id': %r",
                    origin, push,
                )
                continue

            if get_domain_from_id(user_id) != origin:
                logger.info(
                    "Got presence update from %r with bad 'user_id': %r",
                    origin, user_id,
                )
                continue

            presence_state = push.get("presence", None)
            if not presence_state:
                logger.info(
                    "Got presence update from %r with no 'presence_state': %r",
                    origin, push,
                )
                continue

            new_fields = {
                "state": presence_state,
                "last_federation_update_ts": now,
            }

            last_active_ago = push.get("last_active_ago", None)
            if last_active_ago is not None:
                new_fields["last_active_ts"] = now - last_active_ago

            new_fields["status_msg"] = push.get("status_msg", None)
            new_fields["currently_active"] = push.get("currently_active", False)

            prev_state = yield self.current_state_for_user(user_id)
            updates.append(prev_state.copy_and_replace(**new_fields))

        if updates:
            federation_presence_counter.inc(len(updates))
            yield self._update_states(updates)

    @defer.inlineCallbacks
    def get_state(self, target_user, as_event=False):
        results = yield self.get_states(
            [target_user.to_string()],
            as_event=as_event,
        )

        defer.returnValue(results[0])

    @defer.inlineCallbacks
    def get_states(self, target_user_ids, as_event=False):
        """Get the presence state for users.

        Args:
            target_user_ids (list)
            as_event (bool): Whether to format it as a client event or not.

        Returns:
            list
        """

        updates = yield self.current_state_for_users(target_user_ids)
        updates = list(updates.values())

        for user_id in set(target_user_ids) - set(u.user_id for u in updates):
            updates.append(UserPresenceState.default(user_id))

        now = self.clock.time_msec()
        if as_event:
            defer.returnValue([
                {
                    "type": "m.presence",
                    "content": format_user_presence_state(state, now),
                }
                for state in updates
            ])
        else:
            defer.returnValue(updates)

    @defer.inlineCallbacks
    def set_state(self, target_user, state, ignore_status_msg=False):
        """Set the presence state of the user.
        """
        status_msg = state.get("status_msg", None)
        presence = state["presence"]

        valid_presence = (
            PresenceState.ONLINE, PresenceState.UNAVAILABLE, PresenceState.OFFLINE
        )
        if presence not in valid_presence:
            raise SynapseError(400, "Invalid presence state")

        user_id = target_user.to_string()

        prev_state = yield self.current_state_for_user(user_id)

        new_fields = {
            "state": presence
        }

        if not ignore_status_msg:
            msg = status_msg if presence != PresenceState.OFFLINE else None
            new_fields["status_msg"] = msg

        if presence == PresenceState.ONLINE:
            new_fields["last_active_ts"] = self.clock.time_msec()

        yield self._update_states([prev_state.copy_and_replace(**new_fields)])

    @defer.inlineCallbacks
    def is_visible(self, observed_user, observer_user):
        """Returns whether a user can see another user's presence.
        """
        observer_room_ids = yield self.store.get_rooms_for_user(
            observer_user.to_string()
        )
        observed_room_ids = yield self.store.get_rooms_for_user(
            observed_user.to_string()
        )

        if observer_room_ids & observed_room_ids:
            defer.returnValue(True)

        defer.returnValue(False)

    @defer.inlineCallbacks
    def get_all_presence_updates(self, last_id, current_id):
        """
        Gets a list of presence update rows from between the given stream ids.
        Each row has:
        - stream_id(str)
        - user_id(str)
        - state(str)
        - last_active_ts(int)
        - last_federation_update_ts(int)
        - last_user_sync_ts(int)
        - status_msg(int)
        - currently_active(int)
        """
        # TODO(markjh): replicate the unpersisted changes.
        # This could use the in-memory stores for recent changes.
        rows = yield self.store.get_all_presence_updates(last_id, current_id)
        defer.returnValue(rows)

    def notify_new_event(self):
        """Called when new events have happened. Handles users and servers
        joining rooms, which then need to be sent presence.
        """

        if self._event_processing:
            return

        @defer.inlineCallbacks
        def _process_presence():
            assert not self._event_processing

            self._event_processing = True
            try:
                yield self._unsafe_process()
            finally:
                self._event_processing = False

        run_as_background_process("presence.notify_new_event", _process_presence)

    @defer.inlineCallbacks
    def _unsafe_process(self):
        # Loop round handling deltas until we're up to date
        while True:
            with Measure(self.clock, "presence_delta"):
                deltas = yield self.store.get_current_state_deltas(self._event_pos)
                if not deltas:
                    return

                yield self._handle_state_delta(deltas)

                self._event_pos = deltas[-1]["stream_id"]

                # Expose current event processing position to prometheus
                synapse.metrics.event_processing_positions.labels("presence").set(
                    self._event_pos
                )

    @defer.inlineCallbacks
    def _handle_state_delta(self, deltas):
        """Process current state deltas to find new joins that need to be
        handled.
        """
        for delta in deltas:
            typ = delta["type"]
            state_key = delta["state_key"]
            room_id = delta["room_id"]
            event_id = delta["event_id"]
            prev_event_id = delta["prev_event_id"]

            logger.debug("Handling: %r %r, %s", typ, state_key, event_id)

            if typ != EventTypes.Member:
                continue

            if event_id is None:
                # state has been deleted, so this is not a join. We only care about
                # joins.
                continue

            event = yield self.store.get_event(event_id)
            if event.content.get("membership") != Membership.JOIN:
                # We only care about joins
                continue

            if prev_event_id:
                prev_event = yield self.store.get_event(prev_event_id)
                if prev_event.content.get("membership") == Membership.JOIN:
                    # Ignore changes to join events.
                    continue

            yield self._on_user_joined_room(room_id, state_key)

    @defer.inlineCallbacks
    def _on_user_joined_room(self, room_id, user_id):
        """Called when we detect a user joining the room via the current state
        delta stream.

        Args:
            room_id (str)
            user_id (str)

        Returns:
            Deferred
        """

        if self.is_mine_id(user_id):
            # If this is a local user then we need to send their presence
            # out to hosts in the room (who don't already have it)

            # TODO: We should be able to filter the hosts down to those that
            # haven't previously seen the user

            state = yield self.current_state_for_user(user_id)
            hosts = yield self.state.get_current_hosts_in_room(room_id)

            # Filter out ourselves.
            hosts = set(host for host in hosts if host != self.server_name)

            self.federation.send_presence_to_destinations(
                states=[state],
                destinations=hosts,
            )
        else:
            # A remote user has joined the room, so we need to:
            #   1. Check if this is a new server in the room
            #   2. If so send any presence they don't already have for
            #      local users in the room.

            # TODO: We should be able to filter the users down to those that
            # the server hasn't previously seen

            # TODO: Check that this is actually a new server joining the
            # room.

            user_ids = yield self.state.get_current_users_in_room(room_id)
            user_ids = list(filter(self.is_mine_id, user_ids))

            states = yield self.current_state_for_users(user_ids)

            # Filter out old presence, i.e. offline presence states where
            # the user hasn't been active for a week. We can change this
            # depending on what we want the UX to be, but at the least we
            # should filter out offline presence where the state is just the
            # default state.
            now = self.clock.time_msec()
            states = [
                state for state in states.values()
                if state.state != PresenceState.OFFLINE
                or now - state.last_active_ts < 7 * 24 * 60 * 60 * 1000
                or state.status_msg is not None
            ]

            if states:
                self.federation.send_presence_to_destinations(
                    states=states,
                    destinations=[get_domain_from_id(user_id)],
                )


def should_notify(old_state, new_state):
    """Decides if a presence state change should be sent to interested parties.
    """
    if old_state == new_state:
        return False

    if old_state.status_msg != new_state.status_msg:
        notify_reason_counter.labels("status_msg_change").inc()
        return True

    if old_state.state != new_state.state:
        notify_reason_counter.labels("state_change").inc()
        state_transition_counter.labels(old_state.state, new_state.state).inc()
        return True

    if old_state.state == PresenceState.ONLINE:
        if new_state.currently_active != old_state.currently_active:
            notify_reason_counter.labels("current_active_change").inc()
            return True

        if new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
            # Only notify about last active bumps if we're not currently active
            if not new_state.currently_active:
                notify_reason_counter.labels("last_active_change_online").inc()
                return True

    elif new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
        # Always notify for a transition where last active gets bumped.
        notify_reason_counter.labels("last_active_change_not_online").inc()
        return True

    return False


def format_user_presence_state(state, now, include_user_id=True):
    """Convert UserPresenceState to a format that can be sent down to clients
    and to other servers.

    The "user_id" is optional so that this function can be used to format presence
    updates for client /sync responses and for federation /send requests.
    """
    content = {
        "presence": state.state,
    }
    if include_user_id:
        content["user_id"] = state.user_id
    if state.last_active_ts:
        content["last_active_ago"] = now - state.last_active_ts
    if state.status_msg and state.state != PresenceState.OFFLINE:
        content["status_msg"] = state.status_msg
    if state.state == PresenceState.ONLINE:
        content["currently_active"] = state.currently_active

    return content
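
# Added illustrative note: for an online user the dict returned above looks
# roughly like
#   {"presence": "online", "user_id": "@alice:example.com",
#    "last_active_ago": 1234, "currently_active": True}
# with "user_id" omitted when include_user_id is False.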


class PresenceEventSource(object):
    def __init__(self, hs):
        # We can't call get_presence_handler here because there's a cycle:
        #
        #   Presence -> Notifier -> PresenceEventSource -> Presence
        #
        self.get_presence_handler = hs.get_presence_handler
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()
        self.state = hs.get_state_handler()

    @defer.inlineCallbacks
    @log_function
    def get_new_events(self, user, from_key, room_ids=None, include_offline=True,
                       explicit_room_id=None, **kwargs):
        # The process for getting presence events is:
        #  1. Get the rooms the user is in.
        #  2. Get the list of users in the rooms.
        #  3. Get the list of users that are in the user's presence list.
        #  4. If there is a from_key set, cross reference the list of users
        #     with the `presence_stream_cache` to see which ones we actually
        #     need to check.
        #  5. Load current state for the users.
        #
        # We don't try and limit the presence updates by the current token, as
        # sending down the rare duplicate is not a concern.

        with Measure(self.clock, "presence.get_new_events"):
            if from_key is not None:
                from_key = int(from_key)

            presence = self.get_presence_handler()
            stream_change_cache = self.store.presence_stream_cache

            max_token = self.store.get_current_presence_token()

            users_interested_in = yield self._get_interested_in(user, explicit_room_id)

            user_ids_changed = set()
            changed = None
            if from_key:
                changed = stream_change_cache.get_all_entities_changed(from_key)

            if changed is not None and len(changed) < 500:
                # For small deltas, its quicker to get all changes and then
                # work out if we share a room or they're in our presence list
                get_updates_counter.labels("stream").inc()
                for other_user_id in changed:
                    if other_user_id in users_interested_in:
                        user_ids_changed.add(other_user_id)
            else:
                # Too many possible updates. Find all users we can see and check
                # if any of them have changed.
                get_updates_counter.labels("full").inc()

                if from_key:
                    user_ids_changed = stream_change_cache.get_entities_changed(
                        users_interested_in, from_key,
                    )
                else:
                    user_ids_changed = users_interested_in

            updates = yield presence.current_state_for_users(user_ids_changed)

        if include_offline:
            defer.returnValue((list(updates.values()), max_token))
        else:
            defer.returnValue(([
                s for s in itervalues(updates)
                if s.state != PresenceState.OFFLINE
            ], max_token))

    def get_current_key(self):
        return self.store.get_current_presence_token()

    def get_pagination_rows(self, user, pagination_config, key):
        return self.get_new_events(user, from_key=None, include_offline=False)

    @cachedInlineCallbacks(num_args=2, cache_context=True)
    def _get_interested_in(self, user, explicit_room_id, cache_context):
        """Returns the set of users that the given user should see presence
        updates for
        """
        user_id = user.to_string()
        users_interested_in = set()
        users_interested_in.add(user_id)  # So that we receive our own presence

        users_who_share_room = yield self.store.get_users_who_share_room_with_user(
            user_id, on_invalidate=cache_context.invalidate,
        )
        users_interested_in.update(users_who_share_room)

        if explicit_room_id:
            user_ids = yield self.store.get_users_in_room(
                explicit_room_id, on_invalidate=cache_context.invalidate,
            )
            users_interested_in.update(user_ids)

        defer.returnValue(users_interested_in)


def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now):
    """Checks the presence of users that have timed out and updates as
    appropriate.

    Args:
        user_states(list): List of UserPresenceState's to check.
        is_mine_fn (fn): Function that returns if a user_id is ours
        syncing_user_ids (set): Set of user_ids with active syncs.
        now (int): Current time in ms.

    Returns:
        List of UserPresenceState updates
    """
    changes = {}  # Actual changes we need to notify people about

    for state in user_states:
        is_mine = is_mine_fn(state.user_id)

        new_state = handle_timeout(state, is_mine, syncing_user_ids, now)
        if new_state:
            changes[state.user_id] = new_state

    return list(changes.values())


def handle_timeout(state, is_mine, syncing_user_ids, now):
    """Checks the presence of the user to see if any of the timers have elapsed

    Args:
        state (UserPresenceState)
        is_mine (bool): Whether the user is ours
        syncing_user_ids (set): Set of user_ids with active syncs.
        now (int): Current time in ms.

    Returns:
        A UserPresenceState update or None if no update.
    """
    if state.state == PresenceState.OFFLINE:
        # No timeouts are associated with offline states.
        return None

    changed = False
    user_id = state.user_id

    if is_mine:
        if state.state == PresenceState.ONLINE:
            if now - state.last_active_ts > IDLE_TIMER:
                # Currently online, but last activity ages ago so auto
                # idle
                state = state.copy_and_replace(
                    state=PresenceState.UNAVAILABLE,
                )
                changed = True
            elif now - state.last_active_ts > LAST_ACTIVE_GRANULARITY:
                # So that we send down a notification that we've
                # stopped updating.
                changed = True

        if now - state.last_federation_update_ts > FEDERATION_PING_INTERVAL:
            # Need to send ping to other servers to ensure they don't
            # timeout and set us to offline
            changed = True

        # If there have been no syncs for a while (and none ongoing),
        # set presence to offline
        if user_id not in syncing_user_ids:
            # If the user has done something recently but hasn't synced,
            # don't set them as offline.
            sync_or_active = max(state.last_user_sync_ts, state.last_active_ts)
            if now - sync_or_active > SYNC_ONLINE_TIMEOUT:
                state = state.copy_and_replace(
                    state=PresenceState.OFFLINE,
                    status_msg=None,
                )
                changed = True
    else:
        # We expect to be poked occasionally by the other side.
        # This is to protect against forgetful/buggy servers, so that
        # no one gets stuck online forever.
        if now - state.last_federation_update_ts > FEDERATION_TIMEOUT:
            # The other side seems to have disappeared.
            state = state.copy_and_replace(
                state=PresenceState.OFFLINE,
                status_msg=None,
            )
            changed = True

    return state if changed else None


def handle_update(prev_state, new_state, is_mine, wheel_timer, now):
    """Given a presence update:
        1. Add any appropriate timers.
        2. Check if we should notify anyone.

    Args:
        prev_state (UserPresenceState)
        new_state (UserPresenceState)
        is_mine (bool): Whether the user is ours
        wheel_timer (WheelTimer)
        now (int): Time now in ms

    Returns:
        3-tuple: `(new_state, persist_and_notify, federation_ping)` where:
            - new_state: is the state to actually persist
            - persist_and_notify (bool): whether to persist and notify people
            - federation_ping (bool): whether we should send a ping over federation
    """
    user_id = new_state.user_id

    persist_and_notify = False
    federation_ping = False

    # If the users are ours then we want to set up a bunch of timers
    # to time things out.
    if is_mine:
        if new_state.state == PresenceState.ONLINE:
            # Idle timer
            wheel_timer.insert(
                now=now,
                obj=user_id,
                then=new_state.last_active_ts + IDLE_TIMER
            )

            active = now - new_state.last_active_ts < LAST_ACTIVE_GRANULARITY
            new_state = new_state.copy_and_replace(
                currently_active=active,
            )

            if active:
                wheel_timer.insert(
                    now=now,
                    obj=user_id,
                    then=new_state.last_active_ts + LAST_ACTIVE_GRANULARITY
                )

        if new_state.state != PresenceState.OFFLINE:
            # User has stopped syncing
            wheel_timer.insert(
                now=now,
                obj=user_id,
                then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT
            )

            last_federate = new_state.last_federation_update_ts
            if now - last_federate > FEDERATION_PING_INTERVAL:
                # Been a while since we've poked remote servers
                new_state = new_state.copy_and_replace(
                    last_federation_update_ts=now,
                )
                federation_ping = True

    else:
        wheel_timer.insert(
            now=now,
            obj=user_id,
            then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT
        )

    # Check whether the change was something worth notifying about
    if should_notify(prev_state, new_state):
        new_state = new_state.copy_and_replace(
            last_federation_update_ts=now,
        )
        persist_and_notify = True

    return new_state, persist_and_notify, federation_ping


@defer.inlineCallbacks
def get_interested_parties(store, states):
    """Given a list of states return which entities (rooms, users)
    are interested in the given states.

    Args:
        states (list(UserPresenceState))

    Returns:
        2-tuple: `(room_ids_to_states, users_to_states)`,
        with each item being a dict of `entity_name` -> `[UserPresenceState]`
    """
    room_ids_to_states = {}
    users_to_states = {}
    for state in states:
        room_ids = yield store.get_rooms_for_user(state.user_id)
        for room_id in room_ids:
            room_ids_to_states.setdefault(room_id, []).append(state)

        # Always notify self
        users_to_states.setdefault(state.user_id, []).append(state)

    defer.returnValue((room_ids_to_states, users_to_states))
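
# Added illustrative note: the return value above maps interested entities to
# the states they should receive, e.g. roughly
#   room_ids_to_states = {"!room:example.com": [<UserPresenceState ...>]}
#   users_to_states = {"@alice:example.com": [<UserPresenceState ...>]}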


@defer.inlineCallbacks
def get_interested_remotes(store, states, state_handler):
    """Given a list of presence states figure out which remote servers
    should be sent which.

    All the presence states should be for local users only.

    Args:
        store (DataStore)
        states (list(UserPresenceState))

    Returns:
        Deferred list of ([destinations], [UserPresenceState]), where for
        each row the list of UserPresenceState should be sent to each
        destination
    """
    hosts_and_states = []

    # First we look up the rooms each user is in (as well as any explicit
    # subscriptions), then for each distinct room we look up the remote
    # hosts in those rooms.
    room_ids_to_states, users_to_states = yield get_interested_parties(store, states)

    for room_id, states in iteritems(room_ids_to_states):
        hosts = yield state_handler.get_current_hosts_in_room(room_id)
        hosts_and_states.append((hosts, states))

    for user_id, states in iteritems(users_to_states):
        host = get_domain_from_id(user_id)
        hosts_and_states.append(([host], states))

    defer.returnValue(hosts_and_states)