# -*- coding: utf-8 -*-
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from twisted.internet import defer

from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import AuthError

from synapse.util.logutils import log_function
from synapse.util.async import ObservableDeferred
from synapse.util.logcontext import PreserveLoggingContext, preserve_fn
from synapse.util.metrics import Measure
from synapse.types import StreamToken
from synapse.visibility import filter_events_for_client
import synapse.metrics

from collections import namedtuple

import logging


logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for(__name__)

notified_events_counter = metrics.register_counter("notified_events")


# TODO(paul): Should be shared somewhere
def count(func, l):
    """Return the number of items in l for which func returns true."""
    n = 0
    for x in l:
        if func(x):
            n += 1
    return n
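

# For example, count(bool, [0, 1, 2]) == 2: only the two non-zero items are
# truthy.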


class _NotificationListener(object):
    """ This represents a single client connection to the events stream.
    The events stream handler will have yielded to the deferred, so to
    notify the handler it is sufficient to resolve the deferred.
    """
    __slots__ = ["deferred"]

    def __init__(self, deferred):
        self.deferred = deferred


class _NotifierUserStream(object):
    """This represents a user connected to the event stream.
    It tracks the most recent stream token for that user.
    At a given point a user may have a number of streams listening for
    events.

    This listener will also keep track of which rooms it is listening in
    so that it can remove itself from the indexes in the Notifier class.
    """

    def __init__(self, user_id, rooms, current_token, time_now_ms):
        self.user_id = user_id
        self.rooms = set(rooms)
        self.current_token = current_token
        self.last_notified_ms = time_now_ms

        with PreserveLoggingContext():
            self.notify_deferred = ObservableDeferred(defer.Deferred())

    def notify(self, stream_key, stream_id, time_now_ms):
        """Notify any listeners for this user of a new event from an
        event source.
        Args:
            stream_key(str): The stream the event came from.
            stream_id(str): The new id for the stream the event came from.
            time_now_ms(int): The current time in milliseconds.
        """
        self.current_token = self.current_token.copy_and_advance(
            stream_key, stream_id
        )
        self.last_notified_ms = time_now_ms
        notify_deferred = self.notify_deferred

        with PreserveLoggingContext():
            self.notify_deferred = ObservableDeferred(defer.Deferred())
        notify_deferred.callback(self.current_token)

    def remove(self, notifier):
        """ Remove this listener from all the indexes in the Notifier
        it knows about.
        """

        for room in self.rooms:
            lst = notifier.room_to_user_streams.get(room, set())
            lst.discard(self)

        notifier.user_to_user_stream.pop(self.user_id)

    def count_listeners(self):
        return len(self.notify_deferred.observers())

    def new_listener(self, token):
        """Returns a deferred that is resolved when there is a new token
        greater than the given token.
        """
        if self.current_token.is_after(token):
            return _NotificationListener(defer.succeed(self.current_token))
        else:
            return _NotificationListener(self.notify_deferred.observe())
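
    # Illustrative sketch of the hand-off above (``stream`` here stands for a
    # _NotifierUserStream instance; it is not a name used elsewhere in this
    # module): a waiter asks for a listener at the last token it saw and only
    # blocks if nothing newer has arrived, while notify() advances the token
    # and fires the observed deferred.
    #
    #     listener = stream.new_listener(prev_token)
    #     yield listener.deferred          # fires when stream.notify(...) runs
    #     new_token = stream.current_token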


class EventStreamResult(namedtuple("EventStreamResult", ("events", "tokens"))):
    def __nonzero__(self):
        return bool(self.events)


class Notifier(object):
    """ This class is responsible for notifying any listeners when there are
    new events available for them.

    Primarily used from the /events stream.
    """

    UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000

    def __init__(self, hs):
        self.user_to_user_stream = {}
        self.room_to_user_streams = {}

        self.event_sources = hs.get_event_sources()
        self.store = hs.get_datastore()
        self.pending_new_room_events = []

        self.clock = hs.get_clock()
        self.appservice_handler = hs.get_application_service_handler()

        if hs.should_send_federation():
            self.federation_sender = hs.get_federation_sender()
        else:
            self.federation_sender = None

        self.state_handler = hs.get_state_handler()

        self.clock.looping_call(
            self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS
        )

        self.replication_deferred = ObservableDeferred(defer.Deferred())

        # This is not a very cheap test to perform, but it's only executed
        # when rendering the metrics page, which is likely once per minute at
        # most when scraping it.
        def count_listeners():
            all_user_streams = set()

            for x in self.room_to_user_streams.values():
                all_user_streams |= x
            for x in self.user_to_user_stream.values():
                all_user_streams.add(x)

            return sum(stream.count_listeners() for stream in all_user_streams)
        metrics.register_callback("listeners", count_listeners)

        metrics.register_callback(
            "rooms",
            lambda: count(bool, self.room_to_user_streams.values()),
        )
        metrics.register_callback(
            "users",
            lambda: len(self.user_to_user_stream),
        )

    @preserve_fn
    def on_new_room_event(self, event, room_stream_id, max_room_stream_id,
                          extra_users=[]):
        """ Used by handlers to inform the notifier something has happened
        in the room, room event wise.

        This triggers the notifier to wake up any listeners that are
        listening to the room, and any listeners for the users in the
        `extra_users` param.

        The events can be persisted out of order. The notifier will wait
        until all previous events have been persisted before notifying
        the client streams.
        """
        with PreserveLoggingContext():
            self.pending_new_room_events.append((
                room_stream_id, event, extra_users
            ))
            self._notify_pending_new_room_events(max_room_stream_id)

            self.notify_replication()
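
    # Illustrative sketch of the out-of-order handling above, with made-up
    # stream ids: if the event with stream id 6 is persisted before the one
    # with stream id 5, the first call merely queues it and the second call
    # notifies for both once everything up to 6 has been persisted.
    #
    #     notifier.on_new_room_event(event_b, 6, max_room_stream_id=4)  # queued
    #     notifier.on_new_room_event(event_a, 5, max_room_stream_id=6)  # both notified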

    @preserve_fn
    def _notify_pending_new_room_events(self, max_room_stream_id):
        """Notify for the room events that were queued waiting for a previous
        event to be persisted.
        Args:
            max_room_stream_id(int): The highest stream_id below which all
                events have been persisted.
        """
        pending = self.pending_new_room_events
        self.pending_new_room_events = []
        for room_stream_id, event, extra_users in pending:
            if room_stream_id > max_room_stream_id:
                self.pending_new_room_events.append((
                    room_stream_id, event, extra_users
                ))
            else:
                self._on_new_room_event(event, room_stream_id, extra_users)

    @preserve_fn
    def _on_new_room_event(self, event, room_stream_id, extra_users=[]):
        """Notify any user streams that are interested in this room event"""
        # poke any interested application service.
        self.appservice_handler.notify_interested_services(room_stream_id)

        if self.federation_sender:
            self.federation_sender.notify_new_events(room_stream_id)

        if event.type == EventTypes.Member and event.membership == Membership.JOIN:
            self._user_joined_room(event.state_key, event.room_id)

        self.on_new_event(
            "room_key", room_stream_id,
            users=extra_users,
            rooms=[event.room_id],
        )

    @preserve_fn
    def on_new_event(self, stream_key, new_token, users=[], rooms=[]):
        """ Used to inform listeners that something has happened event wise.

        Will wake up all listeners for the given users and rooms.
        """
        with PreserveLoggingContext():
            with Measure(self.clock, "on_new_event"):
                user_streams = set()

                for user in users:
                    user_stream = self.user_to_user_stream.get(str(user))
                    if user_stream is not None:
                        user_streams.add(user_stream)

                for room in rooms:
                    user_streams |= self.room_to_user_streams.get(room, set())

                time_now_ms = self.clock.time_msec()
                for user_stream in user_streams:
                    try:
                        user_stream.notify(stream_key, new_token, time_now_ms)
                    except:
                        logger.exception("Failed to notify listener")

                self.notify_replication()
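
    # Illustrative sketch: an event source for a non-room stream (typing
    # notifications, say; the stream key and variable names here are
    # assumptions rather than something defined in this module) wakes its
    # listeners with:
    #
    #     notifier.on_new_event(
    #         "typing_key", new_serial, rooms=[room_id],
    #     )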

    @preserve_fn
    def on_new_replication_data(self):
        """Used to inform replication listeners that something has happened
        without waking up any of the normal user event streams"""
        with PreserveLoggingContext():
            self.notify_replication()

    @defer.inlineCallbacks
    def wait_for_events(self, user_id, timeout, callback, room_ids=None,
                        from_token=StreamToken.START):
        """Wait until the callback returns a non-empty response or the
        timeout fires.
        """
        user_stream = self.user_to_user_stream.get(user_id)
        if user_stream is None:
            current_token = yield self.event_sources.get_current_token()
            if room_ids is None:
                rooms = yield self.store.get_rooms_for_user(user_id)
                room_ids = [room.room_id for room in rooms]
            user_stream = _NotifierUserStream(
                user_id=user_id,
                rooms=room_ids,
                current_token=current_token,
                time_now_ms=self.clock.time_msec(),
            )
            self._register_with_keys(user_stream)

        result = None
        if timeout:
            # Will be set to a _NotificationListener that we'll be waiting on.
            # Allows us to cancel it.
            listener = None

            def timed_out():
                if listener:
                    listener.deferred.cancel()
            timer = self.clock.call_later(timeout / 1000., timed_out)

            prev_token = from_token
            while not result:
                try:
                    current_token = user_stream.current_token

                    result = yield callback(prev_token, current_token)
                    if result:
                        break

                    # Now we wait for the _NotifierUserStream to be told there
                    # is a new token.
                    # We need to supply the token we supplied to callback so
                    # that we don't miss any current_token updates.
                    prev_token = current_token
                    listener = user_stream.new_listener(prev_token)
                    with PreserveLoggingContext():
                        yield listener.deferred
                except defer.CancelledError:
                    break

            self.clock.cancel_call_later(timer, ignore_errs=True)
        else:
            current_token = user_stream.current_token
            result = yield callback(from_token, current_token)

        defer.returnValue(result)
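
    # Illustrative sketch of a `callback` for wait_for_events; the names
    # check_for_new and load_events_between are placeholders, not module APIs.
    # The callback receives the token it last saw plus the stream's current
    # token, and returns a falsy value to keep waiting.
    #
    #     @defer.inlineCallbacks
    #     def check_for_new(prev_token, current_token):
    #         new_events = yield load_events_between(prev_token, current_token)
    #         defer.returnValue(new_events)   # [] keeps waiting
    #
    #     result = yield notifier.wait_for_events(
    #         user_id, timeout, check_for_new, from_token=from_token,
    #     )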

    @defer.inlineCallbacks
    def get_events_for(self, user, pagination_config, timeout,
                       only_keys=None,
                       is_guest=False, explicit_room_id=None):
        """ For the given user and rooms, return any new events for them. If
        there are no new events wait for up to `timeout` milliseconds for any
        new events to happen before returning.

        If `only_keys` is not None, only events from those keys will be sent
        down.

        If explicit_room_id is not set, the user's joined rooms will be polled
        for events.
        If explicit_room_id is set, that room will be polled for events only if
        it is world readable or the user has joined the room.
        """
        from_token = pagination_config.from_token
        if not from_token:
            from_token = yield self.event_sources.get_current_token()

        limit = pagination_config.limit

        room_ids, is_joined = yield self._get_room_ids(user, explicit_room_id)
        is_peeking = not is_joined

        @defer.inlineCallbacks
        def check_for_updates(before_token, after_token):
            if not after_token.is_after(before_token):
                defer.returnValue(EventStreamResult([], (from_token, from_token)))

            events = []
            end_token = from_token

            for name, source in self.event_sources.sources.items():
                keyname = "%s_key" % name
                before_id = getattr(before_token, keyname)
                after_id = getattr(after_token, keyname)
                if before_id == after_id:
                    continue
                if only_keys and name not in only_keys:
                    continue

                new_events, new_key = yield source.get_new_events(
                    user=user,
                    from_key=getattr(from_token, keyname),
                    limit=limit,
                    is_guest=is_peeking,
                    room_ids=room_ids,
                )

                if name == "room":
                    new_events = yield filter_events_for_client(
                        self.store,
                        user.to_string(),
                        new_events,
                        is_peeking=is_peeking,
                    )

                events.extend(new_events)
                end_token = end_token.copy_and_replace(keyname, new_key)

            defer.returnValue(EventStreamResult(events, (from_token, end_token)))

        user_id_for_stream = user.to_string()
        if is_peeking:
            # Internally, the notifier keeps an event stream per user_id.
            # This is used by both /sync and /events.
            # We want /events to be used for peeking independently of /sync,
            # without polluting its contents. So we invent an illegal user ID
            # (which thus cannot clash with any real users) for keying peeking
            # over /events.
            #
            # I am sorry for what I have done.
            user_id_for_stream = "_PEEKING_%s_%s" % (
                explicit_room_id, user_id_for_stream
            )

        result = yield self.wait_for_events(
            user_id_for_stream,
            timeout,
            check_for_updates,
            room_ids=room_ids,
            from_token=from_token,
        )

        defer.returnValue(result)
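
    # Illustrative call (how `user` and `pagination_config` are built is
    # outside this module): an /events-style long poll boils down to
    #
    #     result = yield notifier.get_events_for(
    #         user, pagination_config, timeout=30000,
    #         explicit_room_id=room_id,
    #     )
    #     events, (start_token, end_token) = result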

    @defer.inlineCallbacks
    def _get_room_ids(self, user, explicit_room_id):
        joined_rooms = yield self.store.get_rooms_for_user(user.to_string())
        joined_room_ids = map(lambda r: r.room_id, joined_rooms)
        if explicit_room_id:
            if explicit_room_id in joined_room_ids:
                defer.returnValue(([explicit_room_id], True))
            if (yield self._is_world_readable(explicit_room_id)):
                defer.returnValue(([explicit_room_id], False))
            raise AuthError(403, "Non-joined access not allowed")
        defer.returnValue((joined_room_ids, True))

    @defer.inlineCallbacks
    def _is_world_readable(self, room_id):
        state = yield self.state_handler.get_current_state(
            room_id,
            EventTypes.RoomHistoryVisibility,
            "",
        )
        if state and "history_visibility" in state.content:
            defer.returnValue(state.content["history_visibility"] == "world_readable")
        else:
            defer.returnValue(False)

    @log_function
    def remove_expired_streams(self):
        time_now_ms = self.clock.time_msec()
        expired_streams = []
        expire_before_ts = time_now_ms - self.UNUSED_STREAM_EXPIRY_MS
        for stream in self.user_to_user_stream.values():
            if stream.count_listeners():
                continue
            if stream.last_notified_ms < expire_before_ts:
                expired_streams.append(stream)

        for expired_stream in expired_streams:
            expired_stream.remove(self)

    @log_function
    def _register_with_keys(self, user_stream):
        self.user_to_user_stream[user_stream.user_id] = user_stream

        for room in user_stream.rooms:
            s = self.room_to_user_streams.setdefault(room, set())
            s.add(user_stream)

    def _user_joined_room(self, user_id, room_id):
        new_user_stream = self.user_to_user_stream.get(user_id)
        if new_user_stream is not None:
            room_streams = self.room_to_user_streams.setdefault(room_id, set())
            room_streams.add(new_user_stream)
            new_user_stream.rooms.add(room_id)

    def notify_replication(self):
        """Notify any replication listeners that there's a new event"""
        with PreserveLoggingContext():
            deferred = self.replication_deferred
            self.replication_deferred = ObservableDeferred(defer.Deferred())
            deferred.callback(None)

    @defer.inlineCallbacks
    def wait_for_replication(self, callback, timeout):
        """Wait for an event to happen.

        Args:
            callback: Gets called whenever an event happens. If this returns a
                truthy value then ``wait_for_replication`` returns, otherwise
                it waits for another event.
            timeout: How many milliseconds to wait for the callback to return
                a truthy value.

        Returns:
            A deferred that resolves with the value returned by the callback.
        """
        listener = _NotificationListener(None)

        def timed_out():
            listener.deferred.cancel()

        timer = self.clock.call_later(timeout / 1000., timed_out)
        while True:
            listener.deferred = self.replication_deferred.observe()
            result = yield callback()
            if result:
                break

            try:
                with PreserveLoggingContext():
                    yield listener.deferred
            except defer.CancelledError:
                break

        self.clock.cancel_call_later(timer, ignore_errs=True)

        defer.returnValue(result)
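
    # Illustrative sketch of a wait_for_replication caller; the callback name
    # and what it collects are assumptions. A replication consumer repeatedly
    # asks "is there anything newer than the token I last sent?" and blocks
    # here until notify_replication() fires or the timeout cancels the wait.
    #
    #     @defer.inlineCallbacks
    #     def have_updates():
    #         updates = yield collect_updates_since(last_sent_token)
    #         defer.returnValue(updates)
    #
    #     updates = yield notifier.wait_for_replication(have_updates, timeout)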