# -*- coding: utf-8 -*-
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
2018-06-07 04:37:10 -06:00
|
|
|
import logging
|
2018-06-25 06:42:55 -06:00
|
|
|
import operator
|
2018-06-07 04:37:10 -06:00
|
|
|
|
2020-12-16 06:46:37 -07:00
|
|
|
from synapse.api.constants import (
|
|
|
|
AccountDataTypes,
|
|
|
|
EventTypes,
|
|
|
|
HistoryVisibility,
|
|
|
|
Membership,
|
|
|
|
)
|
2018-06-25 06:42:55 -06:00
|
|
|
from synapse.events.utils import prune_event
|
2019-10-23 10:25:54 -06:00
|
|
|
from synapse.storage import Storage
|
2018-10-25 10:49:55 -06:00
|
|
|
from synapse.storage.state import StateFilter
|
2018-07-16 04:38:45 -06:00
|
|
|
from synapse.types import get_domain_from_id
|
2016-05-11 06:42:37 -06:00
|
|
|
|
|
|
|
logger = logging.getLogger(__name__)


# History-visibility settings ordered from least to most restrictive. The
# tuple index is used as a comparable priority (lower index = more
# permissive), e.g. when picking the least restrictive of an old vs new
# visibility setting at a history-visibility boundary event.
VISIBILITY_PRIORITY = (
    HistoryVisibility.WORLD_READABLE,
    HistoryVisibility.SHARED,
    HistoryVisibility.INVITED,
    HistoryVisibility.JOINED,
)


# Membership states ordered from "most joined" to "least joined". As with
# VISIBILITY_PRIORITY, the tuple index is used as a comparable priority
# (lower index = "more joined"), e.g. when picking the most-joined of a
# user's old vs new membership at a membership boundary event.
MEMBERSHIP_PRIORITY = (
    Membership.JOIN,
    Membership.INVITE,
    Membership.KNOCK,
    Membership.LEAVE,
    Membership.BAN,
)
|
|
|
|
|
|
|
|
|
2020-07-27 10:32:08 -06:00
|
|
|
async def filter_events_for_client(
    storage: Storage,
    user_id,
    events,
    is_peeking=False,
    always_include_ids=frozenset(),
    filter_send_to_client=True,
):
    """
    Check which events a user is allowed to see. If the user can see the event
    but its sender asked for their data to be erased, prune the content of the
    event.

    Args:
        storage
        user_id(str): user id to be checked
        events(list[synapse.events.EventBase]): sequence of events to be checked
        is_peeking(bool): should be True if:
            * the user is not currently a member of the room, and:
            * the user has not been a member of the room since the given
              events
        always_include_ids (set(event_id)): set of event ids to specifically
            include (unless sender is ignored)
        filter_send_to_client (bool): Whether we're checking an event that's going to be
            sent to a client. This might not always be the case since this function can
            also be called to check whether a user can see the state at a given point.

    Returns:
        list[synapse.events.EventBase]
    """
    # Filter out events that have been soft failed so that we don't relay them
    # to clients.
    events = [e for e in events if not e.internal_metadata.is_soft_failed()]

    # The state we need at each event: the room's history-visibility setting
    # and the requesting user's own membership.
    types = ((EventTypes.RoomHistoryVisibility, ""), (EventTypes.Member, user_id))

    event_id_to_state = await storage.state.get_state_for_events(
        frozenset(e.event_id for e in events),
        state_filter=StateFilter.from_types(types),
    )

    # Events sent by users on the requesting user's ignore list are filtered
    # out, so fetch that list (stored as global account data) up front.
    ignore_dict_content = await storage.main.get_global_account_data_by_type_for_user(
        AccountDataTypes.IGNORED_USER_LIST, user_id
    )

    # Be defensive about the account-data shape: only trust "ignored_users"
    # if it is actually a dict (its keys are the ignored user ids).
    ignore_list = frozenset()
    if ignore_dict_content:
        ignored_users_dict = ignore_dict_content.get("ignored_users", {})
        if isinstance(ignored_users_dict, dict):
            ignore_list = frozenset(ignored_users_dict.keys())

    erased_senders = await storage.main.are_users_erased((e.sender for e in events))

    if filter_send_to_client:
        # Room retention policies are only applied to events that are about to
        # be sent to clients, so only fetch them in that case. (allowed()
        # reads retention_policies under the same filter_send_to_client guard.)
        room_ids = {e.room_id for e in events}
        retention_policies = {}

        for room_id in room_ids:
            retention_policies[
                room_id
            ] = await storage.main.get_retention_policy_for_room(room_id)

    def allowed(event):
        """
        Args:
            event (synapse.events.EventBase): event to check

        Returns:
            None|EventBase:
               None if the user cannot see this event at all

               a redacted copy of the event if they can only see a redacted
               version

               the original event if they can see it as normal.
        """
        # Only run some checks if these events aren't about to be sent to clients. This is
        # because, if this is not the case, we're probably only checking if the users can
        # see events in the room at that point in the DAG, and that shouldn't be decided
        # on those checks.
        if filter_send_to_client:
            # Dummy events are internal padding events and never shown to clients.
            if event.type == EventTypes.Dummy:
                return None

            # Drop (non-state) events from ignored senders.
            if not event.is_state() and event.sender in ignore_list:
                return None

            # Until MSC2261 has landed we can't redact malicious alias events, so for
            # now we temporarily filter out m.room.aliases entirely to mitigate
            # abuse, while we spec a better solution to advertising aliases
            # on rooms.
            if event.type == EventTypes.Aliases:
                return None

            # Don't try to apply the room's retention policy if the event is a state
            # event, as MSC1763 states that retention is only considered for non-state
            # events.
            if not event.is_state():
                retention_policy = retention_policies[event.room_id]
                max_lifetime = retention_policy.get("max_lifetime")

                if max_lifetime is not None:
                    oldest_allowed_ts = storage.main.clock.time_msec() - max_lifetime

                    # Events older than the room's max_lifetime are hidden.
                    if event.origin_server_ts < oldest_allowed_ts:
                        return None

        if event.event_id in always_include_ids:
            return event

        state = event_id_to_state[event.event_id]

        # get the room_visibility at the time of the event.
        visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""), None)
        if visibility_event:
            visibility = visibility_event.content.get(
                "history_visibility", HistoryVisibility.SHARED
            )
        else:
            visibility = HistoryVisibility.SHARED

        # Treat unknown visibility settings as "shared".
        if visibility not in VISIBILITY_PRIORITY:
            visibility = HistoryVisibility.SHARED

        # Always allow history visibility events on boundaries. This is done
        # by setting the effective visibility to the least restrictive
        # of the old vs new.
        if event.type == EventTypes.RoomHistoryVisibility:
            prev_content = event.unsigned.get("prev_content", {})
            prev_visibility = prev_content.get("history_visibility", None)

            if prev_visibility not in VISIBILITY_PRIORITY:
                prev_visibility = HistoryVisibility.SHARED

            new_priority = VISIBILITY_PRIORITY.index(visibility)
            old_priority = VISIBILITY_PRIORITY.index(prev_visibility)
            if old_priority < new_priority:
                visibility = prev_visibility

        # likewise, if the event is the user's own membership event, use
        # the 'most joined' membership
        membership = None
        if event.type == EventTypes.Member and event.state_key == user_id:
            membership = event.content.get("membership", None)
            # Use the Membership constants (rather than bare string literals)
            # for consistency with the rest of this module; the values are
            # identical, so behaviour is unchanged.
            if membership not in MEMBERSHIP_PRIORITY:
                membership = Membership.LEAVE

            prev_content = event.unsigned.get("prev_content", {})
            prev_membership = prev_content.get("membership", None)
            if prev_membership not in MEMBERSHIP_PRIORITY:
                prev_membership = Membership.LEAVE

            # Always allow the user to see their own leave events, otherwise
            # they won't see the room disappear if they reject the invite
            if membership == Membership.LEAVE and (
                prev_membership == Membership.JOIN
                or prev_membership == Membership.INVITE
            ):
                return event

            new_priority = MEMBERSHIP_PRIORITY.index(membership)
            old_priority = MEMBERSHIP_PRIORITY.index(prev_membership)
            if old_priority < new_priority:
                membership = prev_membership

        # otherwise, get the user's membership at the time of the event.
        if membership is None:
            membership_event = state.get((EventTypes.Member, user_id), None)
            if membership_event:
                membership = membership_event.membership

        # if the user was a member of the room at the time of the event,
        # they can see it.
        if membership == Membership.JOIN:
            return event

        # otherwise, it depends on the room visibility.

        if visibility == HistoryVisibility.JOINED:
            # we weren't a member at the time of the event, so we can't
            # see this event.
            return None

        elif visibility == HistoryVisibility.INVITED:
            # user can also see the event if they were *invited* at the time
            # of the event.
            return event if membership == Membership.INVITE else None

        elif visibility == HistoryVisibility.SHARED and is_peeking:
            # if the visibility is shared, users cannot see the event unless
            # they have *subsequently* joined the room (or were members at the
            # time, of course)
            #
            # XXX: if the user has subsequently joined and then left again,
            # ideally we would share history up to the point they left. But
            # we don't know when they left. We just treat it as though they
            # never joined, and restrict access.
            return None

        # the visibility is either shared or world_readable, and the user was
        # not a member at the time. We allow it, provided the original sender
        # has not requested their data to be erased, in which case, we return
        # a redacted version.
        if erased_senders[event.sender]:
            return prune_event(event)

        return event

    # check each event: gives an iterable[None|EventBase]
    filtered_events = map(allowed, events)

    # remove the None entries
    filtered_events = filter(operator.truth, filtered_events)

    # we turn it into a list before returning it.
    return list(filtered_events)
|
2018-07-16 04:38:45 -06:00
|
|
|
|
|
|
|
|
2020-07-27 10:32:08 -06:00
|
|
|
async def filter_events_for_server(
    storage: Storage,
    server_name,
    events,
    redact=True,
    check_history_visibility_only=False,
):
    """Filter a list of events based on whether given server is allowed to
    see them.

    Args:
        storage
        server_name (str): the remote server requesting the events; membership
            of any user on this server's domain counts.
        events (iterable[FrozenEvent])
        redact (bool): Whether to return a redacted version of the event, or
            to filter them out entirely.
        check_history_visibility_only (bool): Whether to only check the
            history visibility, rather than things like if the sender has been
            erased. This is used e.g. during pagination to decide whether to
            backfill or not.

    Returns
        list[FrozenEvent]
    """

    def is_sender_erased(event, erased_senders):
        # Returns True if the sender of `event` has requested erasure, per the
        # `erased_senders` map (which may be empty when erasure is not being
        # checked at all).
        if erased_senders and erased_senders[event.sender]:
            logger.info("Sender of %s has been erased, redacting", event.event_id)
            return True
        return False

    def check_event_is_visible(event, state):
        # Returns True if `server_name` is allowed to see `event`, given the
        # room `state` (a (type, state_key) -> event map) at that event.
        history = state.get((EventTypes.RoomHistoryVisibility, ""), None)
        if history:
            visibility = history.content.get(
                "history_visibility", HistoryVisibility.SHARED
            )
            if visibility in [HistoryVisibility.INVITED, HistoryVisibility.JOINED]:
                # We now loop through all state events looking for
                # membership states for the requesting server to determine
                # if the server is either in the room or has been invited
                # into the room.
                for ev in state.values():
                    if ev.type != EventTypes.Member:
                        continue
                    try:
                        domain = get_domain_from_id(ev.state_key)
                    except Exception:
                        # Malformed state_key: not a user id, so not a
                        # membership we care about.
                        continue

                    if domain != server_name:
                        continue

                    memtype = ev.membership
                    if memtype == Membership.JOIN:
                        return True
                    elif memtype == Membership.INVITE:
                        # An invite only suffices for "invited" visibility,
                        # not "joined".
                        if visibility == HistoryVisibility.INVITED:
                            return True
                    else:
                        # server has no users in the room: redact
                        return False

        # Visibility is "shared"/"world_readable" (or no visibility event at
        # all): visible to any server.
        return True

    # Lets check to see if all the events have a history visibility
    # of "shared" or "world_readable". If that's the case then we don't
    # need to check membership (as we know the server is in the room).
    event_to_state_ids = await storage.state.get_state_ids_for_events(
        frozenset(e.event_id for e in events),
        state_filter=StateFilter.from_types(
            types=((EventTypes.RoomHistoryVisibility, ""),)
        ),
    )

    # Collect the distinct history-visibility state event ids affecting any of
    # the events.
    visibility_ids = set()
    for sids in event_to_state_ids.values():
        hist = sids.get((EventTypes.RoomHistoryVisibility, ""))
        if hist:
            visibility_ids.add(hist)

    # If we failed to find any history visibility events then the default
    # is "shared" visibility.
    if not visibility_ids:
        all_open = True
    else:
        event_map = await storage.main.get_events(visibility_ids)
        all_open = all(
            e.content.get("history_visibility")
            in (None, HistoryVisibility.SHARED, HistoryVisibility.WORLD_READABLE)
            for e in event_map.values()
        )

    if not check_history_visibility_only:
        erased_senders = await storage.main.are_users_erased((e.sender for e in events))
    else:
        # We don't want to check whether users are erased, which is equivalent
        # to no users having been erased.
        erased_senders = {}

    if all_open:
        # all the history_visibility state affecting these events is open, so
        # we don't need to filter by membership state. We *do* need to check
        # for user erasure, though.
        if erased_senders:
            to_return = []
            for e in events:
                if not is_sender_erased(e, erased_senders):
                    to_return.append(e)
                elif redact:
                    to_return.append(prune_event(e))

            return to_return

        # If there are no erased users then we can just return the given list
        # of events without having to copy it.
        return events

    # Ok, so we're dealing with events that have non-trivial visibility
    # rules, so we need to also get the memberships of the room.

    # first, for each event we're wanting to return, get the event_ids
    # of the history vis and membership state at those events.
    event_to_state_ids = await storage.state.get_state_ids_for_events(
        frozenset(e.event_id for e in events),
        state_filter=StateFilter.from_types(
            types=((EventTypes.RoomHistoryVisibility, ""), (EventTypes.Member, None))
        ),
    )

    # We only want to pull out member events that correspond to the
    # server's domain.
    #
    # event_to_state_ids contains lots of duplicates, so it turns out to be
    # cheaper to build a complete event_id => (type, state_key) dict, and then
    # filter out the ones we don't want
    #
    event_id_to_state_key = {
        event_id: key
        for key_to_eid in event_to_state_ids.values()
        for key, event_id in key_to_eid.items()
    }

    def include(typ, state_key):
        # Returns True if the state event with this (type, state_key) should
        # be fetched: all non-member events, plus member events for users on
        # the requesting server's domain.
        if typ != EventTypes.Member:
            return True

        # we avoid using get_domain_from_id here for efficiency.
        idx = state_key.find(":")
        if idx == -1:
            return False
        return state_key[idx + 1 :] == server_name

    event_map = await storage.main.get_events(
        [e_id for e_id, key in event_id_to_state_key.items() if include(key[0], key[1])]
    )

    # Rebuild, for each event, the (type, state_key) -> state event map,
    # restricted to the state events we actually fetched above.
    event_to_state = {
        e_id: {
            key: event_map[inner_e_id]
            for key, inner_e_id in key_to_eid.items()
            if inner_e_id in event_map
        }
        for e_id, key_to_eid in event_to_state_ids.items()
    }

    # Finally, apply the visibility and erasure checks to each event,
    # redacting (rather than dropping) when `redact` is set.
    to_return = []
    for e in events:
        erased = is_sender_erased(e, erased_senders)
        visible = check_event_is_visible(e, event_to_state[e.event_id])
        if visible and not erased:
            to_return.append(e)
        elif redact:
            to_return.append(prune_event(e))

    return to_return
|