2020-07-01 04:08:25 -06:00
|
|
|
# Copyright 2015 OpenMarket Ltd
|
|
|
|
# Copyright 2018 New Vector Ltd
|
2015-12-10 10:51:15 -07:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
2022-08-04 13:38:08 -06:00
|
|
|
|
|
|
|
"""Responsible for storing and fetching push actions / notifications.
|
|
|
|
|
|
|
|
There are two main uses for push actions:
|
|
|
|
1. Sending out push to a user's device; and
|
|
|
|
2. Tracking per-room per-user notification counts (used in sync requests).
|
|
|
|
|
|
|
|
For the former we simply use the `event_push_actions` table, which contains all
|
|
|
|
the calculated actions for a given user (which were calculated by the
|
|
|
|
`BulkPushRuleEvaluator`).
|
|
|
|
|
|
|
|
For the latter we could simply count the number of rows in `event_push_actions`
|
|
|
|
table for a given room/user, but in practice this is *very* heavyweight when
|
|
|
|
there were a large number of notifications (due to e.g. the user never reading a
|
|
|
|
room). Plus, keeping all push actions indefinitely uses a lot of disk space.
|
|
|
|
|
|
|
|
To fix these issues, we add a new table `event_push_summary` that tracks
|
|
|
|
per-user per-room counts of all notifications that happened before a stream
|
|
|
|
ordering S. Thus, to get the notification count for a user / room we can simply
|
|
|
|
query a single row in `event_push_summary` and count the number of rows in
|
|
|
|
`event_push_actions` with a stream ordering larger than S (and as long as S is
|
|
|
|
"recent", the number of rows needing to be scanned will be small).
|
|
|
|
|
|
|
|
The `event_push_summary` table is updated via a background job that periodically
|
|
|
|
chooses a new stream ordering S' (usually the latest stream ordering), counts
|
|
|
|
all notifications in `event_push_actions` between the existing S and S', and
|
|
|
|
adds them to the existing counts in `event_push_summary`.
|
|
|
|
|
|
|
|
This allows us to delete old rows from `event_push_actions` once those rows have
|
|
|
|
been counted and added to `event_push_summary` (we call this process
|
|
|
|
"rotation").
|
|
|
|
|
|
|
|
|
|
|
|
We need to handle when a user sends a read receipt to the room. Again this is
|
|
|
|
done as a background process. For each receipt we clear the row in
|
|
|
|
`event_push_summary` and count the number of notifications in
|
|
|
|
`event_push_actions` that happened after the receipt but before S, and insert
|
|
|
|
that count into `event_push_summary` (If the receipt happened *after* S then we
|
|
|
|
simply clear the `event_push_summary`.)
|
|
|
|
|
|
|
|
Note that it's possible that if the read receipt is for an old event the relevant
|
|
|
|
`event_push_actions` rows will have been rotated and we get the wrong count
|
|
|
|
(it'll be too low). We accept this as a rare edge case that is unlikely to
|
|
|
|
impact the user much (since the vast majority of read receipts will be for the
|
|
|
|
latest event).
|
|
|
|
|
|
|
|
The last complication is to handle the race where we request the notifications
|
|
|
|
counts after a user sends a read receipt into the room, but *before* the
|
|
|
|
background update handles the receipt (without any special handling the counts
|
|
|
|
would be outdated). We fix this by including in `event_push_summary` the read
|
|
|
|
receipt we used when updating `event_push_summary`, and every time we query the
|
|
|
|
table we check if that matches the most recent read receipt in the room. If yes,
|
|
|
|
continue as above, if not we simply query the `event_push_actions` table
|
|
|
|
directly.
|
|
|
|
|
|
|
|
Since read receipts are almost always for recent events, scanning the
|
|
|
|
`event_push_actions` table in this case is unlikely to be a problem. Even if it
|
|
|
|
is a problem, it is temporary until the background job handles the new read
|
|
|
|
receipt.
|
|
|
|
"""
|
|
|
|
|
2015-12-10 10:51:15 -07:00
|
|
|
import logging
|
2022-08-16 05:22:17 -06:00
|
|
|
from typing import (
|
|
|
|
TYPE_CHECKING,
|
|
|
|
Collection,
|
|
|
|
Dict,
|
|
|
|
List,
|
|
|
|
Mapping,
|
|
|
|
Optional,
|
|
|
|
Tuple,
|
|
|
|
Union,
|
|
|
|
cast,
|
|
|
|
)
|
2020-09-02 10:19:37 -06:00
|
|
|
|
|
|
|
import attr
|
2018-06-28 07:49:57 -06:00
|
|
|
|
2022-06-15 09:17:14 -06:00
|
|
|
from synapse.api.constants import ReceiptTypes
|
2020-10-09 05:37:51 -06:00
|
|
|
from synapse.metrics.background_process_metrics import wrap_as_background_process
|
2022-08-05 09:09:33 -06:00
|
|
|
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
|
2021-12-13 10:05:00 -07:00
|
|
|
from synapse.storage.database import (
|
|
|
|
DatabasePool,
|
|
|
|
LoggingDatabaseConnection,
|
|
|
|
LoggingTransaction,
|
|
|
|
)
|
2022-06-15 09:17:14 -06:00
|
|
|
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
|
2022-06-29 04:32:38 -06:00
|
|
|
from synapse.storage.databases.main.stream import StreamWorkerStore
|
2022-09-14 11:11:16 -06:00
|
|
|
from synapse.types import JsonDict
|
2020-08-07 06:02:55 -06:00
|
|
|
from synapse.util import json_encoder
|
2020-08-14 05:24:26 -06:00
|
|
|
from synapse.util.caches.descriptors import cached
|
2018-04-28 05:19:12 -06:00
|
|
|
|
2021-10-22 11:15:41 -06:00
|
|
|
if TYPE_CHECKING:
|
|
|
|
from synapse.server import HomeServer
|
|
|
|
|
2015-12-10 10:51:15 -07:00
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
2021-12-21 06:25:34 -07:00
|
|
|
# The default action list for an ordinary notification: push the event but
# explicitly disable highlighting. Stored compressed as "" by
# `_serialize_action` below.
DEFAULT_NOTIF_ACTION: List[Union[dict, str]] = [
    "notify",
    {"set_tweak": "highlight", "value": False},
]
# The default action list for a highlight: push the event, play the default
# sound and highlight it. Also stored compressed as "" (the `is_highlight`
# flag disambiguates the two defaults).
DEFAULT_HIGHLIGHT_ACTION: List[Union[dict, str]] = [
    "notify",
    {"set_tweak": "sound", "value": "default"},
    {"set_tweak": "highlight"},
]
|
|
|
|
|
|
|
|
|
2021-12-21 06:25:34 -07:00
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class HttpPushAction:
    """
    HttpPushAction instances include the information used to generate HTTP
    requests to a push gateway.
    """

    # The ID of the event that triggered this push action.
    event_id: str
    # The room the event was sent in.
    room_id: str
    # The stream ordering of the event.
    stream_ordering: int
    # The deserialized push-rule actions (strings such as "notify" and
    # set_tweak dicts — see DEFAULT_NOTIF_ACTION / DEFAULT_HIGHLIGHT_ACTION).
    actions: List[Union[dict, str]]
|
2021-11-30 04:49:20 -07:00
|
|
|
|
|
|
|
|
2021-12-21 06:25:34 -07:00
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class EmailPushAction(HttpPushAction):
    """
    EmailPushAction instances include the information used to render an email
    push notification.
    """

    # When the event was received by this server, taken from the
    # `events.received_ts` column; may be None.
    received_ts: Optional[int]
|
|
|
|
|
|
|
|
|
2021-12-21 06:25:34 -07:00
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class UserPushAction(EmailPushAction):
    """
    UserPushAction instances include the necessary information to respond to
    /notifications requests.
    """

    # The topological ordering of the event in the room.
    topological_ordering: int
    # Whether the action was a highlight.
    highlight: bool
    # Profile tag associated with the push action — presumably the pusher's
    # profile_tag; confirm against the /notifications caller.
    profile_tag: str
|
|
|
|
|
|
|
|
|
2022-06-15 09:17:14 -06:00
|
|
|
@attr.s(slots=True, auto_attribs=True)
class NotifCounts:
    """
    The per-user, per-room count of notifications. Used by sync and push.
    """

    # Count of events with `notif = 1` push actions (plus summarised counts).
    notify_count: int = 0
    # Count of events with `unread = 1` push actions (tracked separately
    # from notify_count).
    unread_count: int = 0
    # Count of events with `highlight = 1` push actions (never summarised —
    # always counted directly from `event_push_actions`).
    highlight_count: int = 0
|
2021-12-21 06:25:34 -07:00
|
|
|
|
|
|
|
|
2022-08-16 05:22:17 -06:00
|
|
|
def _serialize_action(
    actions: Collection[Union[Mapping, str]], is_highlight: bool
) -> str:
    """Custom serializer for actions. This allows us to "compress" common actions.

    Most users share the same default action list for notifications (and for
    highlights), so those defaults are stored as the empty string rather than
    their full JSON encoding. The empty string is not itself valid JSON, so it
    can never clash with a genuine encoded action list.
    """
    default = DEFAULT_HIGHLIGHT_ACTION if is_highlight else DEFAULT_NOTIF_ACTION
    if actions == default:
        # We use empty string as the column is non-NULL
        return ""
    return json_encoder.encode(actions)
|
2017-02-14 09:37:25 -07:00
|
|
|
|
|
|
|
|
2021-12-21 06:25:34 -07:00
|
|
|
def _deserialize_action(actions: str, is_highlight: bool) -> List[Union[dict, str]]:
    """Custom deserializer for actions. This allows us to "compress" common actions"""
    if not actions:
        # The empty string marks one of the two default action lists; the
        # highlight flag tells us which one was compressed away.
        return DEFAULT_HIGHLIGHT_ACTION if is_highlight else DEFAULT_NOTIF_ACTION
    return db_to_json(actions)
|
2017-02-14 09:37:25 -07:00
|
|
|
|
|
|
|
|
2022-06-29 04:32:38 -06:00
|
|
|
class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBaseStore):
|
2021-12-13 10:05:00 -07:00
|
|
|
def __init__(
    self,
    database: DatabasePool,
    db_conn: LoggingDatabaseConnection,
    hs: "HomeServer",
) -> None:
    super().__init__(database, db_conn, hs)

    # These get correctly set by _find_stream_orderings_for_times_txn
    self.stream_ordering_month_ago: Optional[int] = None
    self.stream_ordering_day_ago: Optional[int] = None

    # Populate the two attributes above synchronously at startup.
    cur = db_conn.cursor(txn_name="_find_stream_orderings_for_times_txn")
    self._find_stream_orderings_for_times_txn(cur)
    cur.close()

    # Refresh the day/month-ago stream orderings every 10 minutes.
    self.find_stream_orderings_looping_call = self._clock.looping_call(
        self._find_stream_orderings_for_times, 10 * 60 * 1000
    )

    # Batch size used when rotating push actions into `event_push_summary`
    # (see the module docstring) — TODO confirm against `_rotate_notifs`.
    self._rotate_count = 10000
    # Guard flag so only one rotation runs at a time.
    self._doing_notif_rotation = False
    if hs.config.worker.run_background_tasks:
        # Only the background-task worker performs rotation; every 30s.
        self._rotate_notif_loop = self._clock.looping_call(
            self._rotate_notifs, 30 * 1000
        )

    # Replace the old non-unique index with a unique (user_id, room_id) one.
    self.db_pool.updates.register_background_index_update(
        "event_push_summary_unique_index",
        index_name="event_push_summary_unique_index",
        table="event_push_summary",
        columns=["user_id", "room_id"],
        unique=True,
        replaces_index="event_push_summary_user_rm",
    )

    # Newer unique index that also includes the thread ID.
    self.db_pool.updates.register_background_index_update(
        "event_push_summary_unique_index2",
        index_name="event_push_summary_unique_index2",
        table="event_push_summary",
        columns=["user_id", "room_id", "thread_id"],
        unique=True,
    )

    # Backfills NULL thread_id columns; see _background_backfill_thread_id.
    self.db_pool.updates.register_background_update_handler(
        "event_push_backfill_thread_id",
        self._background_backfill_thread_id,
    )
|
|
|
|
|
|
|
|
async def _background_backfill_thread_id(
    self, progress: JsonDict, batch_size: int
) -> int:
    """
    Fill in the thread_id field for event_push_actions and event_push_summary.

    This is preparatory so that it can be made non-nullable in the future.

    Because all current (null) data is done in an unthreaded manner this
    simply assumes it is on the "main" timeline. Since event_push_actions
    are periodically cleared it is not possible to correctly re-calculate
    the thread_id.

    Args:
        progress: The progress dict maintained by the background updater.
        batch_size: Maximum number of rows to process per invocation.

    Returns:
        The number of rows processed (0 once the current table is exhausted).
    """
    # Whether the first table (event_push_actions) has been fully processed.
    event_push_actions_done = progress.get("event_push_actions_done", False)

    def add_thread_id_txn(
        txn: LoggingTransaction, table_name: str, start_stream_ordering: int
    ) -> int:
        # Fetch the next batch of rows that still lack a thread ID.
        sql = f"""
        SELECT stream_ordering
        FROM {table_name}
        WHERE
            thread_id IS NULL
            AND stream_ordering > ?
        ORDER BY stream_ordering
        LIMIT ?
        """
        txn.execute(sql, (start_stream_ordering, batch_size))

        # No more rows to process.
        rows = txn.fetchall()
        if not rows:
            progress[f"{table_name}_done"] = True
            self.db_pool.updates._background_update_progress_txn(
                txn, "event_push_backfill_thread_id", progress
            )
            return 0

        # Update the thread ID for any of those rows.
        max_stream_ordering = rows[-1][0]

        sql = f"""
        UPDATE {table_name}
        SET thread_id = 'main'
        WHERE stream_ordering <= ? AND thread_id IS NULL
        """
        txn.execute(sql, (max_stream_ordering,))

        # Update progress.
        processed_rows = txn.rowcount
        progress[f"max_{table_name}_stream_ordering"] = max_stream_ordering
        self.db_pool.updates._background_update_progress_txn(
            txn, "event_push_backfill_thread_id", progress
        )

        return processed_rows

    # First update the event_push_actions table, then the event_push_summary table.
    #
    # Note that the event_push_actions_staging table is ignored since it is
    # assumed that items in that table will only exist for a short period of
    # time.
    if not event_push_actions_done:
        result = await self.db_pool.runInteraction(
            "event_push_backfill_thread_id",
            add_thread_id_txn,
            "event_push_actions",
            progress.get("max_event_push_actions_stream_ordering", 0),
        )
    else:
        result = await self.db_pool.runInteraction(
            "event_push_backfill_thread_id",
            add_thread_id_txn,
            "event_push_summary",
            progress.get("max_event_push_summary_stream_ordering", 0),
        )

    # Only done after the event_push_summary table is done.
    if not result:
        await self.db_pool.updates._end_background_update(
            "event_push_backfill_thread_id"
        )

    return result
|
|
|
|
|
2022-06-15 09:17:14 -06:00
|
|
|
# Results are memoised per (room_id, user_id); invalidation is handled
# elsewhere when the underlying counts change.
@cached(tree=True, max_entries=5000)
async def get_unread_event_push_actions_by_room_for_user(
    self,
    room_id: str,
    user_id: str,
) -> NotifCounts:
    """Get the notification count, the highlight count and the unread message count
    for a given user in a given room after their latest read receipt.

    Note that this function assumes the user to be a current member of the room,
    since it's either called by the sync handler to handle joined room entries, or by
    the HTTP pusher to calculate the badge of unread joined rooms.

    Args:
        room_id: The room to retrieve the counts in.
        user_id: The user to retrieve the counts for.

    Returns
        A NotifCounts object containing the notification count, the highlight count
        and the unread message count.
    """
    return await self.db_pool.runInteraction(
        "get_unread_event_push_actions_by_room",
        self._get_unread_counts_by_receipt_txn,
        room_id,
        user_id,
    )
|
2016-07-04 12:44:55 -06:00
|
|
|
|
2019-04-03 03:07:29 -06:00
|
|
|
def _get_unread_counts_by_receipt_txn(
    self,
    txn: LoggingTransaction,
    room_id: str,
    user_id: str,
) -> NotifCounts:
    """Transaction implementation of get_unread_event_push_actions_by_room_for_user.

    Resolves the user's latest unthreaded read receipt in the room (falling
    back to their membership event) to a stream ordering, then delegates the
    actual counting to `_get_unread_counts_by_pos_txn`.

    Args:
        txn: The database transaction.
        room_id: The room to retrieve the counts in.
        user_id: The user to retrieve the counts for.

    Returns
        A NotifCounts object containing the notification count, the highlight
        count and the unread message count.
    """
    # Get the stream ordering of the user's latest receipt in the room.
    result = self.get_last_unthreaded_receipt_for_user_txn(
        txn,
        user_id,
        room_id,
        receipt_types=(ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE),
    )

    if result:
        _, stream_ordering = result

    else:
        # If the user has no receipts in the room, retrieve the stream ordering for
        # the latest membership event from this user in this room (which we assume is
        # a join).
        event_id = self.db_pool.simple_select_one_onecol_txn(
            txn=txn,
            table="local_current_membership",
            keyvalues={"room_id": room_id, "user_id": user_id},
            retcol="event_id",
        )

        stream_ordering = self.get_stream_id_for_event_txn(txn, event_id)

    return self._get_unread_counts_by_pos_txn(
        txn, room_id, user_id, stream_ordering
    )
|
2016-11-23 08:57:04 -07:00
|
|
|
|
2021-12-21 06:25:34 -07:00
|
|
|
def _get_unread_counts_by_pos_txn(
    self,
    txn: LoggingTransaction,
    room_id: str,
    user_id: str,
    receipt_stream_ordering: int,
) -> NotifCounts:
    """Get the number of unread messages for a user/room that have happened
    since the given stream ordering.

    Args:
        txn: The database transaction.
        room_id: The room ID to get unread counts for.
        user_id: The user ID to get unread counts for.
        receipt_stream_ordering: The stream ordering of the user's latest
            receipt in the room. If there are no receipts, the stream ordering
            of the user's join event.

    Returns
        A NotifCounts object containing the notification count, the highlight count
        and the unread message count.
    """

    # Accumulates summarised counts plus any unsummarised push actions.
    counts = NotifCounts()

    # First we pull the counts from the summary table.
    #
    # We check that `last_receipt_stream_ordering` matches the stream
    # ordering given. If it doesn't match then a new read receipt has arrived and
    # we haven't yet updated the counts in `event_push_summary` to reflect
    # that; in that case we simply ignore `event_push_summary` counts
    # and do a manual count of all of the rows in the `event_push_actions` table
    # for this user/room.
    #
    # If `last_receipt_stream_ordering` is null then that means it's up to
    # date (as the row was written by an older version of Synapse that
    # updated `event_push_summary` synchronously when persisting a new read
    # receipt).
    txn.execute(
        """
            SELECT stream_ordering, notif_count, COALESCE(unread_count, 0)
            FROM event_push_summary
            WHERE room_id = ? AND user_id = ?
            AND (
                (last_receipt_stream_ordering IS NULL AND stream_ordering > ?)
                OR last_receipt_stream_ordering = ?
            )
        """,
        (room_id, user_id, receipt_stream_ordering, receipt_stream_ordering),
    )
    row = txn.fetchone()

    summary_stream_ordering = 0
    if row:
        summary_stream_ordering = row[0]
        counts.notify_count += row[1]
        counts.unread_count += row[2]

    # Next we need to count highlights, which aren't summarised
    sql = """
        SELECT COUNT(*) FROM event_push_actions
        WHERE user_id = ?
            AND room_id = ?
            AND stream_ordering > ?
            AND highlight = 1
    """
    txn.execute(sql, (user_id, room_id, receipt_stream_ordering))
    row = txn.fetchone()
    if row:
        counts.highlight_count += row[0]

    # Finally we need to count push actions that aren't included in the
    # summary returned above. This might be due to recent events that haven't
    # been summarised yet or the summary is out of date due to a recent read
    # receipt.
    start_unread_stream_ordering = max(
        receipt_stream_ordering, summary_stream_ordering
    )
    notify_count, unread_count = self._get_notif_unread_count_for_user_room(
        txn, room_id, user_id, start_unread_stream_ordering
    )

    counts.notify_count += notify_count
    counts.unread_count += unread_count

    return counts
|
|
|
|
|
|
|
|
def _get_notif_unread_count_for_user_room(
    self,
    txn: LoggingTransaction,
    room_id: str,
    user_id: str,
    stream_ordering: int,
    max_stream_ordering: Optional[int] = None,
) -> Tuple[int, int]:
    """Returns the notify and unread counts from `event_push_actions` for
    the given user/room in the given range.

    Does not consult `event_push_summary` table, which may include push
    actions that have been deleted from `event_push_actions` table.

    Args:
        txn: The database transaction.
        room_id: The room ID to get unread counts for.
        user_id: The user ID to get unread counts for.
        stream_ordering: The (exclusive) minimum stream ordering to consider.
        max_stream_ordering: The (inclusive) maximum stream ordering to consider.
            If this is not given, then no maximum is applied.

    Return:
        A tuple of the notif count and unread count in the given range.
    """

    # If there have been no events in the room since the stream ordering,
    # there can't be any push actions either.
    if not self._events_stream_cache.has_entity_changed(room_id, stream_ordering):
        return 0, 0

    clause = ""
    args = [user_id, room_id, stream_ordering]
    if max_stream_ordering is not None:
        clause = "AND ea.stream_ordering <= ?"
        args.append(max_stream_ordering)

        # If the max stream ordering is less than the min stream ordering,
        # then obviously there are zero push actions in that range.
        #
        # NOTE: this check must stay inside the `is not None` branch —
        # comparing the default `None` against an int raises TypeError.
        if max_stream_ordering <= stream_ordering:
            return 0, 0

    # Count notifying and unread actions in a single scan of the range.
    sql = f"""
        SELECT
           COUNT(CASE WHEN notif = 1 THEN 1 END),
           COUNT(CASE WHEN unread = 1 THEN 1 END)
        FROM event_push_actions ea
        WHERE user_id = ?
           AND room_id = ?
           AND ea.stream_ordering > ?
           {clause}
    """

    txn.execute(sql, args)
    row = txn.fetchone()

    if row:
        return cast(Tuple[int, int], row)

    return 0, 0
|
|
|
|
|
2020-07-30 05:20:41 -06:00
|
|
|
async def get_push_action_users_in_range(
    self, min_stream_ordering: int, max_stream_ordering: int
) -> List[str]:
    """Return the distinct users who have a notifying push action within the
    given (inclusive) stream ordering range.
    """

    def _users_in_range_txn(txn: LoggingTransaction) -> List[str]:
        sql = (
            "SELECT DISTINCT(user_id) FROM event_push_actions WHERE"
            " stream_ordering >= ? AND stream_ordering <= ? AND notif = 1"
        )
        txn.execute(sql, (min_stream_ordering, max_stream_ordering))
        return [row[0] for row in txn]

    return await self.db_pool.runInteraction(
        "get_push_action_users_in_range", _users_in_range_txn
    )
|
2016-04-06 08:42:15 -06:00
|
|
|
|
2022-08-24 03:12:51 -06:00
|
|
|
def _get_receipts_by_room_txn(
    self, txn: LoggingTransaction, user_id: str
) -> Dict[str, int]:
    """
    Generate a map of room ID to the latest stream ordering that has been
    read by the given user.

    Args:
        txn:
        user_id: The user to fetch receipts for.

    Returns:
        A map of room ID to stream ordering for all rooms the user has a receipt in.
    """
    # Restrict to public and private read receipts.
    receipt_types_clause, args = make_in_list_sql_clause(
        self.database_engine,
        "receipt_type",
        (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE),
    )

    sql = f"""
        SELECT room_id, MAX(stream_ordering)
        FROM receipts_linearized
        INNER JOIN events USING (room_id, event_id)
        WHERE {receipt_types_clause}
        AND user_id = ?
        GROUP BY room_id
    """

    args.append(user_id)
    txn.execute(sql, args)
    # Each row is a (room_id, max_stream_ordering) pair.
    return dict(txn.fetchall())
|
2022-08-24 03:12:51 -06:00
|
|
|
|
2020-07-30 05:20:41 -06:00
|
|
|
async def get_unread_push_actions_for_user_in_range_for_http(
    self,
    user_id: str,
    min_stream_ordering: int,
    max_stream_ordering: int,
    limit: int = 20,
) -> List[HttpPushAction]:
    """Get a list of the most recent unread push actions for a given user,
    within the given stream ordering range. Called by the httppusher.

    Args:
        user_id: The user to fetch push actions for.
        min_stream_ordering: The exclusive lower bound on the
            stream ordering of event push actions to fetch.
        max_stream_ordering: The inclusive upper bound on the
            stream ordering of event push actions to fetch.
        limit: The maximum number of rows to return.
    Returns:
        A list of HttpPushAction objects (with event_id, room_id,
        stream_ordering and actions populated).
        The list will be ordered by ascending stream_ordering.
        The list will have between 0~limit entries.
    """

    # Map of room ID -> latest read stream ordering, used below to drop
    # actions the user has already read.
    receipts_by_room = await self.db_pool.runInteraction(
        "get_unread_push_actions_for_user_in_range_http_receipts",
        self._get_receipts_by_room_txn,
        user_id=user_id,
    )

    def get_push_actions_txn(
        txn: LoggingTransaction,
    ) -> List[Tuple[str, str, int, str, bool]]:
        sql = """
            SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, ep.highlight
            FROM event_push_actions AS ep
            WHERE
                ep.user_id = ?
                AND ep.stream_ordering > ?
                AND ep.stream_ordering <= ?
                AND ep.notif = 1
            ORDER BY ep.stream_ordering ASC LIMIT ?
        """
        txn.execute(sql, (user_id, min_stream_ordering, max_stream_ordering, limit))
        return cast(List[Tuple[str, str, int, str, bool]], txn.fetchall())

    push_actions = await self.db_pool.runInteraction(
        "get_unread_push_actions_for_user_in_range_http", get_push_actions_txn
    )

    notifs = [
        HttpPushAction(
            event_id=event_id,
            room_id=room_id,
            stream_ordering=stream_ordering,
            actions=_deserialize_action(actions, highlight),
        )
        for event_id, room_id, stream_ordering, actions, highlight in push_actions
        # Only include push actions with a stream ordering after any receipt, or without any
        # receipt present (invited to but never read rooms).
        if stream_ordering > receipts_by_room.get(room_id, 0)
    ]

    # Now sort it so it's ordered correctly, since currently it will
    # contain results from the first query, correctly ordered, followed
    # by results from the second query, but we want them all ordered
    # by stream_ordering, oldest first.
    notifs.sort(key=lambda r: r.stream_ordering)

    # Take only up to the limit. We have to stop at the limit because
    # one of the subqueries may have hit the limit.
    return notifs[:limit]
|
2016-07-28 13:24:24 -06:00
|
|
|
|
2020-07-30 05:20:41 -06:00
|
|
|
    async def get_unread_push_actions_for_user_in_range_for_email(
        self,
        user_id: str,
        min_stream_ordering: int,
        max_stream_ordering: int,
        limit: int = 20,
    ) -> List[EmailPushAction]:
        """Get a list of the most recent unread push actions for a given user,
        within the given stream ordering range. Called by the emailpusher

        Args:
            user_id: The user to fetch push actions for.
            min_stream_ordering: The exclusive lower bound on the
                stream ordering of event push actions to fetch.
            max_stream_ordering: The inclusive upper bound on the
                stream ordering of event push actions to fetch.
            limit: The maximum number of rows to return.
        Returns:
            A list of `EmailPushAction` objects.
            The list will be ordered by descending received_ts.
            The list will have between 0~limit entries.
        """

        # Most-recent read-receipt stream ordering per room, used below to
        # drop actions the user has already read.
        receipts_by_room = await self.db_pool.runInteraction(
            "get_unread_push_actions_for_user_in_range_email_receipts",
            self._get_receipts_by_room_txn,
            user_id=user_id,
        )

        def get_push_actions_txn(
            txn: LoggingTransaction,
        ) -> List[Tuple[str, str, int, str, bool, int]]:
            sql = """
                SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions,
                    ep.highlight, e.received_ts
                FROM event_push_actions AS ep
                INNER JOIN events AS e USING (room_id, event_id)
                WHERE
                    ep.user_id = ?
                    AND ep.stream_ordering > ?
                    AND ep.stream_ordering <= ?
                    AND ep.notif = 1
                ORDER BY ep.stream_ordering DESC LIMIT ?
            """
            txn.execute(sql, (user_id, min_stream_ordering, max_stream_ordering, limit))
            return cast(List[Tuple[str, str, int, str, bool, int]], txn.fetchall())

        push_actions = await self.db_pool.runInteraction(
            "get_unread_push_actions_for_user_in_range_email", get_push_actions_txn
        )

        # Convert the rows into EmailPushActions, dropping anything covered
        # by a read receipt.
        notifs = [
            EmailPushAction(
                event_id=event_id,
                room_id=room_id,
                stream_ordering=stream_ordering,
                actions=_deserialize_action(actions, highlight),
                received_ts=received_ts,
            )
            for event_id, room_id, stream_ordering, actions, highlight, received_ts in push_actions
            # Only include push actions with a stream ordering after any receipt, or without any
            # receipt present (invited to but never read rooms).
            if stream_ordering > receipts_by_room.get(room_id, 0)
        ]

        # The query above returned rows ordered by stream ordering, but the
        # contract of this method is to order by received_ts (most recent
        # first), so re-sort here. `received_ts` may be NULL/None, hence the
        # `or 0` fallback.
        notifs.sort(key=lambda r: -(r.received_ts or 0))

        # Now return the first `limit`
        return notifs[:limit]
|
2016-04-06 08:42:15 -06:00
|
|
|
|
2020-08-28 09:34:50 -06:00
|
|
|
async def get_if_maybe_push_in_range_for_user(
|
|
|
|
self, user_id: str, min_stream_ordering: int
|
|
|
|
) -> bool:
|
2019-04-02 09:45:33 -06:00
|
|
|
"""A fast check to see if there might be something to push for the
|
|
|
|
user since the given stream ordering. May return false positives.
|
|
|
|
|
|
|
|
Useful to know whether to bother starting a pusher on start up or not.
|
|
|
|
|
|
|
|
Args:
|
2020-08-28 09:34:50 -06:00
|
|
|
user_id
|
|
|
|
min_stream_ordering
|
2019-04-02 09:45:33 -06:00
|
|
|
|
|
|
|
Returns:
|
2020-08-28 09:34:50 -06:00
|
|
|
True if there may be push to process, False if there definitely isn't.
|
2019-04-02 09:45:33 -06:00
|
|
|
"""
|
|
|
|
|
2021-12-21 06:25:34 -07:00
|
|
|
def _get_if_maybe_push_in_range_for_user_txn(txn: LoggingTransaction) -> bool:
|
2019-04-02 09:45:33 -06:00
|
|
|
sql = """
|
|
|
|
SELECT 1 FROM event_push_actions
|
2020-09-02 10:19:37 -06:00
|
|
|
WHERE user_id = ? AND stream_ordering > ? AND notif = 1
|
2019-04-02 09:45:33 -06:00
|
|
|
LIMIT 1
|
|
|
|
"""
|
|
|
|
|
2019-04-03 03:07:29 -06:00
|
|
|
txn.execute(sql, (user_id, min_stream_ordering))
|
2019-04-02 09:45:33 -06:00
|
|
|
return bool(txn.fetchone())
|
|
|
|
|
2020-08-28 09:34:50 -06:00
|
|
|
return await self.db_pool.runInteraction(
|
2019-04-02 09:45:33 -06:00
|
|
|
"get_if_maybe_push_in_range_for_user",
|
|
|
|
_get_if_maybe_push_in_range_for_user_txn,
|
|
|
|
)
|
|
|
|
|
2020-08-28 09:34:50 -06:00
|
|
|
async def add_push_actions_to_staging(
|
2020-09-02 10:19:37 -06:00
|
|
|
self,
|
|
|
|
event_id: str,
|
2022-08-16 05:22:17 -06:00
|
|
|
user_id_actions: Dict[str, Collection[Union[Mapping, str]]],
|
2020-09-02 10:19:37 -06:00
|
|
|
count_as_unread: bool,
|
2022-09-14 11:11:16 -06:00
|
|
|
thread_id: str,
|
2020-08-28 09:34:50 -06:00
|
|
|
) -> None:
|
2018-02-27 05:01:36 -07:00
|
|
|
"""Add the push actions for the event to the push action staging area.
|
|
|
|
|
|
|
|
Args:
|
2020-08-28 09:34:50 -06:00
|
|
|
event_id
|
|
|
|
user_id_actions: A mapping of user_id to list of push actions, where
|
|
|
|
an action can either be a string or dict.
|
2020-09-02 10:19:37 -06:00
|
|
|
count_as_unread: Whether this event should increment unread counts.
|
2022-09-14 11:11:16 -06:00
|
|
|
thread_id: The thread this event is parent of, if applicable.
|
2018-02-27 05:01:36 -07:00
|
|
|
"""
|
|
|
|
if not user_id_actions:
|
|
|
|
return
|
|
|
|
|
|
|
|
# This is a helper function for generating the necessary tuple that
|
2020-09-02 10:19:37 -06:00
|
|
|
# can be used to insert into the `event_push_actions_staging` table.
|
2021-12-21 06:25:34 -07:00
|
|
|
def _gen_entry(
|
2022-08-16 05:22:17 -06:00
|
|
|
user_id: str, actions: Collection[Union[Mapping, str]]
|
2022-09-14 11:11:16 -06:00
|
|
|
) -> Tuple[str, str, str, int, int, int, str]:
|
2018-02-27 05:01:36 -07:00
|
|
|
is_highlight = 1 if _action_has_highlight(actions) else 0
|
2020-09-02 10:19:37 -06:00
|
|
|
notif = 1 if "notify" in actions else 0
|
2018-02-27 05:01:36 -07:00
|
|
|
return (
|
|
|
|
event_id, # event_id column
|
|
|
|
user_id, # user_id column
|
2021-12-21 06:25:34 -07:00
|
|
|
_serialize_action(actions, bool(is_highlight)), # actions column
|
2020-09-02 10:19:37 -06:00
|
|
|
notif, # notif column
|
2018-02-27 05:01:36 -07:00
|
|
|
is_highlight, # highlight column
|
2020-09-02 10:19:37 -06:00
|
|
|
int(count_as_unread), # unread column
|
2022-09-14 11:11:16 -06:00
|
|
|
thread_id, # thread_id column
|
2018-02-27 05:01:36 -07:00
|
|
|
)
|
|
|
|
|
2022-08-30 05:12:48 -06:00
|
|
|
await self.db_pool.simple_insert_many(
|
|
|
|
"event_push_actions_staging",
|
2022-09-14 11:11:16 -06:00
|
|
|
keys=(
|
|
|
|
"event_id",
|
|
|
|
"user_id",
|
|
|
|
"actions",
|
|
|
|
"notif",
|
|
|
|
"highlight",
|
|
|
|
"unread",
|
|
|
|
"thread_id",
|
|
|
|
),
|
2022-08-30 05:12:48 -06:00
|
|
|
values=[
|
|
|
|
_gen_entry(user_id, actions)
|
|
|
|
for user_id, actions in user_id_actions.items()
|
|
|
|
],
|
|
|
|
desc="add_push_actions_to_staging",
|
2018-02-27 05:01:36 -07:00
|
|
|
)
|
|
|
|
|
2020-07-30 05:20:41 -06:00
|
|
|
async def remove_push_actions_from_staging(self, event_id: str) -> None:
|
2018-02-27 05:01:36 -07:00
|
|
|
"""Called if we failed to persist the event to ensure that stale push
|
|
|
|
actions don't build up in the DB
|
|
|
|
"""
|
|
|
|
|
2018-04-27 04:07:40 -06:00
|
|
|
try:
|
2021-12-21 06:25:34 -07:00
|
|
|
await self.db_pool.simple_delete(
|
2018-04-27 04:07:40 -06:00
|
|
|
table="event_push_actions_staging",
|
2019-04-03 03:07:29 -06:00
|
|
|
keyvalues={"event_id": event_id},
|
2018-04-27 04:07:40 -06:00
|
|
|
desc="remove_push_actions_from_staging",
|
|
|
|
)
|
|
|
|
except Exception:
|
|
|
|
# this method is called from an exception handler, so propagating
|
|
|
|
# another exception here really isn't helpful - there's nothing
|
|
|
|
# the caller can do about it. Just log the exception and move on.
|
|
|
|
logger.exception(
|
2019-04-03 03:07:29 -06:00
|
|
|
"Error removing push actions after event persistence failure"
|
2018-04-27 04:07:40 -06:00
|
|
|
)
|
2018-02-27 05:01:36 -07:00
|
|
|
|
2020-10-09 05:37:51 -06:00
|
|
|
@wrap_as_background_process("event_push_action_stream_orderings")
|
|
|
|
async def _find_stream_orderings_for_times(self) -> None:
|
|
|
|
await self.db_pool.runInteraction(
|
2018-03-01 07:05:41 -07:00
|
|
|
"_find_stream_orderings_for_times",
|
2018-07-25 02:41:12 -06:00
|
|
|
self._find_stream_orderings_for_times_txn,
|
2018-03-01 07:05:41 -07:00
|
|
|
)
|
|
|
|
|
2020-10-09 05:37:51 -06:00
|
|
|
def _find_stream_orderings_for_times_txn(self, txn: LoggingTransaction) -> None:
|
2018-03-01 07:05:41 -07:00
|
|
|
logger.info("Searching for stream ordering 1 month ago")
|
|
|
|
self.stream_ordering_month_ago = self._find_first_stream_ordering_after_ts_txn(
|
|
|
|
txn, self._clock.time_msec() - 30 * 24 * 60 * 60 * 1000
|
|
|
|
)
|
|
|
|
logger.info(
|
2019-04-03 03:07:29 -06:00
|
|
|
"Found stream ordering 1 month ago: it's %d", self.stream_ordering_month_ago
|
2018-03-01 07:05:41 -07:00
|
|
|
)
|
|
|
|
logger.info("Searching for stream ordering 1 day ago")
|
|
|
|
self.stream_ordering_day_ago = self._find_first_stream_ordering_after_ts_txn(
|
|
|
|
txn, self._clock.time_msec() - 24 * 60 * 60 * 1000
|
|
|
|
)
|
|
|
|
logger.info(
|
2019-04-03 03:07:29 -06:00
|
|
|
"Found stream ordering 1 day ago: it's %d", self.stream_ordering_day_ago
|
2018-03-01 07:05:41 -07:00
|
|
|
)
|
|
|
|
|
2020-08-28 09:34:50 -06:00
|
|
|
async def find_first_stream_ordering_after_ts(self, ts: int) -> int:
|
2018-03-05 04:53:39 -07:00
|
|
|
"""Gets the stream ordering corresponding to a given timestamp.
|
|
|
|
|
|
|
|
Specifically, finds the stream_ordering of the first event that was
|
2018-03-05 04:47:48 -07:00
|
|
|
received on or after the timestamp. This is done by a binary search on
|
|
|
|
the events table, since there is no index on received_ts, so is
|
2018-03-05 04:53:39 -07:00
|
|
|
relatively slow.
|
|
|
|
|
|
|
|
Args:
|
2020-08-28 09:34:50 -06:00
|
|
|
ts: timestamp in millis
|
2018-03-05 04:53:39 -07:00
|
|
|
|
|
|
|
Returns:
|
2020-08-28 09:34:50 -06:00
|
|
|
stream ordering of the first event received on/after the timestamp
|
2018-03-01 07:05:41 -07:00
|
|
|
"""
|
2020-08-28 09:34:50 -06:00
|
|
|
return await self.db_pool.runInteraction(
|
2018-03-05 04:53:39 -07:00
|
|
|
"_find_first_stream_ordering_after_ts_txn",
|
|
|
|
self._find_first_stream_ordering_after_ts_txn,
|
|
|
|
ts,
|
|
|
|
)
|
|
|
|
|
2018-03-05 04:47:48 -07:00
|
|
|
    @staticmethod
    def _find_first_stream_ordering_after_ts_txn(
        txn: LoggingTransaction, ts: int
    ) -> int:
        """
        Find the stream_ordering of the first event that was received on or
        after a given timestamp. This is relatively slow as there is no index
        on received_ts but we can then use this to delete push actions before
        this.

        received_ts must necessarily be in the same order as stream_ordering
        and stream_ordering is indexed, so we manually binary search using
        stream_ordering

        Args:
            txn:
            ts: timestamp to search for

        Returns:
            The stream ordering
        """
        txn.execute("SELECT MAX(stream_ordering) FROM events")
        max_stream_ordering = cast(Tuple[Optional[int]], txn.fetchone())[0]

        # An empty events table: every stream ordering is "after" ts.
        if max_stream_ordering is None:
            return 0

        # We want the first stream_ordering in which received_ts is greater
        # than or equal to ts. Call this point X.
        #
        # We maintain the invariants:
        #
        #   range_start <= X <= range_end
        #
        range_start = 0
        range_end = max_stream_ordering + 1

        # Given a stream_ordering, look up the timestamp at that
        # stream_ordering.
        #
        # The array may be sparse (we may be missing some stream_orderings).
        # We treat the gaps as the same as having the same value as the
        # preceding entry, because we will pick the lowest stream_ordering
        # which satisfies our requirement of received_ts >= ts.
        #
        # For example, if our array of events indexed by stream_ordering is
        # [10, <none>, 20], we should treat this as being equivalent to
        # [10, 10, 20].
        #
        sql = """
            SELECT received_ts FROM events
            WHERE stream_ordering <= ?
            ORDER BY stream_ordering DESC
            LIMIT 1
        """

        # Standard binary search: each iteration halves [range_start, range_end).
        while range_end - range_start > 0:
            middle = (range_end + range_start) // 2
            txn.execute(sql, (middle,))
            row = txn.fetchone()
            if row is None:
                # no rows with stream_ordering<=middle
                range_start = middle + 1
                continue

            middle_ts = row[0]
            if ts > middle_ts:
                # we got a timestamp lower than the one we were looking for.
                # definitely need to look higher: X > middle.
                range_start = middle + 1
            else:
                # we got a timestamp higher than (or the same as) the one we
                # were looking for. We aren't yet sure about the point we
                # looked up, but we can be sure that X <= middle.
                range_end = middle

        # Loop exits when range_start == range_end == X.
        return range_end
|
|
|
|
|
2021-12-21 06:25:34 -07:00
|
|
|
async def get_time_of_last_push_action_before(
|
|
|
|
self, stream_ordering: int
|
|
|
|
) -> Optional[int]:
|
|
|
|
def f(txn: LoggingTransaction) -> Optional[Tuple[int]]:
|
2022-08-04 13:24:44 -06:00
|
|
|
sql = """
|
|
|
|
SELECT e.received_ts
|
|
|
|
FROM event_push_actions AS ep
|
|
|
|
JOIN events e ON ep.room_id = e.room_id AND ep.event_id = e.event_id
|
|
|
|
WHERE ep.stream_ordering > ? AND notif = 1
|
|
|
|
ORDER BY ep.stream_ordering ASC
|
|
|
|
LIMIT 1
|
|
|
|
"""
|
2020-03-09 07:58:38 -06:00
|
|
|
txn.execute(sql, (stream_ordering,))
|
2021-12-29 06:04:28 -07:00
|
|
|
return cast(Optional[Tuple[int]], txn.fetchone())
|
2020-03-09 07:58:38 -06:00
|
|
|
|
2020-08-05 14:38:57 -06:00
|
|
|
result = await self.db_pool.runInteraction(
|
|
|
|
"get_time_of_last_push_action_before", f
|
|
|
|
)
|
2020-03-09 07:58:38 -06:00
|
|
|
return result[0] if result else None
|
|
|
|
|
2020-10-09 05:37:51 -06:00
|
|
|
@wrap_as_background_process("rotate_notifs")
|
2021-12-21 06:25:34 -07:00
|
|
|
async def _rotate_notifs(self) -> None:
|
2017-02-03 11:12:53 -07:00
|
|
|
if self._doing_notif_rotation or self.stream_ordering_day_ago is None:
|
|
|
|
return
|
|
|
|
self._doing_notif_rotation = True
|
|
|
|
|
|
|
|
try:
|
2022-06-28 06:13:44 -06:00
|
|
|
# First we recalculate push summaries and delete stale push actions
|
|
|
|
# for rooms/users with new receipts.
|
|
|
|
while True:
|
|
|
|
logger.debug("Handling new receipts")
|
|
|
|
|
|
|
|
caught_up = await self.db_pool.runInteraction(
|
|
|
|
"_handle_new_receipts_for_notifs_txn",
|
|
|
|
self._handle_new_receipts_for_notifs_txn,
|
|
|
|
)
|
|
|
|
if caught_up:
|
|
|
|
break
|
|
|
|
|
|
|
|
# Then we update the event push summaries for any new events
|
2017-02-03 11:12:53 -07:00
|
|
|
while True:
|
|
|
|
logger.info("Rotating notifications")
|
|
|
|
|
2020-08-05 14:38:57 -06:00
|
|
|
caught_up = await self.db_pool.runInteraction(
|
2019-04-03 03:07:29 -06:00
|
|
|
"_rotate_notifs", self._rotate_notifs_txn
|
2017-02-03 11:12:53 -07:00
|
|
|
)
|
|
|
|
if caught_up:
|
|
|
|
break
|
2022-06-15 09:17:14 -06:00
|
|
|
|
2022-06-28 06:13:44 -06:00
|
|
|
# Finally we clear out old event push actions.
|
2022-06-15 09:17:14 -06:00
|
|
|
await self._remove_old_push_actions_that_have_rotated()
|
2017-02-03 11:12:53 -07:00
|
|
|
finally:
|
|
|
|
self._doing_notif_rotation = False
|
|
|
|
|
2022-06-28 06:13:44 -06:00
|
|
|
    def _handle_new_receipts_for_notifs_txn(self, txn: LoggingTransaction) -> bool:
        """Check for new read receipts and delete from event push actions.

        Any push actions which predate the user's most recent read receipt are
        now redundant, so we can remove them from `event_push_actions` and
        update `event_push_summary`.

        Returns true if all new receipts have been processed.
        """

        # Number of receipts handled per call; also used below to decide
        # whether we have caught up.
        limit = 100

        # The (inclusive) receipt stream ID that was previously processed.
        min_receipts_stream_id = self.db_pool.simple_select_one_onecol_txn(
            txn,
            table="event_push_summary_last_receipt_stream_id",
            keyvalues={},
            retcol="stream_id",
        )

        max_receipts_stream_id = self._receipts_id_gen.get_current_token()

        # The (inclusive) event stream ordering that was previously summarised.
        old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
            txn,
            table="event_push_summary_stream_ordering",
            keyvalues={},
            retcol="stream_ordering",
        )

        sql = """
            SELECT r.stream_id, r.room_id, r.user_id, e.stream_ordering
            FROM receipts_linearized AS r
            INNER JOIN events AS e USING (event_id)
            WHERE ? < r.stream_id AND r.stream_id <= ? AND user_id LIKE ?
            ORDER BY r.stream_id ASC
            LIMIT ?
        """

        # We only want local users, so we add a dodgy filter to the above query
        # and recheck it below.
        user_filter = "%:" + self.hs.hostname

        txn.execute(
            sql,
            (
                min_receipts_stream_id,
                max_receipts_stream_id,
                user_filter,
                limit,
            ),
        )
        rows = cast(List[Tuple[int, str, str, int]], txn.fetchall())

        # For each new read receipt we delete push actions from before it and
        # recalculate the summary.
        for _, room_id, user_id, stream_ordering in rows:
            # Only handle our own read receipts (the LIKE filter above is not
            # authoritative).
            if not self.hs.is_mine_id(user_id):
                continue

            # Highlights are kept (they are deleted separately on a longer
            # timescale); everything else up to the receipt is now redundant.
            txn.execute(
                """
                DELETE FROM event_push_actions
                WHERE room_id = ?
                    AND user_id = ?
                    AND stream_ordering <= ?
                    AND highlight = 0
                """,
                (room_id, user_id, stream_ordering),
            )

            # Fetch the notification counts between the stream ordering of the
            # latest receipt and what was previously summarised.
            notif_count, unread_count = self._get_notif_unread_count_for_user_room(
                txn, room_id, user_id, stream_ordering, old_rotate_stream_ordering
            )

            # Replace the previous summary with the new counts.
            #
            # TODO(threads): Upsert per-thread instead of setting them all to main.
            self.db_pool.simple_upsert_txn(
                txn,
                table="event_push_summary",
                keyvalues={"room_id": room_id, "user_id": user_id},
                values={
                    "notif_count": notif_count,
                    "unread_count": unread_count,
                    "stream_ordering": old_rotate_stream_ordering,
                    "last_receipt_stream_ordering": stream_ordering,
                    "thread_id": "main",
                },
            )

        # We always update `event_push_summary_last_receipt_stream_id` to
        # ensure that we don't rescan the same receipts for remote users.

        receipts_last_processed_stream_id = max_receipts_stream_id
        if len(rows) >= limit:
            # If we pulled out a limited number of rows we only update the
            # position to the last receipt we processed, so we continue
            # processing the rest next iteration.
            receipts_last_processed_stream_id = rows[-1][0]

        self.db_pool.simple_update_txn(
            txn,
            table="event_push_summary_last_receipt_stream_id",
            keyvalues={},
            updatevalues={"stream_id": receipts_last_processed_stream_id},
        )

        # Caught up iff the query returned fewer rows than we asked for.
        return len(rows) < limit
|
|
|
|
|
2021-12-21 06:25:34 -07:00
|
|
|
    def _rotate_notifs_txn(self, txn: LoggingTransaction) -> bool:
        """Archives older notifications (from event_push_actions) into event_push_summary.

        Returns whether the archiving process has caught up or not.
        """

        # The (inclusive) event stream ordering that was previously summarised.
        old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
            txn,
            table="event_push_summary_stream_ordering",
            keyvalues={},
            retcol="stream_ordering",
        )

        # We don't want to try and rotate millions of rows at once, so we cap
        # the maximum stream ordering we'll rotate before: find the stream
        # ordering `self._rotate_count` rows past the previous position.
        txn.execute(
            """
            SELECT stream_ordering FROM event_push_actions
            WHERE stream_ordering > ?
            ORDER BY stream_ordering ASC LIMIT 1 OFFSET ?
            """,
            (old_rotate_stream_ordering, self._rotate_count),
        )
        stream_row = txn.fetchone()
        if stream_row:
            (offset_stream_ordering,) = stream_row

            # We need to bound by the current token to ensure that we handle
            # out-of-order writes correctly.
            rotate_to_stream_ordering = min(
                offset_stream_ordering, self._stream_id_gen.get_current_token()
            )
            caught_up = False
        else:
            # Fewer than `self._rotate_count` rows remain: this pass will
            # consume everything up to the current token.
            rotate_to_stream_ordering = self._stream_id_gen.get_current_token()
            caught_up = True

        logger.info("Rotating notifications up to: %s", rotate_to_stream_ordering)

        self._rotate_notifs_before_txn(
            txn, old_rotate_stream_ordering, rotate_to_stream_ordering
        )

        return caught_up
|
|
|
|
|
2021-12-21 06:25:34 -07:00
|
|
|
    def _rotate_notifs_before_txn(
        self,
        txn: LoggingTransaction,
        old_rotate_stream_ordering: int,
        rotate_to_stream_ordering: int,
    ) -> None:
        """Archives older notifications (from event_push_actions) into event_push_summary.

        Any event_push_actions between old_rotate_stream_ordering (exclusive) and
        rotate_to_stream_ordering (inclusive) will be added to the event_push_summary
        table.

        Args:
            txn: The database transaction.
            old_rotate_stream_ordering: The previous maximum event stream ordering.
            rotate_to_stream_ordering: The new maximum event stream ordering to summarise.
        """

        # Calculate the new counts that should be upserted into event_push_summary.
        # NB: the two %s placeholders are filled with fixed column names
        # below ("unread_count"/"unread" and "notif_count"/"notif"), never
        # with user input.
        sql = """
            SELECT user_id, room_id,
                coalesce(old.%s, 0) + upd.cnt,
                upd.stream_ordering
            FROM (
                SELECT user_id, room_id, count(*) as cnt,
                    max(ea.stream_ordering) as stream_ordering
                FROM event_push_actions AS ea
                LEFT JOIN event_push_summary AS old USING (user_id, room_id)
                WHERE ? < ea.stream_ordering AND ea.stream_ordering <= ?
                    AND (
                        old.last_receipt_stream_ordering IS NULL
                        OR old.last_receipt_stream_ordering < ea.stream_ordering
                    )
                    AND %s = 1
                GROUP BY user_id, room_id
            ) AS upd
            LEFT JOIN event_push_summary AS old USING (user_id, room_id)
        """

        # First get the count of unread messages.
        txn.execute(
            sql % ("unread_count", "unread"),
            (old_rotate_stream_ordering, rotate_to_stream_ordering),
        )

        # We need to merge results from the two requests (the one that retrieves the
        # unread count and the one that retrieves the notifications count) into a single
        # object because we might not have the same amount of rows in each of them. To do
        # this, we use a dict indexed on the user ID and room ID to make it easier to
        # populate.
        summaries: Dict[Tuple[str, str], _EventPushSummary] = {}
        for row in txn:
            summaries[(row[0], row[1])] = _EventPushSummary(
                unread_count=row[2],
                stream_ordering=row[3],
                notif_count=0,
            )

        # Then get the count of notifications.
        txn.execute(
            sql % ("notif_count", "notif"),
            (old_rotate_stream_ordering, rotate_to_stream_ordering),
        )

        for row in txn:
            if (row[0], row[1]) in summaries:
                summaries[(row[0], row[1])].notif_count = row[2]
            else:
                # Because the rules on notifying are different than the rules on marking
                # a message unread, we might end up with messages that notify but aren't
                # marked unread, so we might not have a summary for this (user, room)
                # tuple to complete.
                summaries[(row[0], row[1])] = _EventPushSummary(
                    unread_count=0,
                    stream_ordering=row[3],
                    notif_count=row[2],
                )

        logger.info("Rotating notifications, handling %d rows", len(summaries))

        # TODO(threads): Update on a per-thread basis.
        self.db_pool.simple_upsert_many_txn(
            txn,
            table="event_push_summary",
            key_names=("user_id", "room_id"),
            key_values=[(user_id, room_id) for user_id, room_id in summaries],
            value_names=("notif_count", "unread_count", "stream_ordering", "thread_id"),
            value_values=[
                (
                    summary.notif_count,
                    summary.unread_count,
                    summary.stream_ordering,
                    "main",
                )
                for summary in summaries.values()
            ],
        )

        # Record the new high-water mark so the next rotation starts from here.
        txn.execute(
            "UPDATE event_push_summary_stream_ordering SET stream_ordering = ?",
            (rotate_to_stream_ordering,),
        )
|
|
|
|
|
2022-08-15 07:33:17 -06:00
|
|
|
    async def _remove_old_push_actions_that_have_rotated(self) -> None:
        """Clear out old push actions that have been summarised."""

        # We want to clear out anything that is older than a day that *has* already
        # been rotated.
        rotated_upto_stream_ordering = await self.db_pool.simple_select_one_onecol(
            table="event_push_summary_stream_ordering",
            keyvalues={},
            retcol="stream_ordering",
        )

        max_stream_ordering_to_delete = min(
            rotated_upto_stream_ordering, self.stream_ordering_day_ago
        )

        def remove_old_push_actions_that_have_rotated_txn(
            txn: LoggingTransaction,
        ) -> bool:
            # We don't want to clear out too much at a time, so we bound our
            # deletes.
            batch_size = self._rotate_count

            # Find the stream ordering `batch_size` rows in, to use as this
            # batch's deletion bound.
            txn.execute(
                """
                SELECT stream_ordering FROM event_push_actions
                WHERE stream_ordering <= ? AND highlight = 0
                ORDER BY stream_ordering ASC LIMIT 1 OFFSET ?
                """,
                (
                    max_stream_ordering_to_delete,
                    batch_size,
                ),
            )
            stream_row = txn.fetchone()

            if stream_row:
                (stream_ordering,) = stream_row
            else:
                # Fewer than `batch_size` rows remain; delete everything up
                # to the overall bound.
                stream_ordering = max_stream_ordering_to_delete

            # We need to use an inclusive bound here to handle the case where a
            # single stream ordering has more than `batch_size` rows.
            txn.execute(
                """
                DELETE FROM event_push_actions
                WHERE stream_ordering <= ? AND highlight = 0
                """,
                (stream_ordering,),
            )

            logger.info("Rotating notifications, deleted %s push actions", txn.rowcount)

            return txn.rowcount < batch_size

        # Keep deleting in bounded batches until a batch comes up short.
        while True:
            done = await self.db_pool.runInteraction(
                "_remove_old_push_actions_that_have_rotated",
                remove_old_push_actions_that_have_rotated_txn,
            )
            if done:
                break
|
|
|
|
|
2016-02-03 03:50:49 -07:00
|
|
|
|
2020-10-09 05:37:51 -06:00
|
|
|
class EventPushActionsStore(EventPushActionsWorkerStore):
    # Name of the background update that adds the user/stream-ordering index.
    EPA_HIGHLIGHT_INDEX = "epa_highlight_index"

    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        # Index for looking up a user's push actions by stream ordering.
        self.db_pool.updates.register_background_index_update(
            self.EPA_HIGHLIGHT_INDEX,
            index_name="event_push_actions_u_highlight",
            table="event_push_actions",
            columns=["user_id", "stream_ordering"],
        )

        # Partial index covering only highlight actions.
        self.db_pool.updates.register_background_index_update(
            "event_push_actions_highlights_index",
            index_name="event_push_actions_highlights_index",
            table="event_push_actions",
            columns=["user_id", "room_id", "topological_ordering", "stream_ordering"],
            where_clause="highlight=1",
        )

        # Add index to make deleting old push actions faster.
        self.db_pool.updates.register_background_index_update(
            "event_push_actions_stream_highlight_index",
            index_name="event_push_actions_stream_highlight_index",
            table="event_push_actions",
            columns=["highlight", "stream_ordering"],
            where_clause="highlight=0",
        )

    async def get_push_actions_for_user(
        self,
        user_id: str,
        before: Optional[str] = None,
        limit: int = 50,
        only_highlight: bool = False,
    ) -> List[UserPushAction]:
        """Fetch a user's most recent push actions, newest first.

        Args:
            user_id: The user to fetch push actions for.
            before: If set, only return actions with a stream ordering
                strictly below this value.
                NOTE(review): typed `str` but bound against the integer
                `epa.stream_ordering` column — confirm callers pass a
                stringified stream ordering.
            limit: The maximum number of rows to return.
            only_highlight: If True, restrict results to highlight actions.

        Returns:
            A list of UserPushAction, ordered by descending stream ordering.
        """

        def f(
            txn: LoggingTransaction,
        ) -> List[Tuple[str, str, int, int, str, bool, str, int]]:
            before_clause = ""
            if before:
                before_clause = "AND epa.stream_ordering < ?"
                args = [user_id, before, limit]
            else:
                args = [user_id, limit]

            if only_highlight:
                if len(before_clause) > 0:
                    before_clause += " "
                before_clause += "AND epa.highlight = 1"

            # NB. This assumes event_ids are globally unique since
            # it makes the query easier to index
            sql = """
                SELECT epa.event_id, epa.room_id,
                    epa.stream_ordering, epa.topological_ordering,
                    epa.actions, epa.highlight, epa.profile_tag, e.received_ts
                FROM event_push_actions epa, events e
                WHERE epa.event_id = e.event_id
                    AND epa.user_id = ? %s
                    AND epa.notif = 1
                ORDER BY epa.stream_ordering DESC
                LIMIT ?
            """ % (
                before_clause,
            )
            txn.execute(sql, args)
            return cast(
                List[Tuple[str, str, int, int, str, bool, str, int]], txn.fetchall()
            )

        push_actions = await self.db_pool.runInteraction("get_push_actions_for_user", f)
        return [
            UserPushAction(
                event_id=row[0],
                room_id=row[1],
                stream_ordering=row[2],
                actions=_deserialize_action(row[4], row[5]),
                received_ts=row[7],
                topological_ordering=row[3],
                highlight=row[5],
                profile_tag=row[6],
            )
            for row in push_actions
        ]
|
|
|
2022-08-16 05:22:17 -06:00
|
|
|
def _action_has_highlight(actions: Collection[Union[Mapping, str]]) -> bool:
|
2016-02-03 03:50:49 -07:00
|
|
|
for action in actions:
|
2021-12-21 06:25:34 -07:00
|
|
|
if not isinstance(action, dict):
|
|
|
|
continue
|
|
|
|
|
|
|
|
if action.get("set_tweak", None) == "highlight":
|
|
|
|
return action.get("value", True)
|
2016-02-03 03:50:49 -07:00
|
|
|
|
|
|
|
return False
|
2020-09-02 10:19:37 -06:00
|
|
|
|
|
|
|
|
2021-12-21 06:25:34 -07:00
|
|
|
@attr.s(slots=True, auto_attribs=True)
class _EventPushSummary:
    """Summary of pending event push actions for a given user in a given room.

    Used in _rotate_notifs_before_txn to manipulate results from event_push_actions.
    """

    # Count of unread messages for this user/room.
    unread_count: int
    # Stream ordering up to which this summary applies.
    stream_ordering: int
    # Count of notifying push actions for this user/room.
    notif_count: int
|