Merge remote-tracking branch 'origin/release-v1.80' into matrix-org-hotfixes

commit 6400f03029
@@ -10,6 +10,7 @@ on:
 
 permissions:
   contents: read
+  packages: write
 
 jobs:
   build:
@@ -34,11 +35,20 @@ jobs:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
 
+      - name: Log in to GHCR
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
       - name: Calculate docker image tag
         id: set-tag
         uses: docker/metadata-action@master
         with:
-          images: matrixdotorg/synapse
+          images: |
+            docker.io/matrixdotorg/synapse
+            ghcr.io/matrix-org/synapse
           flavor: |
             latest=false
           tags: |
@@ -4,13 +4,15 @@ name: Build release artifacts
 
 on:
   # we build on PRs and develop to (hopefully) get early warning
-  # of things breaking (but only build one set of debs)
+  # of things breaking (but only build one set of debs). PRs skip
+  # building wheels on macOS & ARM.
   pull_request:
   push:
     branches: ["develop", "release-*"]
 
     # we do the full build on tags.
     tags: ["v*"]
+  merge_group:
   workflow_dispatch:
 
 concurrency:
@@ -4,6 +4,7 @@ on:
   push:
     branches: ["develop", "release-*"]
   pull_request:
+  merge_group:
   workflow_dispatch:
 
 concurrency:
@@ -1,3 +1,9 @@
+Synapse 1.79.0 (2023-03-14)
+===========================
+
+No significant changes since 1.79.0rc2.
+
+
 Synapse 1.79.0rc2 (2023-03-13)
 ==============================
 
@@ -13,9 +13,9 @@ dependencies = [
 
 [[package]]
 name = "anyhow"
-version = "1.0.69"
+version = "1.0.70"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800"
+checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4"
 
 [[package]]
 name = "arc-swap"
@@ -185,9 +185,9 @@ dependencies = [
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.46"
+version = "1.0.52"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b"
+checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224"
 dependencies = [
  "unicode-ident",
 ]
@@ -250,7 +250,7 @@ dependencies = [
  "proc-macro2",
  "pyo3-macros-backend",
  "quote",
- "syn",
+ "syn 1.0.104",
 ]
 
 [[package]]
@@ -261,7 +261,7 @@ checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.104",
 ]
 
 [[package]]
@@ -276,9 +276,9 @@ dependencies = [
 
 [[package]]
 name = "quote"
-version = "1.0.21"
+version = "1.0.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
+checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
 dependencies = [
  "proc-macro2",
 ]
@@ -323,22 +323,22 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
 
 [[package]]
 name = "serde"
-version = "1.0.152"
+version = "1.0.157"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
+checksum = "707de5fcf5df2b5788fca98dd7eab490bc2fd9b7ef1404defc462833b83f25ca"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.152"
+version = "1.0.157"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e"
+checksum = "78997f4555c22a7971214540c4a661291970619afd56de19f77e0de86296e1e5"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.2",
 ]
 
 [[package]]
@@ -375,6 +375,17 @@ dependencies = [
  "unicode-ident",
 ]
 
+[[package]]
+name = "syn"
+version = "2.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "59d3276aee1fa0c33612917969b5172b5be2db051232a6e4826f1a1a9191b045"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
 [[package]]
 name = "synapse"
 version = "0.1.0"
@@ -0,0 +1 @@
+Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change.
@@ -0,0 +1 @@
+Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change.
@@ -0,0 +1 @@
+Add additional functionality to declaring worker types when starting Complement in worker mode.
@@ -0,0 +1 @@
+Add `Synapse-Trace-Id` to `access-control-expose-headers` header.
@@ -0,0 +1 @@
+Stabilise support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): `event_property_contains` push condition.
@@ -0,0 +1 @@
+Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to fix a long-standing bug where properties with dots were handled ambiguously in push rules.
@@ -0,0 +1 @@
+Improve performance of creating and authenticating events.
@@ -0,0 +1 @@
+Make the `HttpTransactionCache` use the `Requester` in addition to just the `Request` to build the transaction key.
@@ -0,0 +1 @@
+Improve log lines when purging rooms.
@@ -0,0 +1 @@
+Add a missing endpoint to the workers documentation.
@@ -0,0 +1 @@
+Add topic and name events to group of events that are batch persisted when creating a room.
@@ -0,0 +1 @@
+Improve type hints.
@@ -0,0 +1 @@
+Improve type hints.
@@ -0,0 +1 @@
+Faster joins: Fix a bug introduced in Synapse 1.66 where spurious "Failed to find memberships ..." errors would be logged.
@@ -0,0 +1 @@
+Fix long-standing error when sending message into deleted room.
@@ -0,0 +1 @@
+Move various module API callback registration methods to a dedicated class.
@@ -0,0 +1 @@
+Improve type hints.
@@ -0,0 +1 @@
+Ensure the Dockerfile builds on platforms that don't have a `cryptography` wheel.
@@ -0,0 +1 @@
+Configure GitHub Actions for merge queues.
@@ -0,0 +1 @@
+Add schema comments about the `destinations` and `destination_rooms` tables.
@@ -0,0 +1 @@
+Implement [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper.
@@ -0,0 +1 @@
+Bump hiredis from 2.2.1 to 2.2.2.
@@ -0,0 +1 @@
+Bump serde from 1.0.152 to 1.0.155.
@@ -0,0 +1 @@
+Bump pysaml2 from 7.2.1 to 7.3.1.
@@ -0,0 +1 @@
+Bump msgpack from 1.0.4 to 1.0.5.
@@ -0,0 +1 @@
+Bump gitpython from 3.1.30 to 3.1.31.
@@ -0,0 +1 @@
+Bump cryptography from 39.0.1 to 39.0.2.
@@ -0,0 +1 @@
+Skip processing of auto-join room behaviour if there are no auto-join rooms configured.
@@ -0,0 +1 @@
+Remove unused store method `_set_destination_retry_timings_emulated`.
@@ -0,0 +1 @@
+Allow loading `/register/available` endpoint on workers.
@@ -0,0 +1 @@
+Reorganize URL preview code.
@@ -0,0 +1 @@
+Clean-up direct TCP replication code.
@@ -0,0 +1 @@
+Clean-up direct TCP replication code.
@@ -0,0 +1 @@
+Make `configure_workers_and_start` script used in Complement tests compatible with older versions of Python.
@@ -0,0 +1 @@
+Mirror images to the GitHub Container Registry (`ghcr.io/matrix-org/synapse`).
@@ -0,0 +1 @@
+Mirror images to the GitHub Container Registry (`ghcr.io/matrix-org/synapse`).
@@ -0,0 +1 @@
+Bump pydantic from 1.10.4 to 1.10.6.
@@ -0,0 +1 @@
+Bump serde from 1.0.155 to 1.0.157.
@@ -0,0 +1 @@
+Bump anyhow from 1.0.69 to 1.0.70.
@@ -0,0 +1 @@
+Bump txredisapi from 1.4.7 to 1.4.9.
@@ -0,0 +1 @@
+Bump pygithub from 1.57 to 1.58.1.
@@ -0,0 +1 @@
+Bump types-requests from 2.28.11.12 to 2.28.11.15.
@@ -0,0 +1 @@
+Add a `/versions` flag for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952).
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.79.0) stable; urgency=medium
+
+  * New Synapse release 1.79.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 14 Mar 2023 16:14:50 +0100
+
 matrix-synapse-py3 (1.79.0~rc2) stable; urgency=medium
 
   * New Synapse release 1.79.0rc2.
@@ -37,9 +37,24 @@ RUN \
   --mount=type=cache,target=/var/cache/apt,sharing=locked \
   --mount=type=cache,target=/var/lib/apt,sharing=locked \
   apt-get update -qq && apt-get install -yqq \
-      build-essential git libffi-dev libssl-dev \
+      build-essential curl git libffi-dev libssl-dev \
   && rm -rf /var/lib/apt/lists/*
 
+# Install rust and ensure it's in the PATH.
+# (Rust may be needed to compile `cryptography`---which is one of poetry's
+# dependencies---on platforms that don't have a `cryptography` wheel.)
+ENV RUSTUP_HOME=/rust
+ENV CARGO_HOME=/cargo
+ENV PATH=/cargo/bin:/rust/bin:$PATH
+RUN mkdir /rust /cargo
+
+RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
+
+# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
+# set to true, so we expose it as a build-arg.
+ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
+ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
+
 # We install poetry in its own build stage to avoid its dependencies conflicting with
 # synapse's dependencies.
 RUN --mount=type=cache,target=/root/.cache/pip \
@@ -51,8 +51,7 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
   # -z True if the length of string is zero.
   if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
     export SYNAPSE_WORKER_TYPES="\
-      event_persister, \
-      event_persister, \
+      event_persister:2, \
       background_worker, \
       frontend_proxy, \
       event_creator, \
@@ -64,7 +63,8 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
       synchrotron, \
       client_reader, \
       appservice, \
-      pusher"
+      pusher, \
+      stream_writers=account_data+presence+receipts+to_device+typing"
 
   fi
   log "Workers requested: $SYNAPSE_WORKER_TYPES"
@@ -19,8 +19,15 @@
 # The environment variables it reads are:
 # * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
 # * SYNAPSE_REPORT_STATS: Whether to report stats.
-# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
-#   below. Leave empty for no workers.
+# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKERS_CONFIG
+#   below. Leave empty for no workers. Add a ':' and a number at the end to
+#   multiply that worker. Append multiple worker types with '+' to merge the
+#   worker types into a single worker. Add a name and a '=' to the front of a
+#   worker type to give this instance a name in logs and nginx.
+#   Examples:
+#   SYNAPSE_WORKER_TYPES='event_persister, federation_sender, client_reader'
+#   SYNAPSE_WORKER_TYPES='event_persister:2, federation_sender:2, client_reader'
+#   SYNAPSE_WORKER_TYPES='stream_writers=account_data+presence+typing'
 # * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
 #   will be treated as Application Service registration files.
 # * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
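For orientation, the hunk above documents the syntax this script reads from the container environment. A minimal sketch of an invocation follows; the image name/tag, server name, and host port mapping are illustrative assumptions, not taken from this diff:

    # Hypothetical example of the SYNAPSE_WORKER_TYPES syntax documented above.
    # Image tag, server name and host port are assumptions for illustration only.
    docker run -d --name synapse-workers \
        -e SYNAPSE_SERVER_NAME=example.com \
        -e SYNAPSE_REPORT_STATS=no \
        -e SYNAPSE_WORKER_TYPES='event_persister:2, federation_sender, stream_writers=account_data+presence+typing' \
        -p 8008:8080 \
        matrixdotorg/synapse-workers:latest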
@@ -40,16 +47,33 @@
 
 import os
 import platform
+import re
 import subprocess
 import sys
+from collections import defaultdict
+from itertools import chain
 from pathlib import Path
-from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional, Set
+from typing import (
+    Any,
+    Dict,
+    List,
+    Mapping,
+    MutableMapping,
+    NoReturn,
+    Optional,
+    Set,
+    SupportsIndex,
+)
 
 import yaml
 from jinja2 import Environment, FileSystemLoader
 
 MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
 
+# A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced
+# during processing with the name of the worker.
+WORKER_PLACEHOLDER_NAME = "placeholder_name"
+
 # Workers with exposed endpoints needs either "client", "federation", or "media" listener_resources
 # Watching /_matrix/client needs a "client" listener
 # Watching /_matrix/federation needs a "federation" listener
@@ -70,11 +94,13 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "endpoint_patterns": [
             "^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
         ],
-        "shared_extra_conf": {"update_user_directory_from_worker": "user_dir1"},
+        "shared_extra_conf": {
+            "update_user_directory_from_worker": WORKER_PLACEHOLDER_NAME
+        },
         "worker_extra_conf": "",
     },
     "media_repository": {
-        "app": "synapse.app.media_repository",
+        "app": "synapse.app.generic_worker",
         "listener_resources": ["media"],
         "endpoint_patterns": [
             "^/_matrix/media/",
@@ -87,7 +113,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         # The first configured media worker will run the media background jobs
         "shared_extra_conf": {
             "enable_media_repo": False,
-            "media_instance_running_background_jobs": "media_repository1",
+            "media_instance_running_background_jobs": WORKER_PLACEHOLDER_NAME,
         },
         "worker_extra_conf": "enable_media_repo: true",
     },
@@ -95,7 +121,9 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "app": "synapse.app.generic_worker",
         "listener_resources": [],
         "endpoint_patterns": [],
-        "shared_extra_conf": {"notify_appservices_from_worker": "appservice1"},
+        "shared_extra_conf": {
+            "notify_appservices_from_worker": WORKER_PLACEHOLDER_NAME
+        },
         "worker_extra_conf": "",
     },
     "federation_sender": {
@@ -135,6 +163,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
             "^/_matrix/client/versions$",
             "^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$",
             "^/_matrix/client/(r0|v3|unstable)/register$",
+            "^/_matrix/client/(r0|v3|unstable)/register/available$",
             "^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$",
             "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
             "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
@@ -192,9 +221,9 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
         "app": "synapse.app.generic_worker",
         "listener_resources": [],
         "endpoint_patterns": [],
-        # This worker cannot be sharded. Therefore there should only ever be one background
-        # worker, and it should be named background_worker1
-        "shared_extra_conf": {"run_background_tasks_on": "background_worker1"},
+        # This worker cannot be sharded. Therefore, there should only ever be one
+        # background worker. This is enforced for the safety of your database.
+        "shared_extra_conf": {"run_background_tasks_on": WORKER_PLACEHOLDER_NAME},
         "worker_extra_conf": "",
     },
     "event_creator": {
@@ -275,7 +304,7 @@ NGINX_LOCATION_CONFIG_BLOCK = """
 """
 
 NGINX_UPSTREAM_CONFIG_BLOCK = """
-upstream {upstream_worker_type} {{
+upstream {upstream_worker_base_name} {{
 {body}
 }}
 """
@@ -326,7 +355,7 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
 
 def add_worker_roles_to_shared_config(
     shared_config: dict,
-    worker_type: str,
+    worker_types_set: Set[str],
     worker_name: str,
     worker_port: int,
 ) -> None:
@@ -334,22 +363,36 @@ def add_worker_roles_to_shared_config(
     append appropriate worker information to it for the current worker_type instance.
 
     Args:
-        shared_config: The config dict that all worker instances share (after being converted to YAML)
-        worker_type: The type of worker (one of those defined in WORKERS_CONFIG).
+        shared_config: The config dict that all worker instances share (after being
+            converted to YAML)
+        worker_types_set: The type of worker (one of those defined in WORKERS_CONFIG).
+            This list can be a single worker type or multiple.
         worker_name: The name of the worker instance.
         worker_port: The HTTP replication port that the worker instance is listening on.
     """
-    # The instance_map config field marks the workers that write to various replication streams
+    # The instance_map config field marks the workers that write to various replication
+    # streams
     instance_map = shared_config.setdefault("instance_map", {})
 
-    # Worker-type specific sharding config
-    if worker_type == "pusher":
+    # This is a list of the stream_writers that there can be only one of. Events can be
+    # sharded, and therefore doesn't belong here.
+    singular_stream_writers = [
+        "account_data",
+        "presence",
+        "receipts",
+        "to_device",
+        "typing",
+    ]
+
+    # Worker-type specific sharding config. Now a single worker can fulfill multiple
+    # roles, check each.
+    if "pusher" in worker_types_set:
         shared_config.setdefault("pusher_instances", []).append(worker_name)
 
-    elif worker_type == "federation_sender":
+    if "federation_sender" in worker_types_set:
         shared_config.setdefault("federation_sender_instances", []).append(worker_name)
 
-    elif worker_type == "event_persister":
+    if "event_persister" in worker_types_set:
         # Event persisters write to the events stream, so we need to update
         # the list of event stream writers
         shared_config.setdefault("stream_writers", {}).setdefault("events", []).append(
@@ -362,11 +405,13 @@ def add_worker_roles_to_shared_config(
             "port": worker_port,
         }
 
-    elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]:
-        # Update the list of stream writers
-        # It's convenient that the name of the worker type is the same as the stream to write
-        shared_config.setdefault("stream_writers", {}).setdefault(
-            worker_type, []
-        ).append(worker_name)
+    # Update the list of stream writers. It's convenient that the name of the worker
+    # type is the same as the stream to write. Iterate over the whole list in case there
+    # is more than one.
+    for worker in worker_types_set:
+        if worker in singular_stream_writers:
+            shared_config.setdefault("stream_writers", {}).setdefault(
+                worker, []
+            ).append(worker_name)
 
     # Map of stream writer instance names to host/ports combos
@@ -377,6 +422,139 @@ def add_worker_roles_to_shared_config(
     }
 
 
+def merge_worker_template_configs(
+    existing_dict: Optional[Dict[str, Any]],
+    to_be_merged_dict: Dict[str, Any],
+) -> Dict[str, Any]:
+    """When given an existing dict of worker template configuration consisting with both
+    dicts and lists, merge new template data from WORKERS_CONFIG(or create) and
+    return new dict.
+
+    Args:
+        existing_dict: Either an existing worker template or a fresh blank one.
+        to_be_merged_dict: The template from WORKERS_CONFIGS to be merged into
+            existing_dict.
+    Returns: The newly merged together dict values.
+    """
+    new_dict: Dict[str, Any] = {}
+    if not existing_dict:
+        # It doesn't exist yet, just use the new dict(but take a copy not a reference)
+        new_dict = to_be_merged_dict.copy()
+    else:
+        for i in to_be_merged_dict.keys():
+            if (i == "endpoint_patterns") or (i == "listener_resources"):
+                # merge the two lists, remove duplicates
+                new_dict[i] = list(set(existing_dict[i] + to_be_merged_dict[i]))
+            elif i == "shared_extra_conf":
+                # merge dictionary's, the worker name will be replaced later
+                new_dict[i] = {**existing_dict[i], **to_be_merged_dict[i]}
+            elif i == "worker_extra_conf":
+                # There is only one worker type that has a 'worker_extra_conf' and it is
+                # the media_repo. Since duplicate worker types on the same worker don't
+                # work, this is fine.
+                new_dict[i] = existing_dict[i] + to_be_merged_dict[i]
+            else:
+                # Everything else should be identical, like "app", which only works
+                # because all apps are now generic_workers.
+                new_dict[i] = to_be_merged_dict[i]
+    return new_dict
+
+
+def insert_worker_name_for_worker_config(
+    existing_dict: Dict[str, Any], worker_name: str
+) -> Dict[str, Any]:
+    """Insert a given worker name into the worker's configuration dict.
+
+    Args:
+        existing_dict: The worker_config dict that is imported into shared_config.
+        worker_name: The name of the worker to insert.
+    Returns: Copy of the dict with newly inserted worker name
+    """
+    dict_to_edit = existing_dict.copy()
+    for k, v in dict_to_edit["shared_extra_conf"].items():
+        # Only proceed if it's the placeholder name string
+        if v == WORKER_PLACEHOLDER_NAME:
+            dict_to_edit["shared_extra_conf"][k] = worker_name
+    return dict_to_edit
+
+
+def apply_requested_multiplier_for_worker(worker_types: List[str]) -> List[str]:
+    """
+    Apply multiplier(if found) by returning a new expanded list with some basic error
+    checking.
+
+    Args:
+        worker_types: The unprocessed List of requested workers
+    Returns:
+        A new list with all requested workers expanded.
+    """
+    # Checking performed:
+    # 1. if worker:2 or more is declared, it will create additional workers up to number
+    # 2. if worker:1, it will create a single copy of this worker as if no number was
+    #    given
+    # 3. if worker:0 is declared, this worker will be ignored. This is to allow for
+    #    scripting and automated expansion and is intended behaviour.
+    # 4. if worker:NaN or is a negative number, it will error and log it.
+    new_worker_types = []
+    for worker_type in worker_types:
+        if ":" in worker_type:
+            worker_type_components = split_and_strip_string(worker_type, ":", 1)
+            worker_count = 0
+            # Should only be 2 components, a type of worker(s) and an integer as a
+            # string. Cast the number as an int then it can be used as a counter.
+            try:
+                worker_count = int(worker_type_components[1])
+            except ValueError:
+                error(
+                    f"Bad number in worker count for '{worker_type}': "
+                    f"'{worker_type_components[1]}' is not an integer"
+                )
+
+            # As long as there are more than 0, we add one to the list to make below.
+            for _ in range(worker_count):
+                new_worker_types.append(worker_type_components[0])
+
+        else:
+            # If it's not a real worker_type, it will error out later.
+            new_worker_types.append(worker_type)
+    return new_worker_types
+
+
+def is_sharding_allowed_for_worker_type(worker_type: str) -> bool:
+    """Helper to check to make sure worker types that cannot have multiples do not.
+
+    Args:
+        worker_type: The type of worker to check against.
+    Returns: True if allowed, False if not
+    """
+    return worker_type not in [
+        "background_worker",
+        "account_data",
+        "presence",
+        "receipts",
+        "typing",
+        "to_device",
+    ]
+
+
+def split_and_strip_string(
+    given_string: str, split_char: str, max_split: SupportsIndex = -1
+) -> List[str]:
+    """
+    Helper to split a string on split_char and strip whitespace from each end of each
+    element.
+    Args:
+        given_string: The string to split
+        split_char: The character to split the string on
+        max_split: kwarg for split() to limit how many times the split() happens
+    Returns:
+        A List of strings
+    """
+    # Removes whitespace from ends of result strings before adding to list. Allow for
+    # overriding 'maxsplit' kwarg, default being -1 to signify no maximum.
+    return [x.strip() for x in given_string.split(split_char, maxsplit=max_split)]
+
+
 def generate_base_homeserver_config() -> None:
     """Starts Synapse and generates a basic homeserver config, which will later be
     modified for worker support.
@@ -389,29 +567,153 @@ def generate_base_homeserver_config() -> None:
     subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
 
 
+def parse_worker_types(
+    requested_worker_types: List[str],
+) -> Dict[str, Set[str]]:
+    """Read the desired list of requested workers and prepare the data for use in
+    generating worker config files while also checking for potential gotchas.
+
+    Args:
+        requested_worker_types: The list formed from the split environment variable
+            containing the unprocessed requests for workers.
+
+    Returns: A dict of worker names to set of worker types. Format:
+        {'worker_name':
+            {'worker_type', 'worker_type2'}
+        }
+    """
+    # A counter of worker_base_name -> int. Used for determining the name for a given
+    # worker when generating its config file, as each worker's name is just
+    # worker_base_name followed by instance number
+    worker_base_name_counter: Dict[str, int] = defaultdict(int)
+
+    # Similar to above, but more finely grained. This is used to determine we don't have
+    # more than a single worker for cases where multiples would be bad(e.g. presence).
+    worker_type_shard_counter: Dict[str, int] = defaultdict(int)
+
+    # The final result of all this processing
+    dict_to_return: Dict[str, Set[str]] = {}
+
+    # Handle any multipliers requested for given workers.
+    multiple_processed_worker_types = apply_requested_multiplier_for_worker(
+        requested_worker_types
+    )
+
+    # Process each worker_type_string
+    # Examples of expected formats:
+    #  - requested_name=type1+type2+type3
+    #  - synchrotron
+    #  - event_creator+event_persister
+    for worker_type_string in multiple_processed_worker_types:
+        # First, if a name is requested, use that — otherwise generate one.
+        worker_base_name: str = ""
+        if "=" in worker_type_string:
+            # Split on "=", remove extra whitespace from ends then make list
+            worker_type_split = split_and_strip_string(worker_type_string, "=")
+            if len(worker_type_split) > 2:
+                error(
+                    "There should only be one '=' in the worker type string. "
+                    f"Please fix: {worker_type_string}"
+                )
+
+            # Assign the name
+            worker_base_name = worker_type_split[0]
+
+            if not re.match(r"^[a-zA-Z0-9_+-]*[a-zA-Z_+-]$", worker_base_name):
+                # Apply a fairly narrow regex to the worker names. Some characters
+                # aren't safe for use in file paths or nginx configurations.
+                # Don't allow to end with a number because we'll add a number
+                # ourselves in a moment.
+                error(
+                    "Invalid worker name; please choose a name consisting of "
+                    "alphanumeric letters, _ + -, but not ending with a digit: "
+                    f"{worker_base_name!r}"
+                )
+
+            # Continue processing the remainder of the worker_type string
+            # with the name override removed.
+            worker_type_string = worker_type_split[1]
+
+        # Split the worker_type_string on "+", remove whitespace from ends then make
+        # the list a set so it's deduplicated.
+        worker_types_set: Set[str] = set(
+            split_and_strip_string(worker_type_string, "+")
+        )
+
+        if not worker_base_name:
+            # No base name specified: generate one deterministically from set of
+            # types
+            worker_base_name = "+".join(sorted(worker_types_set))
+
+        # At this point, we have:
+        #   worker_base_name which is the name for the worker, without counter.
+        #   worker_types_set which is the set of worker types for this worker.
+
+        # Validate worker_type and make sure we don't allow sharding for a worker type
+        # that doesn't support it. Will error and stop if it is a problem,
+        # e.g. 'background_worker'.
+        for worker_type in worker_types_set:
+            # Verify this is a real defined worker type. If it's not, stop everything so
+            # it can be fixed.
+            if worker_type not in WORKERS_CONFIG:
+                error(
+                    f"{worker_type} is an unknown worker type! Was found in "
+                    f"'{worker_type_string}'. Please fix!"
+                )
+
+            if worker_type in worker_type_shard_counter:
+                if not is_sharding_allowed_for_worker_type(worker_type):
+                    error(
+                        f"There can be only a single worker with {worker_type} "
+                        "type. Please recount and remove."
+                    )
+            # Not in shard counter, must not have seen it yet, add it.
+            worker_type_shard_counter[worker_type] += 1
+
+        # Generate the number for the worker using incrementing counter
+        worker_base_name_counter[worker_base_name] += 1
+        worker_number = worker_base_name_counter[worker_base_name]
+        worker_name = f"{worker_base_name}{worker_number}"
+
+        if worker_number > 1:
+            # If this isn't the first worker, check that we don't have a confusing
+            # mixture of worker types with the same base name.
+            first_worker_with_base_name = dict_to_return[f"{worker_base_name}1"]
+            if first_worker_with_base_name != worker_types_set:
+                error(
+                    f"Can not use worker_name: '{worker_name}' for worker_type(s): "
+                    f"{worker_types_set!r}. It is already in use by "
+                    f"worker_type(s): {first_worker_with_base_name!r}"
+                )
+
+        dict_to_return[worker_name] = worker_types_set
+
+    return dict_to_return
+
+
 def generate_worker_files(
-    environ: Mapping[str, str], config_path: str, data_dir: str
+    environ: Mapping[str, str],
+    config_path: str,
+    data_dir: str,
+    requested_worker_types: Dict[str, Set[str]],
 ) -> None:
-    """Read the desired list of workers from environment variables and generate
-    shared homeserver, nginx and supervisord configs.
+    """Read the desired workers(if any) that is passed in and generate shared
+    homeserver, nginx and supervisord configs.
 
     Args:
         environ: os.environ instance.
         config_path: The location of the generated Synapse main worker config file.
         data_dir: The location of the synapse data directory. Where log and
            user-facing config files live.
+        requested_worker_types: A Dict containing requested workers in the format of
+            {'worker_name1': {'worker_type', ...}}
     """
     # Note that yaml cares about indentation, so care should be taken to insert lines
     # into files at the correct indentation below.
 
-    # shared_config is the contents of a Synapse config file that will be shared amongst
-    # the main Synapse process as well as all workers.
-    # It is intended mainly for disabling functionality when certain workers are spun up,
-    # and adding a replication listener.
-
-    # First read the original config file and extract the listeners block. Then we'll add
-    # another listener for replication. Later we'll write out the result to the shared
-    # config file.
+    # First read the original config file and extract the listeners block. Then we'll
+    # add another listener for replication. Later we'll write out the result to the
+    # shared config file.
     listeners = [
         {
             "port": 9093,
@@ -427,9 +729,9 @@ def generate_worker_files(
     listeners += original_listeners
 
     # The shared homeserver config. The contents of which will be inserted into the
-    # base shared worker jinja2 template.
-    #
-    # This config file will be passed to all workers, included Synapse's main process.
+    # base shared worker jinja2 template. This config file will be passed to all
+    # workers, included Synapse's main process. It is intended mainly for disabling
+    # functionality when certain workers are spun up, and adding a replication listener.
     shared_config: Dict[str, Any] = {"listeners": listeners}
 
     # List of dicts that describe workers.
@@ -437,31 +739,20 @@ def generate_worker_files(
     # program blocks.
     worker_descriptors: List[Dict[str, Any]] = []
 
-    # Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
-    # ports of each worker. For example:
+    # Upstreams for load-balancing purposes. This dict takes the form of the worker
+    # type to the ports of each worker. For example:
     # {
     #   worker_type: {1234, 1235, ...}}
     # }
     # and will be used to construct 'upstream' nginx directives.
     nginx_upstreams: Dict[str, Set[int]] = {}
 
-    # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
-    # placed after the proxy_pass directive. The main benefit to representing this data as a
-    # dict over a str is that we can easily deduplicate endpoints across multiple instances
-    # of the same worker.
-    #
-    # An nginx site config that will be amended to depending on the workers that are
-    # spun up. To be placed in /etc/nginx/conf.d.
-    nginx_locations = {}
-
-    # Read the desired worker configuration from the environment
-    worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
-    if not worker_types_env:
-        # No workers, just the main process
-        worker_types = []
-    else:
-        # Split type names by comma, ignoring whitespace.
-        worker_types = [x.strip() for x in worker_types_env.split(",")]
+    # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what
+    # will be placed after the proxy_pass directive. The main benefit to representing
+    # this data as a dict over a str is that we can easily deduplicate endpoints
+    # across multiple instances of the same worker. The final rendering will be combined
+    # with nginx_upstreams and placed in /etc/nginx/conf.d.
+    nginx_locations: Dict[str, str] = {}
 
     # Create the worker configuration directory if it doesn't already exist
     os.makedirs("/conf/workers", exist_ok=True)
@@ -469,66 +760,57 @@ def generate_worker_files(
     # Start worker ports from this arbitrary port
     worker_port = 18009
 
-    # A counter of worker_type -> int. Used for determining the name for a given
-    # worker type when generating its config file, as each worker's name is just
-    # worker_type + instance #
-    worker_type_counter: Dict[str, int] = {}
-
     # A list of internal endpoints to healthcheck, starting with the main process
     # which exists even if no workers do.
     healthcheck_urls = ["http://localhost:8080/health"]
 
-    # For each worker type specified by the user, create config values
-    for worker_type in worker_types:
-        worker_config = WORKERS_CONFIG.get(worker_type)
-        if worker_config:
-            worker_config = worker_config.copy()
-        else:
-            error(worker_type + " is an unknown worker type! Please fix!")
+    # Get the set of all worker types that we have configured
+    all_worker_types_in_use = set(chain(*requested_worker_types.values()))
+    # Map locations to upstreams (corresponding to worker types) in Nginx
+    # but only if we use the appropriate worker type
+    for worker_type in all_worker_types_in_use:
+        for endpoint_pattern in WORKERS_CONFIG[worker_type]["endpoint_patterns"]:
+            nginx_locations[endpoint_pattern] = f"http://{worker_type}"
 
-        new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
-        worker_type_counter[worker_type] = new_worker_count
+    # For each worker type specified by the user, create config values and write it's
+    # yaml config file
+    for worker_name, worker_types_set in requested_worker_types.items():
+        # The collected and processed data will live here.
+        worker_config: Dict[str, Any] = {}
+
+        # Merge all worker config templates for this worker into a single config
+        for worker_type in worker_types_set:
+            copy_of_template_config = WORKERS_CONFIG[worker_type].copy()
+
+            # Merge worker type template configuration data. It's a combination of lists
+            # and dicts, so use this helper.
+            worker_config = merge_worker_template_configs(
+                worker_config, copy_of_template_config
+            )
+
+        # Replace placeholder names in the config template with the actual worker name.
+        worker_config = insert_worker_name_for_worker_config(worker_config, worker_name)
 
-        # Name workers by their type concatenated with an incrementing number
-        # e.g. federation_reader1
-        worker_name = worker_type + str(new_worker_count)
         worker_config.update(
             {"name": worker_name, "port": str(worker_port), "config_path": config_path}
         )
 
-        # Update the shared config with any worker-type specific options
-        shared_config.update(worker_config["shared_extra_conf"])
+        # Update the shared config with any worker_type specific options. The first of a
+        # given worker_type needs to stay assigned and not be replaced.
+        worker_config["shared_extra_conf"].update(shared_config)
+        shared_config = worker_config["shared_extra_conf"]
 
         healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
 
-        # Check if more than one instance of this worker type has been specified
-        worker_type_total_count = worker_types.count(worker_type)
-
         # Update the shared config with sharding-related options if necessary
         add_worker_roles_to_shared_config(
-            shared_config, worker_type, worker_name, worker_port
+            shared_config, worker_types_set, worker_name, worker_port
        )
 
         # Enable the worker in supervisord
         worker_descriptors.append(worker_config)
 
-        # Add nginx location blocks for this worker's endpoints (if any are defined)
-        for pattern in worker_config["endpoint_patterns"]:
-            # Determine whether we need to load-balance this worker
-            if worker_type_total_count > 1:
-                # Create or add to a load-balanced upstream for this worker
-                nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
-
-                # Upstreams are named after the worker_type
-                upstream = "http://" + worker_type
-            else:
-                upstream = "http://localhost:%d" % (worker_port,)
-
-            # Note that this endpoint should proxy to this upstream
-            nginx_locations[pattern] = upstream
-
         # Write out the worker's logging config file
-
         log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)
 
         # Then a worker config file
@@ -539,6 +821,10 @@ def generate_worker_files(
             worker_log_config_filepath=log_config_filepath,
         )
 
+        # Save this worker's port number to the correct nginx upstreams
+        for worker_type in worker_types_set:
+            nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
+
         worker_port += 1
 
     # Build the nginx location config blocks
@@ -551,15 +837,14 @@ def generate_worker_files(
 
     # Determine the load-balancing upstreams to configure
     nginx_upstream_config = ""
-    for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
+    for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
         body = ""
         for port in upstream_worker_ports:
-            body += " server localhost:%d;\n" % (port,)
+            body += f" server localhost:{port};\n"
 
         # Add to the list of configured upstreams
         nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
-            upstream_worker_type=upstream_worker_type,
+            upstream_worker_base_name=upstream_worker_base_name,
             body=body,
         )
 
@@ -580,7 +865,7 @@ def generate_worker_files(
         if reg_path.suffix.lower() in (".yaml", ".yml")
     ]
 
-    workers_in_use = len(worker_types) > 0
+    workers_in_use = len(requested_worker_types) > 0
 
     # Shared homeserver config
     convert(
@@ -678,13 +963,26 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
         generate_base_homeserver_config()
     else:
         log("Base homeserver config exists—not regenerating")
-    # This script may be run multiple times (mostly by Complement, see note at top of file).
-    # Don't re-configure workers in this instance.
+    # This script may be run multiple times (mostly by Complement, see note at top of
+    # file). Don't re-configure workers in this instance.
     mark_filepath = "/conf/workers_have_been_configured"
     if not os.path.exists(mark_filepath):
+        # Collect and validate worker_type requests
+        # Read the desired worker configuration from the environment
+        worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
+        # Only process worker_types if they exist
+        if not worker_types_env:
+            # No workers, just the main process
+            worker_types = []
+            requested_worker_types: Dict[str, Any] = {}
+        else:
+            # Split type names by comma, ignoring whitespace.
+            worker_types = split_and_strip_string(worker_types_env, ",")
+            requested_worker_types = parse_worker_types(worker_types)
+
         # Always regenerate all other config files
         log("Generating worker config files")
-        generate_worker_files(environ, config_path, data_dir)
+        generate_worker_files(environ, config_path, data_dir, requested_worker_types)
 
         # Mark workers as being configured
         with open(mark_filepath, "w") as f:
@@ -26,8 +26,8 @@ for most users.
 #### Docker images and Ansible playbooks
 
 There is an official synapse image available at
-<https://hub.docker.com/r/matrixdotorg/synapse> which can be used with
-the docker-compose file available at
+<https://hub.docker.com/r/matrixdotorg/synapse> or at [`ghcr.io/matrix-org/synapse`](https://ghcr.io/matrix-org/synapse)
+which can be used with the docker-compose file available at
 [contrib/docker](https://github.com/matrix-org/synapse/tree/develop/contrib/docker).
 Further information on this including configuration options is available in the README
 on hub.docker.com.
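Assuming the GHCR mirror described in the hunk above is published as stated, either registry should serve the same image; the tag below is an assumption for illustration:

    # Either registry should work once the mirror exists; ':latest' is an assumed tag.
    docker pull docker.io/matrixdotorg/synapse:latest
    docker pull ghcr.io/matrix-org/synapse:latest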
@@ -231,6 +231,7 @@ information.
     ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/
     ^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$
     ^/_matrix/client/v1/rooms/.*/timestamp_to_event$
+    ^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases
     ^/_matrix/client/(api/v1|r0|v3|unstable)/search$
     ^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)
 
@@ -244,6 +245,7 @@ information.
     # Registration/login requests
     ^/_matrix/client/(api/v1|r0|v3|unstable)/login$
     ^/_matrix/client/(r0|v3|unstable)/register$
+    ^/_matrix/client/(r0|v3|unstable)/register/available$
     ^/_matrix/client/v1/register/m.login.registration_token/validity$
 
     # Event sending requests
mypy.ini
@@ -48,9 +48,6 @@ warn_unused_ignores = False
 [mypy-synapse.util.caches.treecache]
 disallow_untyped_defs = False

-[mypy-synapse.storage.database]
-disallow_untyped_defs = False
-
 [mypy-tests.util.caches.test_descriptors]
 disallow_untyped_defs = False

@@ -74,11 +71,6 @@ ignore_missing_imports = True
 [mypy-msgpack]
 ignore_missing_imports = True

-# Note: WIP stubs available at
-# https://github.com/microsoft/python-type-stubs/tree/64934207f523ad6b611e6cfe039d85d7175d7d0d/netaddr
-[mypy-netaddr]
-ignore_missing_imports = True
-
 [mypy-parameterized.*]
 ignore_missing_imports = True
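To illustrate what removing these `disallow_untyped_defs = False` overrides means in practice (assuming, as the overrides imply, that the option is enabled globally for the project): mypy will now flag unannotated function definitions in the affected modules. A generic example, not code from the Synapse tree:

```python
# With disallow_untyped_defs enabled, mypy reports this definition
# ("Function is missing a type annotation") because neither the
# parameter nor the return type is annotated.
def fetch_event(event_id):
    return {"event_id": event_id}


# A fully annotated equivalent passes the check.
def fetch_event_typed(event_id: str) -> dict:
    return {"event_id": event_id}
```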
@@ -352,35 +352,35 @@ files = [

 [[package]]
 name = "cryptography"
-version = "39.0.1"
+version = "39.0.2"
 description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
 category = "main"
 optional = false
 python-versions = ">=3.6"
 files = [
-    # ... wheel and sdist entries with cryptography 39.0.1 hashes ...
+    # ... wheel and sdist entries with cryptography 39.0.2 hashes ...
 ]

 [package.dependencies]
@@ -497,14 +497,14 @@ smmap = ">=3.0.1,<6"

 [[package]]
 name = "gitpython"
-version = "3.1.30"
+version = "3.1.31"
-description = "GitPython is a python library used to interact with Git repositories"
+description = "GitPython is a Python library used to interact with Git repositories"
 category = "dev"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "GitPython-3.1.30-py3-none-any.whl", hash = "sha256:cd455b0000615c60e286208ba540271af9fe531fa6a87cc590a7298785ab2882"},
-    {file = "GitPython-3.1.30.tar.gz", hash = "sha256:769c2d83e13f5d938b7688479da374c4e3d49f71549aaf462b646db9602ea6f8"},
+    {file = "GitPython-3.1.31-py3-none-any.whl", hash = "sha256:f04893614f6aa713a60cbbe1e6a97403ef633103cdd0ef5eb6efe0deb98dbe8d"},
+    {file = "GitPython-3.1.31.tar.gz", hash = "sha256:8ce3bcf69adfdf7c7d503e78fd3b1c492af782d58893b650adb2ac8912ddd573"},
 ]

 [package.dependencies]
@@ -513,101 +513,101 @@ typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""

 [[package]]
 name = "hiredis"
-version = "2.2.1"
+version = "2.2.2"
 description = "Python wrapper for hiredis"
 category = "main"
 optional = true
 python-versions = ">=3.7"
 files = [
-    # ... wheel and sdist entries with hiredis 2.2.1 hashes ...
+    # ... wheel and sdist entries with hiredis 2.2.2 hashes ...
 ]

 [[package]]
@@ -1098,64 +1098,75 @@ dev = ["black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "ldaptor", "ma

 [[package]]
 name = "msgpack"
-version = "1.0.4"
+version = "1.0.5"
 description = "MessagePack serializer"
 category = "main"
 optional = false
 python-versions = "*"
 files = [
-    # ... wheel and sdist entries with msgpack 1.0.4 hashes ...
+    # ... wheel and sdist entries with msgpack 1.0.5 hashes, now including cp311 wheels ...
 ]

 [[package]]
@@ -1557,48 +1568,48 @@ files = [

 [[package]]
 name = "pydantic"
-version = "1.10.4"
+version = "1.10.6"
 description = "Data validation and settings management using python type hints"
 category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
-    [pydantic 1.10.4 wheel and sdist hash entries]
+    [pydantic 1.10.6 wheel and sdist hash entries]
 ]

 [package.dependencies]
@@ -1610,25 +1621,22 @@ email = ["email-validator (>=1.0.3)"]

 [[package]]
 name = "pygithub"
-version = "1.57"
+version = "1.58.1"
 description = "Use the full Github API v3"
 category = "dev"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "PyGithub-1.57-py3-none-any.whl", hash = "sha256:5822febeac2391f1306c55a99af2bc8f86c8bf82ded000030cd02c18f31b731f"},
-    {file = "PyGithub-1.57.tar.gz", hash = "sha256:c273f252b278fb81f1769505cc6921bdb6791e1cebd6ac850cc97dad13c31ff3"},
+    {file = "PyGithub-1.58.1-py3-none-any.whl", hash = "sha256:4e7fe9c3ec30d5fde5b4fbb97f18821c9dbf372bf6df337fe66f6689a65e0a83"},
+    {file = "PyGithub-1.58.1.tar.gz", hash = "sha256:7d528b4ad92bc13122129fafd444ce3d04c47d2d801f6446b6e6ee2d410235b3"},
 ]

 [package.dependencies]
 deprecated = "*"
-pyjwt = ">=2.4.0"
+pyjwt = {version = ">=2.4.0", extras = ["crypto"]}
 pynacl = ">=1.4.0"
 requests = ">=2.14.0"

-[package.extras]
-integrations = ["cryptography"]
-
 [[package]]
 name = "pygments"
 version = "2.11.2"
@@ -1664,6 +1672,9 @@ files = [
     {file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"},
 ]

+[package.dependencies]
+cryptography = {version = ">=3.3.1", optional = true, markers = "extra == \"crypto\""}
+
 [package.extras]
 crypto = ["cryptography (>=3.3.1)"]
 dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.3.1)", "mypy", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"]
@@ -1777,26 +1788,25 @@ files = [

 [[package]]
 name = "pysaml2"
-version = "7.2.1"
+version = "7.3.1"
 description = "Python implementation of SAML Version 2 Standard"
 category = "main"
 optional = true
-python-versions = "<4,>=3.6"
+python-versions = ">=3.6.2,<4.0.0"
 files = [
-    {file = "pysaml2-7.2.1-py2.py3-none-any.whl", hash = "sha256:2ca155f4eeb1471b247a7b0cc79ccfd5780046d33d0b201e1199a00698dce795"},
-    {file = "pysaml2-7.2.1.tar.gz", hash = "sha256:f40f9576dce9afef156469179277ffeeca36829248be333252af0517a26d0b1f"},
+    {file = "pysaml2-7.3.1-py3-none-any.whl", hash = "sha256:2cc66e7a371d3f5ff9601f0ed93b5276cca816fce82bb38447d5a0651f2f5193"},
+    {file = "pysaml2-7.3.1.tar.gz", hash = "sha256:eab22d187c6dd7707c58b5bb1688f9b8e816427667fc99d77f54399e15cd0a0a"},
 ]

 [package.dependencies]
 cryptography = ">=3.1"
 defusedxml = "*"
+importlib-metadata = {version = ">=1.7.0", markers = "python_version < \"3.8\""}
 importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
-pyOpenSSL = "*"
+pyopenssl = "*"
 python-dateutil = "*"
 pytz = "*"
-requests = ">=1.0.0"
-setuptools = "*"
-six = "*"
+requests = ">=2,<3"
 xmlschema = ">=1.2.1"

 [package.extras]
@@ -2513,14 +2523,14 @@ files = [

 [[package]]
 name = "txredisapi"
-version = "1.4.7"
+version = "1.4.9"
 description = "non-blocking redis client for python"
 category = "main"
 optional = true
 python-versions = "*"
 files = [
-    {file = "txredisapi-1.4.7-py3-none-any.whl", hash = "sha256:34c9eba8d34f452d30661f073b67b8cd42b695e3d31678ec1bbf628a65a0f059"},
-    {file = "txredisapi-1.4.7.tar.gz", hash = "sha256:e6cc43f51e35d608abdca8f8c7d20e148fe1d82679f6e584baea613ebec812bb"},
+    {file = "txredisapi-1.4.9-py3-none-any.whl", hash = "sha256:72e6ad09cc5fffe3bec2e55e5bfb74407bd357565fc212e6003f7e26ef7d8f78"},
+    {file = "txredisapi-1.4.9.tar.gz", hash = "sha256:c9607062d05e4d0b8ef84719eb76a3fe7d5ccd606a2acf024429da51d6e84559"},
 ]

 [package.dependencies]
@@ -2597,6 +2607,18 @@ files = [
     {file = "types_jsonschema-4.17.0.5-py3-none-any.whl", hash = "sha256:79ac8a7763fe728947af90a24168b91621edf7e8425bf3670abd4ea0d4758fba"},
 ]

+[[package]]
+name = "types-netaddr"
+version = "0.8.0.6"
+description = "Typing stubs for netaddr"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+    {file = "types-netaddr-0.8.0.6.tar.gz", hash = "sha256:e5048640c2412e7ea2d3eb02c94ae1b50442b2c7a50a7c48e957676139cdf19b"},
+    {file = "types_netaddr-0.8.0.6-py3-none-any.whl", hash = "sha256:d4d40d1ba35430a4e4c929596542cd37e6831f5d08676b33dc84e06e01a840f6"},
+]
+
 [[package]]
 name = "types-opentracing"
 version = "2.4.10.3"
@@ -2662,14 +2684,14 @@ files = [

 [[package]]
 name = "types-requests"
-version = "2.28.11.12"
+version = "2.28.11.15"
 description = "Typing stubs for requests"
 category = "dev"
 optional = false
 python-versions = "*"
 files = [
-    {file = "types-requests-2.28.11.12.tar.gz", hash = "sha256:fd530aab3fc4f05ee36406af168f0836e6f00f1ee51a0b96b7311f82cb675230"},
-    {file = "types_requests-2.28.11.12-py3-none-any.whl", hash = "sha256:dbc2933635860e553ffc59f5e264264981358baffe6342b925e3eb8261f866ee"},
+    {file = "types-requests-2.28.11.15.tar.gz", hash = "sha256:fc8eaa09cc014699c6b63c60c2e3add0c8b09a410c818b5ac6e65f92a26dde09"},
+    {file = "types_requests-2.28.11.15-py3-none-any.whl", hash = "sha256:a05e4c7bc967518fba5789c341ea8b0c942776ee474c7873129a61161978e586"},
 ]

 [package.dependencies]
@@ -2990,4 +3012,4 @@ user-search = ["pyicu"]
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.7.1"
-content-hash = "7bcffef7b6e6d4b1113222e2ca152b3798c997872789c8a1ea01238f199d56fe"
+content-hash = "de2c4c8de336593478ce02581a5336afe2544db93ea82f3955b34c3653c29a26"

@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"

 [tool.poetry]
 name = "matrix-synapse"
-version = "1.79.0rc2"
+version = "1.79.0"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"
@@ -321,6 +321,7 @@ mypy-zope = "*"
 types-bleach = ">=4.1.0"
 types-commonmark = ">=0.9.2"
 types-jsonschema = ">=3.2.0"
+types-netaddr = ">=0.8.0.6"
 types-opentracing = ">=2.4.2"
 types-Pillow = ">=8.3.4"
 types-psycopg2 = ">=2.9.9"

@@ -52,7 +52,6 @@ fn bench_match_exact(b: &mut Bencher) {
         true,
         vec![],
         false,
-        false,
     )
     .unwrap();

@@ -98,7 +97,6 @@ fn bench_match_word(b: &mut Bencher) {
         true,
         vec![],
         false,
-        false,
     )
     .unwrap();

@@ -144,7 +142,6 @@ fn bench_match_word_miss(b: &mut Bencher) {
         true,
         vec![],
         false,
-        false,
     )
     .unwrap();

@@ -190,7 +187,6 @@ fn bench_eval_message(b: &mut Bencher) {
         true,
         vec![],
         false,
-        false,
     )
     .unwrap();

@@ -71,7 +71,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
         priority_class: 5,
         conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
             EventMatchCondition {
-                key: Cow::Borrowed("content.m.relates_to.rel_type"),
+                key: Cow::Borrowed("content.m\\.relates_to.rel_type"),
                 pattern: Cow::Borrowed("m.replace"),
             },
         ))]),
@@ -146,7 +146,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
         priority_class: 5,
         conditions: Cow::Borrowed(&[Condition::Known(
             KnownCondition::ExactEventPropertyContainsType(EventPropertyIsTypeCondition {
-                key: Cow::Borrowed("content.org.matrix.msc3952.mentions.user_ids"),
+                key: Cow::Borrowed("content.org\\.matrix\\.msc3952\\.mentions.user_ids"),
                 value_type: Cow::Borrowed(&EventMatchPatternType::UserId),
             }),
         )]),
@@ -167,7 +167,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
         priority_class: 5,
         conditions: Cow::Borrowed(&[
             Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition {
-                key: Cow::Borrowed("content.org.matrix.msc3952.mentions.room"),
+                key: Cow::Borrowed("content.org\\.matrix\\.msc3952\\.mentions.room"),
                 value: Cow::Borrowed(&SimpleJsonValue::Bool(true)),
             })),
             Condition::Known(KnownCondition::SenderNotificationPermission {

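Note: the keys rewritten above now use a backslash-escaped dot ("content.m\.relates_to.rel_type") so that a literal dot inside a field name is no longer confused with a path separator — the MSC3873 key-escaping that this merge makes the default (the experimental flag for it is removed further down). As an illustration only, with a hypothetical helper name that is not part of this diff, the split logic behind such keys could look like this:

    from typing import List

    def split_escaped_path(path: str) -> List[str]:
        """Split a dotted event-match key, treating "\\." as a literal dot.

        Illustrative sketch only, e.g. "content.m\\.relates_to.rel_type"
        becomes ["content", "m.relates_to", "rel_type"].
        """
        parts: List[str] = []
        current: List[str] = []
        i = 0
        while i < len(path):
            if path[i] == "\\" and i + 1 < len(path) and path[i + 1] in ("\\", "."):
                # An escaped backslash or dot is kept literally.
                current.append(path[i + 1])
                i += 2
            elif path[i] == ".":
                # An unescaped dot separates path components.
                parts.append("".join(current))
                current = []
                i += 1
            else:
                current.append(path[i])
                i += 1
        parts.append("".join(current))
        return parts

    assert split_escaped_path("content.m\\.relates_to.rel_type") == [
        "content",
        "m.relates_to",
        "rel_type",
    ]

This is a sketch of the escaping convention only; the actual helper Synapse uses for flattening event content is not shown in this diff.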
@@ -96,9 +96,6 @@ pub struct PushRuleEvaluator {
     /// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same
     /// flag as MSC1767 (extensible events core).
     msc3931_enabled: bool,
-
-    /// If MSC3966 (exact_event_property_contains push rule condition) is enabled.
-    msc3966_exact_event_property_contains: bool,
 }

 #[pymethods]
@@ -116,7 +113,6 @@ impl PushRuleEvaluator {
         related_event_match_enabled: bool,
         room_version_feature_flags: Vec<String>,
         msc3931_enabled: bool,
-        msc3966_exact_event_property_contains: bool,
     ) -> Result<Self, Error> {
         let body = match flattened_keys.get("content.body") {
             Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone(),
@@ -134,7 +130,6 @@ impl PushRuleEvaluator {
             related_event_match_enabled,
             room_version_feature_flags,
             msc3931_enabled,
-            msc3966_exact_event_property_contains,
         })
     }

@@ -301,8 +296,8 @@ impl PushRuleEvaluator {
                     Some(Cow::Borrowed(pattern)),
                 )?
             }
-            KnownCondition::ExactEventPropertyContains(event_property_is) => self
-                .match_exact_event_property_contains(
+            KnownCondition::EventPropertyContains(event_property_is) => self
+                .match_event_property_contains(
                     event_property_is.key.clone(),
                     event_property_is.value.clone(),
                 )?,
@@ -321,7 +316,7 @@ impl PushRuleEvaluator {
                     EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?,
                 };

-                self.match_exact_event_property_contains(
+                self.match_event_property_contains(
                     exact_event_match.key.clone(),
                     Cow::Borrowed(&SimpleJsonValue::Str(pattern.to_string())),
                 )?
@@ -454,17 +449,12 @@ impl PushRuleEvaluator {
         }
     }

-    /// Evaluates a `exact_event_property_contains` condition. (MSC3966)
-    fn match_exact_event_property_contains(
+    /// Evaluates a `event_property_contains` condition.
+    fn match_event_property_contains(
         &self,
         key: Cow<str>,
         value: Cow<SimpleJsonValue>,
     ) -> Result<bool, Error> {
-        // First check if the feature is enabled.
-        if !self.msc3966_exact_event_property_contains {
-            return Ok(false);
-        }
-
         let haystack = if let Some(JsonValue::Array(haystack)) = self.flattened_keys.get(&*key) {
             haystack
         } else {
@@ -515,7 +505,6 @@ fn push_rule_evaluator() {
         true,
         vec![],
         true,
-        true,
     )
     .unwrap();

@@ -545,7 +534,6 @@ fn test_requires_room_version_supports_condition() {
         false,
         flags,
         true,
-        true,
     )
     .unwrap();

@@ -337,13 +337,9 @@ pub enum KnownCondition {
     // Identical to related_event_match but gives predefined patterns. Cannot be added by users.
     #[serde(skip_deserializing, rename = "im.nheko.msc3664.related_event_match")]
     RelatedEventMatchType(RelatedEventMatchTypeCondition),
-    #[serde(rename = "org.matrix.msc3966.exact_event_property_contains")]
-    ExactEventPropertyContains(EventPropertyIsCondition),
+    EventPropertyContains(EventPropertyIsCondition),
     // Identical to exact_event_property_contains but gives predefined patterns. Cannot be added by users.
-    #[serde(
-        skip_deserializing,
-        rename = "org.matrix.msc3966.exact_event_property_contains"
-    )]
+    #[serde(skip_deserializing, rename = "event_property_contains")]
     ExactEventPropertyContainsType(EventPropertyIsTypeCondition),
     ContainsDisplayName,
     RoomMemberCount {

@@ -65,7 +65,6 @@ class PushRuleEvaluator:
         related_event_match_enabled: bool,
         room_version_feature_flags: Tuple[str, ...],
         msc3931_enabled: bool,
-        msc3966_exact_event_property_contains: bool,
     ): ...
     def run(
         self,

@@ -108,6 +108,11 @@ class Codes(str, Enum):

     USER_AWAITING_APPROVAL = "ORG.MATRIX.MSC3866_USER_AWAITING_APPROVAL"

+    AS_PING_URL_NOT_SET = "FI.MAU.MSC2659_URL_NOT_SET"
+    AS_PING_BAD_STATUS = "FI.MAU.MSC2659_BAD_STATUS"
+    AS_PING_CONNECTION_TIMEOUT = "FI.MAU.MSC2659_CONNECTION_TIMEOUT"
+    AS_PING_CONNECTION_FAILED = "FI.MAU.MSC2659_CONNECTION_FAILED"
+
     # Attempt to send a second annotation with the same event type & annotation key
     # MSC2677
     DUPLICATE_ANNOTATION = "M_DUPLICATE_ANNOTATION"

@@ -266,6 +266,19 @@ class ApplicationServiceApi(SimpleHttpClient):
         key = (service.id, protocol)
         return await self.protocol_meta_cache.wrap(key, _get)

+    async def ping(self, service: "ApplicationService", txn_id: Optional[str]) -> None:
+        # The caller should check that url is set
+        assert service.url is not None, "ping called without URL being set"
+
+        # This is required by the configuration.
+        assert service.hs_token is not None
+
+        await self.post_json_get_json(
+            uri=service.url + "/_matrix/app/unstable/fi.mau.msc2659/ping",
+            post_json={"transaction_id": txn_id},
+            headers={"Authorization": [f"Bearer {service.hs_token}"]},
+        )
+
     async def push_bulk(
         self,
         service: "ApplicationService",

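For reference, the ping helper added above boils down to a single HTTP request to the application service. A standalone sketch of the equivalent request, using the requests library with placeholder values for the appservice URL and the hs_token (both come from the appservice's registration file), can help when checking what the appservice should expect to receive:

    import requests

    # Placeholder values: substitute the appservice's own url and the hs_token
    # from its registration file.
    AS_URL = "http://localhost:9000"
    HS_TOKEN = "hs_token_from_registration"

    resp = requests.post(
        AS_URL + "/_matrix/app/unstable/fi.mau.msc2659/ping",
        json={"transaction_id": "m12345"},
        headers={"Authorization": f"Bearer {HS_TOKEN}"},
        timeout=10,
    )
    # Under MSC2659 a reachable appservice is expected to answer 200 with an
    # empty JSON object; anything else maps to the AS_PING_* error codes above.
    print(resp.status_code, resp.text)

This is a sketch of the wire format implied by the code above, not part of the change itself.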
@@ -166,20 +166,9 @@ class ExperimentalConfig(Config):
         # MSC3391: Removing account data.
         self.msc3391_enabled = experimental.get("msc3391_enabled", False)

-        # MSC3873: Disambiguate event_match keys.
-        self.msc3873_escape_event_match_key = experimental.get(
-            "msc3873_escape_event_match_key", False
-        )
-
-        # MSC3966: exact_event_property_contains push rule condition.
-        self.msc3966_exact_event_property_contains = experimental.get(
-            "msc3966_exact_event_property_contains", False
-        )
-
         # MSC3952: Intentional mentions, this depends on MSC3966.
-        self.msc3952_intentional_mentions = (
-            experimental.get("msc3952_intentional_mentions", False)
-            and self.msc3966_exact_event_property_contains
+        self.msc3952_intentional_mentions = experimental.get(
+            "msc3952_intentional_mentions", False
         )

         # MSC3959: Do not generate notifications for edits.
@@ -187,10 +176,8 @@ class ExperimentalConfig(Config):
             "msc3958_supress_edit_notifs", False
         )

-        # MSC3966: exact_event_property_contains push rule condition.
-        self.msc3966_exact_event_property_contains = experimental.get(
-            "msc3966_exact_event_property_contains", False
-        )
-
         # MSC3967: Do not require UIA when first uploading cross signing keys
         self.msc3967_enabled = experimental.get("msc3967_enabled", False)

+        # MSC2659: Application service ping endpoint
+        self.msc2659_enabled = experimental.get("msc2659_enabled", False)

@@ -168,13 +168,24 @@ async def check_state_independent_auth_rules(
         return

     # 2. Reject if event has auth_events that: ...
-    auth_events = await store.get_events(
-        event.auth_event_ids(),
-        redact_behaviour=EventRedactBehaviour.as_is,
-        allow_rejected=True,
-    )
     if batched_auth_events:
-        auth_events.update(batched_auth_events)
+        # Copy the batched auth events to avoid mutating them.
+        auth_events = dict(batched_auth_events)
+        needed_auth_event_ids = set(event.auth_event_ids()) - batched_auth_events.keys()
+        if needed_auth_event_ids:
+            auth_events.update(
+                await store.get_events(
+                    needed_auth_event_ids,
+                    redact_behaviour=EventRedactBehaviour.as_is,
+                    allow_rejected=True,
+                )
+            )
+    else:
+        auth_events = await store.get_events(
+            event.auth_event_ids(),
+            redact_behaviour=EventRedactBehaviour.as_is,
+            allow_rejected=True,
+        )

     room_id = event.room_id
     auth_dict: MutableStateMap[str] = {}

@@ -293,6 +293,7 @@ class EventContext(UnpersistedEventContextBase):
             Maps a (type, state_key) to the event ID of the state event matching
             this tuple.
        """
+
        assert self.state_group_before_event is not None
        return await self._storage.state.get_state_ids_for_group(
            self.state_group_before_event, state_filter

@@ -15,9 +15,7 @@
 import email.mime.multipart
 import email.utils
 import logging
-from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Tuple
-
-from twisted.web.http import Request
+from typing import TYPE_CHECKING, List, Optional, Tuple

 from synapse.api.errors import AuthError, StoreError, SynapseError
 from synapse.metrics.background_process_metrics import wrap_as_background_process
@@ -30,25 +28,17 @@ if TYPE_CHECKING:

 logger = logging.getLogger(__name__)

-# Types for callbacks to be registered via the module api
-IS_USER_EXPIRED_CALLBACK = Callable[[str], Awaitable[Optional[bool]]]
-ON_USER_REGISTRATION_CALLBACK = Callable[[str], Awaitable]
-# Temporary hooks to allow for a transition from `/_matrix/client` endpoints
-# to `/_synapse/client/account_validity`. See `register_account_validity_callbacks`.
-ON_LEGACY_SEND_MAIL_CALLBACK = Callable[[str], Awaitable]
-ON_LEGACY_RENEW_CALLBACK = Callable[[str], Awaitable[Tuple[bool, bool, int]]]
-ON_LEGACY_ADMIN_REQUEST = Callable[[Request], Awaitable]
-

 class AccountValidityHandler:
     def __init__(self, hs: "HomeServer"):
         self.hs = hs
         self.config = hs.config
-        self.store = self.hs.get_datastores().main
-        self.send_email_handler = self.hs.get_send_email_handler()
-        self.clock = self.hs.get_clock()
+        self.store = hs.get_datastores().main
+        self.send_email_handler = hs.get_send_email_handler()
+        self.clock = hs.get_clock()

-        self._app_name = self.hs.config.email.email_app_name
+        self._app_name = hs.config.email.email_app_name
+        self._module_api_callbacks = hs.get_module_api_callbacks().account_validity

         self._account_validity_enabled = (
             hs.config.account_validity.account_validity_enabled
@@ -78,69 +68,6 @@ class AccountValidityHandler:
         if hs.config.worker.run_background_tasks:
             self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000)

-        self._is_user_expired_callbacks: List[IS_USER_EXPIRED_CALLBACK] = []
-        self._on_user_registration_callbacks: List[ON_USER_REGISTRATION_CALLBACK] = []
-        self._on_legacy_send_mail_callback: Optional[
-            ON_LEGACY_SEND_MAIL_CALLBACK
-        ] = None
-        self._on_legacy_renew_callback: Optional[ON_LEGACY_RENEW_CALLBACK] = None
-
-        # The legacy admin requests callback isn't a protected attribute because we need
-        # to access it from the admin servlet, which is outside of this handler.
-        self.on_legacy_admin_request_callback: Optional[ON_LEGACY_ADMIN_REQUEST] = None
-
-    def register_account_validity_callbacks(
-        self,
-        is_user_expired: Optional[IS_USER_EXPIRED_CALLBACK] = None,
-        on_user_registration: Optional[ON_USER_REGISTRATION_CALLBACK] = None,
-        on_legacy_send_mail: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None,
-        on_legacy_renew: Optional[ON_LEGACY_RENEW_CALLBACK] = None,
-        on_legacy_admin_request: Optional[ON_LEGACY_ADMIN_REQUEST] = None,
-    ) -> None:
-        """Register callbacks from module for each hook."""
-        if is_user_expired is not None:
-            self._is_user_expired_callbacks.append(is_user_expired)
-
-        if on_user_registration is not None:
-            self._on_user_registration_callbacks.append(on_user_registration)
-
-        # The builtin account validity feature exposes 3 endpoints (send_mail, renew, and
-        # an admin one). As part of moving the feature into a module, we need to change
-        # the path from /_matrix/client/unstable/account_validity/... to
-        # /_synapse/client/account_validity, because:
-        #
-        # * the feature isn't part of the Matrix spec thus shouldn't live under /_matrix
-        # * the way we register servlets means that modules can't register resources
-        #   under /_matrix/client
-        #
-        # We need to allow for a transition period between the old and new endpoints
-        # in order to allow for clients to update (and for emails to be processed).
-        #
-        # Once the email-account-validity module is loaded, it will take control of account
-        # validity by moving the rows from our `account_validity` table into its own table.
-        #
-        # Therefore, we need to allow modules (in practice just the one implementing the
-        # email-based account validity) to temporarily hook into the legacy endpoints so we
-        # can route the traffic coming into the old endpoints into the module, which is
-        # why we have the following three temporary hooks.
-        if on_legacy_send_mail is not None:
-            if self._on_legacy_send_mail_callback is not None:
-                raise RuntimeError("Tried to register on_legacy_send_mail twice")
-
-            self._on_legacy_send_mail_callback = on_legacy_send_mail
-
-        if on_legacy_renew is not None:
-            if self._on_legacy_renew_callback is not None:
-                raise RuntimeError("Tried to register on_legacy_renew twice")
-
-            self._on_legacy_renew_callback = on_legacy_renew
-
-        if on_legacy_admin_request is not None:
-            if self.on_legacy_admin_request_callback is not None:
-                raise RuntimeError("Tried to register on_legacy_admin_request twice")
-
-            self.on_legacy_admin_request_callback = on_legacy_admin_request
-
     async def is_user_expired(self, user_id: str) -> bool:
         """Checks if a user has expired against third-party modules.

@@ -150,7 +77,7 @@ class AccountValidityHandler:
         Returns:
             Whether the user has expired.
         """
-        for callback in self._is_user_expired_callbacks:
+        for callback in self._module_api_callbacks.is_user_expired_callbacks:
             expired = await delay_cancellation(callback(user_id))
             if expired is not None:
                 return expired
@@ -168,7 +95,7 @@ class AccountValidityHandler:
         Args:
             user_id: The ID of the newly registered user.
         """
-        for callback in self._on_user_registration_callbacks:
+        for callback in self._module_api_callbacks.on_user_registration_callbacks:
             await callback(user_id)

     @wrap_as_background_process("send_renewals")
@@ -198,8 +125,8 @@ class AccountValidityHandler:
         """
         # If a module supports sending a renewal email from here, do that, otherwise do
         # the legacy dance.
-        if self._on_legacy_send_mail_callback is not None:
-            await self._on_legacy_send_mail_callback(user_id)
+        if self._module_api_callbacks.on_legacy_send_mail_callback is not None:
+            await self._module_api_callbacks.on_legacy_send_mail_callback(user_id)
             return

         if not self._account_validity_renew_by_email_enabled:
@@ -336,8 +263,10 @@ class AccountValidityHandler:
         """
         # If a module supports triggering a renew from here, do that, otherwise do the
         # legacy dance.
-        if self._on_legacy_renew_callback is not None:
-            return await self._on_legacy_renew_callback(renewal_token)
+        if self._module_api_callbacks.on_legacy_renew_callback is not None:
+            return await self._module_api_callbacks.on_legacy_renew_callback(
+                renewal_token
+            )

         try:
             (

@@ -63,9 +63,18 @@ class EventAuthHandler:
             self._store, event, batched_auth_events
         )
         auth_event_ids = event.auth_event_ids()
-        auth_events_by_id = await self._store.get_events(auth_event_ids)
+
         if batched_auth_events:
-            auth_events_by_id.update(batched_auth_events)
+            # Copy the batched auth events to avoid mutating them.
+            auth_events_by_id = dict(batched_auth_events)
+            needed_auth_event_ids = set(auth_event_ids) - set(batched_auth_events)
+            if needed_auth_event_ids:
+                auth_events_by_id.update(
+                    await self._store.get_events(needed_auth_event_ids)
+                )
+        else:
+            auth_events_by_id = await self._store.get_events(auth_event_ids)

         check_state_dependent_auth_rules(event, auth_events_by_id.values())

     def compute_auth_events(

@@ -987,10 +987,11 @@ class EventCreationHandler:
         # a situation where event persistence can't keep up, causing
         # extremities to pile up, which in turn leads to state resolution
         # taking longer.
-        async with self.limiter.queue(event_dict["room_id"]):
+        room_id = event_dict["room_id"]
+        async with self.limiter.queue(room_id):
             if txn_id:
                 event = await self.get_event_from_transaction(
-                    requester, txn_id, event_dict["room_id"]
+                    requester, txn_id, room_id
                 )
                 if event:
                     # we know it was persisted, so must have a stream ordering
@@ -1000,6 +1001,18 @@ class EventCreationHandler:
                         event.internal_metadata.stream_ordering,
                     )

+            # If we don't have any prev event IDs specified then we need to
+            # check that the host is in the room (as otherwise populating the
+            # prev events will fail), at which point we may as well check the
+            # local user is in the room.
+            if not prev_event_ids:
+                user_id = requester.user.to_string()
+                is_user_in_room = await self.store.check_local_user_in_room(
+                    user_id, room_id
+                )
+                if not is_user_in_room:
+                    raise AuthError(403, f"User {user_id} not in room {room_id}")
+
             # Try several times, it could fail with PartialStateConflictError
             # in handle_new_client_event, cf comment in except block.
             max_retries = 5

@@ -683,7 +683,7 @@ class PaginationHandler:

             await self._storage_controllers.purge_events.purge_room(room_id)

-            logger.info("complete")
+            logger.info("purge complete for room_id %s", room_id)
             self._delete_by_id[delete_id].status = DeleteStatus.STATUS_COMPLETE
         except Exception:
             f = Failure()

@@ -63,7 +63,7 @@ class ProfileHandler:

         self._third_party_rules = hs.get_third_party_event_rules()

-    async def get_profile(self, user_id: str) -> JsonDict:
+    async def get_profile(self, user_id: str, ignore_backoff: bool = True) -> JsonDict:
         target_user = UserID.from_string(user_id)

         if self.hs.is_mine(target_user):
@@ -81,7 +81,7 @@ class ProfileHandler:
                     destination=target_user.domain,
                     query_type="profile",
                     args={"user_id": user_id},
-                    ignore_backoff=True,
+                    ignore_backoff=ignore_backoff,
                 )
                 return result
             except RequestSendFailed as e:

@@ -596,14 +596,20 @@ class RegistrationHandler:
        Args:
            user_id: The user to join
        """
+        # If there are no rooms to auto-join, just bail.
+        if not self.hs.config.registration.auto_join_rooms:
+            return
+
         # auto-join the user to any rooms we're supposed to dump them into

         # try to create the room if we're the first real user on the server. Note
         # that an auto-generated support or bot user is not a real user and will never be
         # the user to create the room
         should_auto_create_rooms = False
-        is_real_user = await self.store.is_real_user(user_id)
-        if self.hs.config.registration.autocreate_auto_join_rooms and is_real_user:
+        if (
+            self.hs.config.registration.autocreate_auto_join_rooms
+            and await self.store.is_real_user(user_id)
+        ):
             count = await self.store.count_real_users()
             should_auto_create_rooms = count == 1

@ -569,7 +569,7 @@ class RoomCreationHandler:
|
||||||
new_room_id,
|
new_room_id,
|
||||||
# we expect to override all the presets with initial_state, so this is
|
# we expect to override all the presets with initial_state, so this is
|
||||||
# somewhat arbitrary.
|
# somewhat arbitrary.
|
||||||
preset_config=RoomCreationPreset.PRIVATE_CHAT,
|
room_config={"preset": RoomCreationPreset.PRIVATE_CHAT},
|
||||||
invite_list=[],
|
invite_list=[],
|
||||||
initial_state=initial_state,
|
initial_state=initial_state,
|
||||||
creation_content=creation_content,
|
creation_content=creation_content,
|
||||||
|
@ -904,13 +904,6 @@ class RoomCreationHandler:
|
||||||
check_membership=False,
|
check_membership=False,
|
||||||
)
|
)
|
||||||
|
|
||||||
preset_config = config.get(
|
|
||||||
"preset",
|
|
||||||
RoomCreationPreset.PRIVATE_CHAT
|
|
||||||
if visibility == "private"
|
|
||||||
else RoomCreationPreset.PUBLIC_CHAT,
|
|
||||||
)
|
|
||||||
|
|
||||||
raw_initial_state = config.get("initial_state", [])
|
raw_initial_state = config.get("initial_state", [])
|
||||||
|
|
||||||
initial_state = OrderedDict()
|
initial_state = OrderedDict()
|
||||||
|
@ -929,7 +922,7 @@ class RoomCreationHandler:
|
||||||
) = await self._send_events_for_new_room(
|
) = await self._send_events_for_new_room(
|
||||||
requester,
|
requester,
|
||||||
room_id,
|
room_id,
|
||||||
preset_config=preset_config,
|
room_config=config,
|
||||||
invite_list=invite_list,
|
invite_list=invite_list,
|
||||||
initial_state=initial_state,
|
initial_state=initial_state,
|
||||||
creation_content=creation_content,
|
creation_content=creation_content,
|
||||||
|
@ -938,48 +931,6 @@ class RoomCreationHandler:
|
||||||
creator_join_profile=creator_join_profile,
|
creator_join_profile=creator_join_profile,
|
||||||
)
|
)
|
||||||
|
|
||||||
if "name" in config:
|
|
||||||
name = config["name"]
|
|
||||||
(
|
|
||||||
name_event,
|
|
||||||
last_stream_id,
|
|
||||||
) = await self.event_creation_handler.create_and_send_nonmember_event(
|
|
||||||
requester,
|
|
||||||
{
|
|
||||||
"type": EventTypes.Name,
|
|
||||||
"room_id": room_id,
|
|
||||||
"sender": user_id,
|
|
||||||
"state_key": "",
|
|
||||||
"content": {"name": name},
|
|
||||||
},
|
|
||||||
ratelimit=False,
|
|
||||||
prev_event_ids=[last_sent_event_id],
|
|
||||||
depth=depth,
|
|
||||||
)
|
|
||||||
last_sent_event_id = name_event.event_id
|
|
||||||
depth += 1
|
|
||||||
|
|
||||||
if "topic" in config:
|
|
||||||
topic = config["topic"]
|
|
||||||
(
|
|
||||||
topic_event,
|
|
||||||
last_stream_id,
|
|
||||||
) = await self.event_creation_handler.create_and_send_nonmember_event(
|
|
||||||
requester,
|
|
||||||
{
|
|
||||||
"type": EventTypes.Topic,
|
|
||||||
"room_id": room_id,
|
|
||||||
"sender": user_id,
|
|
||||||
"state_key": "",
|
|
||||||
"content": {"topic": topic},
|
|
||||||
},
|
|
||||||
ratelimit=False,
|
|
||||||
prev_event_ids=[last_sent_event_id],
|
|
||||||
depth=depth,
|
|
||||||
)
|
|
||||||
last_sent_event_id = topic_event.event_id
|
|
||||||
depth += 1
|
|
||||||
|
|
||||||
# we avoid dropping the lock between invites, as otherwise joins can
|
# we avoid dropping the lock between invites, as otherwise joins can
|
||||||
# start coming in and making the createRoom slow.
|
# start coming in and making the createRoom slow.
|
||||||
#
|
#
|
||||||
|
@ -1047,7 +998,7 @@ class RoomCreationHandler:
|
||||||
self,
|
self,
|
||||||
creator: Requester,
|
creator: Requester,
|
||||||
room_id: str,
|
room_id: str,
|
||||||
preset_config: str,
|
room_config: JsonDict,
|
||||||
invite_list: List[str],
|
invite_list: List[str],
|
||||||
initial_state: MutableStateMap,
|
initial_state: MutableStateMap,
|
||||||
creation_content: JsonDict,
|
creation_content: JsonDict,
|
||||||
|
@ -1064,11 +1015,33 @@ class RoomCreationHandler:
|
||||||
|
|
||||||
Rate limiting should already have been applied by this point.
|
Rate limiting should already have been applied by this point.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
creator:
|
||||||
|
the user requesting the room creation
|
||||||
|
room_id:
|
||||||
|
room id for the room being created
|
||||||
|
room_config:
|
||||||
|
A dict of configuration options. This will be the body of
|
||||||
|
a /createRoom request; see
|
||||||
|
https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom
|
||||||
|
invite_list:
|
||||||
|
a list of user ids to invite to the room
|
||||||
|
initial_state:
|
||||||
|
A list of state events to set in the new room.
|
||||||
|
creation_content:
|
||||||
|
Extra keys, such as m.federate, to be added to the content of the m.room.create event.
|
||||||
|
room_alias:
|
||||||
|
alias for the room
|
||||||
|
power_level_content_override:
|
||||||
|
The power level content to override in the default power level event.
|
||||||
|
creator_join_profile:
|
||||||
|
Set to override the displayname and avatar for the creating
|
||||||
|
user in this room.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
A tuple containing the stream ID, event ID and depth of the last
|
A tuple containing the stream ID, event ID and depth of the last
|
||||||
event sent to the room.
|
event sent to the room.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
creator_id = creator.user.to_string()
|
creator_id = creator.user.to_string()
|
||||||
event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}
|
event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}
|
||||||
depth = 1
|
depth = 1
|
||||||
|
@ -1079,9 +1052,6 @@ class RoomCreationHandler:
|
||||||
# created (but not persisted to the db) to determine state for future created events
|
# created (but not persisted to the db) to determine state for future created events
|
||||||
# (as this info can't be pulled from the db)
|
# (as this info can't be pulled from the db)
|
||||||
state_map: MutableStateMap[str] = {}
|
state_map: MutableStateMap[str] = {}
|
||||||
# current_state_group of last event created. Used for computing event context of
|
|
||||||
# events to be batched
|
|
||||||
current_state_group: Optional[int] = None
|
|
||||||
|
|
||||||
def create_event_dict(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict:
|
def create_event_dict(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict:
|
||||||
e = {"type": etype, "content": content}
|
e = {"type": etype, "content": content}
|
||||||
|
@@ -1123,7 +1093,9 @@ class RoomCreationHandler:
|
||||||
event_dict,
|
event_dict,
|
||||||
prev_event_ids=prev_event,
|
prev_event_ids=prev_event,
|
||||||
depth=depth,
|
depth=depth,
|
||||||
state_map=state_map,
|
# Take a copy to ensure each event gets a unique copy of
|
||||||
|
# state_map since it is modified below.
|
||||||
|
state_map=dict(state_map),
|
||||||
for_batch=for_batch,
|
for_batch=for_batch,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@@ -1133,6 +1105,14 @@ class RoomCreationHandler:
|
||||||
|
|
||||||
return new_event, new_unpersisted_context
|
return new_event, new_unpersisted_context
|
||||||
|
|
||||||
|
visibility = room_config.get("visibility", "private")
|
||||||
|
preset_config = room_config.get(
|
||||||
|
"preset",
|
||||||
|
RoomCreationPreset.PRIVATE_CHAT
|
||||||
|
if visibility == "private"
|
||||||
|
else RoomCreationPreset.PUBLIC_CHAT,
|
||||||
|
)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
config = self._presets_dict[preset_config]
|
config = self._presets_dict[preset_config]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
|
@@ -1284,6 +1264,24 @@ class RoomCreationHandler:
|
||||||
)
|
)
|
||||||
events_to_send.append((encryption_event, encryption_context))
|
events_to_send.append((encryption_event, encryption_context))
|
||||||
|
|
||||||
|
if "name" in room_config:
|
||||||
|
name = room_config["name"]
|
||||||
|
name_event, name_context = await create_event(
|
||||||
|
EventTypes.Name,
|
||||||
|
{"name": name},
|
||||||
|
True,
|
||||||
|
)
|
||||||
|
events_to_send.append((name_event, name_context))
|
||||||
|
|
||||||
|
if "topic" in room_config:
|
||||||
|
topic = room_config["topic"]
|
||||||
|
topic_event, topic_context = await create_event(
|
||||||
|
EventTypes.Topic,
|
||||||
|
{"topic": topic},
|
||||||
|
True,
|
||||||
|
)
|
||||||
|
events_to_send.append((topic_event, topic_context))
|
||||||
|
|
||||||
datastore = self.hs.get_datastores().state
|
datastore = self.hs.get_datastores().state
|
||||||
events_and_context = (
|
events_and_context = (
|
||||||
await UnpersistedEventContext.batch_persist_unpersisted_contexts(
|
await UnpersistedEventContext.batch_persist_unpersisted_contexts(
|
||||||
|
|
|
@@ -1226,6 +1226,10 @@ class SyncHandler:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
event_with_membership_auth = events_with_membership_auth[member]
|
event_with_membership_auth = events_with_membership_auth[member]
|
||||||
|
is_create = (
|
||||||
|
event_with_membership_auth.is_state()
|
||||||
|
and event_with_membership_auth.type == EventTypes.Create
|
||||||
|
)
|
||||||
is_join = (
|
is_join = (
|
||||||
event_with_membership_auth.is_state()
|
event_with_membership_auth.is_state()
|
||||||
and event_with_membership_auth.type == EventTypes.Member
|
and event_with_membership_auth.type == EventTypes.Member
|
||||||
|
@@ -1233,9 +1237,10 @@ class SyncHandler:
|
||||||
and event_with_membership_auth.content.get("membership")
|
and event_with_membership_auth.content.get("membership")
|
||||||
== Membership.JOIN
|
== Membership.JOIN
|
||||||
)
|
)
|
||||||
if not is_join:
|
if not is_create and not is_join:
|
||||||
# The event must include the desired membership as an auth event, unless
|
# The event must include the desired membership as an auth event, unless
|
||||||
# it's the first join event for a given user.
|
# it's the `m.room.create` event for a room or the first join event for
|
||||||
|
# a given user.
|
||||||
missing_members.add(member)
|
missing_members.add(member)
|
||||||
auth_event_ids.update(event_with_membership_auth.auth_event_ids())
|
auth_event_ids.update(event_with_membership_auth.auth_event_ids())
|
||||||
|
|
||||||
|
|
|
@@ -13,21 +13,52 @@
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
|
from http import HTTPStatus
|
||||||
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple
|
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple
|
||||||
|
|
||||||
|
from twisted.internet.interfaces import IDelayedCall
|
||||||
|
|
||||||
import synapse.metrics
|
import synapse.metrics
|
||||||
from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership
|
from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership
|
||||||
|
from synapse.api.errors import Codes, SynapseError
|
||||||
from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
|
from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.storage.databases.main.user_directory import SearchResult
|
from synapse.storage.databases.main.user_directory import SearchResult
|
||||||
from synapse.storage.roommember import ProfileInfo
|
from synapse.storage.roommember import ProfileInfo
|
||||||
|
from synapse.types import UserID
|
||||||
from synapse.util.metrics import Measure
|
from synapse.util.metrics import Measure
|
||||||
|
from synapse.util.retryutils import NotRetryingDestination
|
||||||
|
from synapse.util.stringutils import non_null_str_or_none
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from synapse.server import HomeServer
|
from synapse.server import HomeServer
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Don't refresh a stale user directory entry, using a Federation /profile request,
|
||||||
|
# for 60 seconds. This gives time for other state events to arrive (which will
|
||||||
|
# then be coalesced such that only one /profile request is made).
|
||||||
|
USER_DIRECTORY_STALE_REFRESH_TIME_MS = 60 * 1000
|
||||||
|
|
||||||
|
# Maximum number of remote servers that we will attempt to refresh profiles for
|
||||||
|
# in one go.
|
||||||
|
MAX_SERVERS_TO_REFRESH_PROFILES_FOR_IN_ONE_GO = 5
|
||||||
|
|
||||||
|
# As long as we have servers to refresh (without backoff), keep adding more
|
||||||
|
# every 15 seconds.
|
||||||
|
INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES = 15
|
||||||
|
|
||||||
|
|
||||||
|
def calculate_time_of_next_retry(now_ts: int, retry_count: int) -> int:
|
||||||
|
"""
|
||||||
|
Calculates the time of the next retry given `now_ts` in ms and the number
|
||||||
|
of failures encountered thus far.
|
||||||
|
|
||||||
|
Currently the sequence goes:
|
||||||
|
1 min, 5 min, 25 min, 2 hour, 10 hour, 52 hour, 10 day, 7.75 week
|
||||||
|
"""
|
||||||
|
return now_ts + 60_000 * (5 ** min(retry_count, 7))
|
||||||
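A quick worked sketch (illustrative only, assuming `now_ts = 0`) showing that the formula above produces the sequence listed in the docstring:

    # With now_ts = 0 the return value is just the delay in milliseconds.
    # retry_count 0..7 gives 1 min, 5 min, 25 min, ~2 h, ~10 h, ~52 h,
    # ~10.9 days, ~7.75 weeks; later failures stay capped at the last value.
    for retry_count in range(9):
        delay_minutes = calculate_time_of_next_retry(0, retry_count) // 60_000
        print(retry_count, delay_minutes, "minutes")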
|
|
||||||
|
|
||||||
class UserDirectoryHandler(StateDeltasHandler):
|
class UserDirectoryHandler(StateDeltasHandler):
|
||||||
"""Handles queries and updates for the user_directory.
|
"""Handles queries and updates for the user_directory.
|
||||||
|
@@ -64,12 +95,24 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||||
self.update_user_directory = hs.config.worker.should_update_user_directory
|
self.update_user_directory = hs.config.worker.should_update_user_directory
|
||||||
self.search_all_users = hs.config.userdirectory.user_directory_search_all_users
|
self.search_all_users = hs.config.userdirectory.user_directory_search_all_users
|
||||||
self.spam_checker = hs.get_spam_checker()
|
self.spam_checker = hs.get_spam_checker()
|
||||||
|
self._hs = hs
|
||||||
|
|
||||||
# The current position in the current_state_delta stream
|
# The current position in the current_state_delta stream
|
||||||
self.pos: Optional[int] = None
|
self.pos: Optional[int] = None
|
||||||
|
|
||||||
# Guard to ensure we only process deltas one at a time
|
# Guard to ensure we only process deltas one at a time
|
||||||
self._is_processing = False
|
self._is_processing = False
|
||||||
|
|
||||||
|
# Guard to ensure we only have one process for refreshing remote profiles
|
||||||
|
self._is_refreshing_remote_profiles = False
|
||||||
|
# Handle to cancel the `call_later` of `kick_off_remote_profile_refresh_process`
|
||||||
|
self._refresh_remote_profiles_call_later: Optional[IDelayedCall] = None
|
||||||
|
|
||||||
|
# Guard to ensure we only have one process for refreshing remote profiles
|
||||||
|
# for the given servers.
|
||||||
|
# Set of server names.
|
||||||
|
self._is_refreshing_remote_profiles_for_servers: Set[str] = set()
|
||||||
|
|
||||||
if self.update_user_directory:
|
if self.update_user_directory:
|
||||||
self.notifier.add_replication_callback(self.notify_new_event)
|
self.notifier.add_replication_callback(self.notify_new_event)
|
||||||
|
|
||||||
|
@@ -77,6 +120,11 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||||
# we start populating the user directory
|
# we start populating the user directory
|
||||||
self.clock.call_later(0, self.notify_new_event)
|
self.clock.call_later(0, self.notify_new_event)
|
||||||
|
|
||||||
|
# Kick off the profile refresh process on startup
|
||||||
|
self._refresh_remote_profiles_call_later = self.clock.call_later(
|
||||||
|
10, self.kick_off_remote_profile_refresh_process
|
||||||
|
)
|
||||||
|
|
||||||
async def search_users(
|
async def search_users(
|
||||||
self, user_id: str, search_term: str, limit: int
|
self, user_id: str, search_term: str, limit: int
|
||||||
) -> SearchResult:
|
) -> SearchResult:
|
||||||
|
@@ -200,8 +248,8 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||||
typ = delta["type"]
|
typ = delta["type"]
|
||||||
state_key = delta["state_key"]
|
state_key = delta["state_key"]
|
||||||
room_id = delta["room_id"]
|
room_id = delta["room_id"]
|
||||||
event_id = delta["event_id"]
|
event_id: Optional[str] = delta["event_id"]
|
||||||
prev_event_id = delta["prev_event_id"]
|
prev_event_id: Optional[str] = delta["prev_event_id"]
|
||||||
|
|
||||||
logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
|
logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
|
||||||
|
|
||||||
|
@@ -297,8 +345,8 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||||
async def _handle_room_membership_event(
|
async def _handle_room_membership_event(
|
||||||
self,
|
self,
|
||||||
room_id: str,
|
room_id: str,
|
||||||
prev_event_id: str,
|
prev_event_id: Optional[str],
|
||||||
event_id: str,
|
event_id: Optional[str],
|
||||||
state_key: str,
|
state_key: str,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Process a single room membershp event.
|
"""Process a single room membershp event.
|
||||||
|
@@ -348,7 +396,8 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||||
# Handle any profile changes for remote users.
|
# Handle any profile changes for remote users.
|
||||||
# (For local users the rest of the application calls
|
# (For local users the rest of the application calls
|
||||||
# `handle_local_profile_change`.)
|
# `handle_local_profile_change`.)
|
||||||
if is_remote:
|
# Only process if there is an event_id.
|
||||||
|
if is_remote and event_id is not None:
|
||||||
await self._handle_possible_remote_profile_change(
|
await self._handle_possible_remote_profile_change(
|
||||||
state_key, room_id, prev_event_id, event_id
|
state_key, room_id, prev_event_id, event_id
|
||||||
)
|
)
|
||||||
|
@@ -356,28 +405,12 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||||
# This may be the first time we've seen a remote user. If
|
# This may be the first time we've seen a remote user. If
|
||||||
# so, ensure we have a directory entry for them. (For local users,
|
# so, ensure we have a directory entry for them. (For local users,
|
||||||
# the rest of the application calls `handle_local_profile_change`.)
|
# the rest of the application calls `handle_local_profile_change`.)
|
||||||
if is_remote:
|
# Only process if there is an event_id.
|
||||||
await self._upsert_directory_entry_for_remote_user(state_key, event_id)
|
if is_remote and event_id is not None:
|
||||||
await self._track_user_joined_room(room_id, state_key)
|
await self._handle_possible_remote_profile_change(
|
||||||
|
state_key, room_id, None, event_id
|
||||||
async def _upsert_directory_entry_for_remote_user(
|
|
||||||
self, user_id: str, event_id: str
|
|
||||||
) -> None:
|
|
||||||
"""A remote user has just joined a room. Ensure they have an entry in
|
|
||||||
the user directory. The caller is responsible for making sure they're
|
|
||||||
remote.
|
|
||||||
"""
|
|
||||||
event = await self.store.get_event(event_id, allow_none=True)
|
|
||||||
# It isn't expected for this event to not exist, but we
|
|
||||||
# don't want the entire background process to break.
|
|
||||||
if event is None:
|
|
||||||
return
|
|
||||||
|
|
||||||
logger.debug("Adding new user to dir, %r", user_id)
|
|
||||||
|
|
||||||
await self.store.update_profile_in_user_dir(
|
|
||||||
user_id, event.content.get("displayname"), event.content.get("avatar_url")
|
|
||||||
)
|
)
|
||||||
|
await self._track_user_joined_room(room_id, state_key)
|
||||||
|
|
||||||
async def _track_user_joined_room(self, room_id: str, joining_user_id: str) -> None:
|
async def _track_user_joined_room(self, room_id: str, joining_user_id: str) -> None:
|
||||||
"""Someone's just joined a room. Update `users_in_public_rooms` or
|
"""Someone's just joined a room. Update `users_in_public_rooms` or
|
||||||
|
@@ -460,14 +493,17 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||||
user_id: str,
|
user_id: str,
|
||||||
room_id: str,
|
room_id: str,
|
||||||
prev_event_id: Optional[str],
|
prev_event_id: Optional[str],
|
||||||
event_id: Optional[str],
|
event_id: str,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Check member event changes for any profile changes and update the
|
"""Check member event changes for any profile changes and update the
|
||||||
database if there are. This is intended for remote users only. The caller
|
database if there are. This is intended for remote users only. The caller
|
||||||
is responsible for checking that the given user is remote.
|
is responsible for checking that the given user is remote.
|
||||||
"""
|
"""
|
||||||
if not prev_event_id or not event_id:
|
|
||||||
return
|
if not prev_event_id:
|
||||||
|
# If we don't have an older event to fall back on, just fetch the same
|
||||||
|
# event itself.
|
||||||
|
prev_event_id = event_id
|
||||||
|
|
||||||
prev_event = await self.store.get_event(prev_event_id, allow_none=True)
|
prev_event = await self.store.get_event(prev_event_id, allow_none=True)
|
||||||
event = await self.store.get_event(event_id, allow_none=True)
|
event = await self.store.get_event(event_id, allow_none=True)
|
||||||
|
@@ -478,17 +514,236 @@ class UserDirectoryHandler(StateDeltasHandler):
|
||||||
if event.membership != Membership.JOIN:
|
if event.membership != Membership.JOIN:
|
||||||
return
|
return
|
||||||
|
|
||||||
|
is_public = await self.store.is_room_world_readable_or_publicly_joinable(
|
||||||
|
room_id
|
||||||
|
)
|
||||||
|
if not is_public:
|
||||||
|
# Don't collect user profiles from private rooms as they are not guaranteed
|
||||||
|
# to be the same as the user's global profile.
|
||||||
|
now_ts = self.clock.time_msec()
|
||||||
|
await self.store.set_remote_user_profile_in_user_dir_stale(
|
||||||
|
user_id,
|
||||||
|
next_try_at_ms=now_ts + USER_DIRECTORY_STALE_REFRESH_TIME_MS,
|
||||||
|
retry_counter=0,
|
||||||
|
)
|
||||||
|
# Schedule a wake-up to refresh the user directory for this server.
|
||||||
|
# We intentionally wake up this server directly because we don't want
|
||||||
|
# other servers ahead of it in the queue to get in the way of updating
|
||||||
|
# the profile if the server only just sent us an event.
|
||||||
|
self.clock.call_later(
|
||||||
|
USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1,
|
||||||
|
self.kick_off_remote_profile_refresh_process_for_remote_server,
|
||||||
|
UserID.from_string(user_id).domain,
|
||||||
|
)
|
||||||
|
# Schedule a wake-up to handle any backoffs that may occur in the future.
|
||||||
|
self.clock.call_later(
|
||||||
|
2 * USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1,
|
||||||
|
self.kick_off_remote_profile_refresh_process,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
|
||||||
prev_name = prev_event.content.get("displayname")
|
prev_name = prev_event.content.get("displayname")
|
||||||
new_name = event.content.get("displayname")
|
new_name = event.content.get("displayname")
|
||||||
# If the new name is an unexpected form, do not update the directory.
|
# If the new name is an unexpected form, replace with None.
|
||||||
if not isinstance(new_name, str):
|
if not isinstance(new_name, str):
|
||||||
new_name = prev_name
|
new_name = None
|
||||||
|
|
||||||
prev_avatar = prev_event.content.get("avatar_url")
|
prev_avatar = prev_event.content.get("avatar_url")
|
||||||
new_avatar = event.content.get("avatar_url")
|
new_avatar = event.content.get("avatar_url")
|
||||||
# If the new avatar is an unexpected form, do not update the directory.
|
# If the new avatar is an unexpected form, replace with None.
|
||||||
if not isinstance(new_avatar, str):
|
if not isinstance(new_avatar, str):
|
||||||
new_avatar = prev_avatar
|
new_avatar = None
|
||||||
|
|
||||||
if prev_name != new_name or prev_avatar != new_avatar:
|
if (
|
||||||
|
prev_name != new_name
|
||||||
|
or prev_avatar != new_avatar
|
||||||
|
or prev_event_id == event_id
|
||||||
|
):
|
||||||
|
# Only update if something has changed, or we didn't have a previous event
|
||||||
|
# in the first place.
|
||||||
await self.store.update_profile_in_user_dir(user_id, new_name, new_avatar)
|
await self.store.update_profile_in_user_dir(user_id, new_name, new_avatar)
|
||||||
|
|
||||||
|
def kick_off_remote_profile_refresh_process(self) -> None:
|
||||||
|
"""Called when there may be remote users with stale profiles to be refreshed"""
|
||||||
|
if not self.update_user_directory:
|
||||||
|
return
|
||||||
|
|
||||||
|
if self._is_refreshing_remote_profiles:
|
||||||
|
return
|
||||||
|
|
||||||
|
if self._refresh_remote_profiles_call_later:
|
||||||
|
if self._refresh_remote_profiles_call_later.active():
|
||||||
|
self._refresh_remote_profiles_call_later.cancel()
|
||||||
|
self._refresh_remote_profiles_call_later = None
|
||||||
|
|
||||||
|
async def process() -> None:
|
||||||
|
try:
|
||||||
|
await self._unsafe_refresh_remote_profiles()
|
||||||
|
finally:
|
||||||
|
self._is_refreshing_remote_profiles = False
|
||||||
|
|
||||||
|
self._is_refreshing_remote_profiles = True
|
||||||
|
run_as_background_process("user_directory.refresh_remote_profiles", process)
|
||||||
|
|
||||||
|
async def _unsafe_refresh_remote_profiles(self) -> None:
|
||||||
|
limit = MAX_SERVERS_TO_REFRESH_PROFILES_FOR_IN_ONE_GO - len(
|
||||||
|
self._is_refreshing_remote_profiles_for_servers
|
||||||
|
)
|
||||||
|
if limit <= 0:
|
||||||
|
# nothing to do: already refreshing the maximum number of servers
|
||||||
|
# at once.
|
||||||
|
# Come back later.
|
||||||
|
self._refresh_remote_profiles_call_later = self.clock.call_later(
|
||||||
|
INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES,
|
||||||
|
self.kick_off_remote_profile_refresh_process,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
|
||||||
|
servers_to_refresh = (
|
||||||
|
await self.store.get_remote_servers_with_profiles_to_refresh(
|
||||||
|
now_ts=self.clock.time_msec(), limit=limit
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
if not servers_to_refresh:
|
||||||
|
# Do we have any backing-off servers that we should try again
|
||||||
|
# for eventually?
|
||||||
|
# By setting `now` to a point in the far future, we can ask for
|
||||||
|
# which server/user is next to be refreshed, even though it is
|
||||||
|
# not actually refreshable *now*.
|
||||||
|
end_of_time = 1 << 62
|
||||||
|
backing_off_servers = (
|
||||||
|
await self.store.get_remote_servers_with_profiles_to_refresh(
|
||||||
|
now_ts=end_of_time, limit=1
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if backing_off_servers:
|
||||||
|
# Find out when the next user is refreshable and schedule a
|
||||||
|
# refresh then.
|
||||||
|
backing_off_server_name = backing_off_servers[0]
|
||||||
|
users = await self.store.get_remote_users_to_refresh_on_server(
|
||||||
|
backing_off_server_name, now_ts=end_of_time, limit=1
|
||||||
|
)
|
||||||
|
if not users:
|
||||||
|
return
|
||||||
|
_, _, next_try_at_ts = users[0]
|
||||||
|
self._refresh_remote_profiles_call_later = self.clock.call_later(
|
||||||
|
((next_try_at_ts - self.clock.time_msec()) // 1000) + 2,
|
||||||
|
self.kick_off_remote_profile_refresh_process,
|
||||||
|
)
|
||||||
|
|
||||||
|
return
|
||||||
|
|
||||||
|
for server_to_refresh in servers_to_refresh:
|
||||||
|
self.kick_off_remote_profile_refresh_process_for_remote_server(
|
||||||
|
server_to_refresh
|
||||||
|
)
|
||||||
|
|
||||||
|
self._refresh_remote_profiles_call_later = self.clock.call_later(
|
||||||
|
INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES,
|
||||||
|
self.kick_off_remote_profile_refresh_process,
|
||||||
|
)
|
||||||
|
|
||||||
|
def kick_off_remote_profile_refresh_process_for_remote_server(
|
||||||
|
self, server_name: str
|
||||||
|
) -> None:
|
||||||
|
"""Called when there may be remote users with stale profiles to be refreshed
|
||||||
|
on the given server."""
|
||||||
|
if not self.update_user_directory:
|
||||||
|
return
|
||||||
|
|
||||||
|
if server_name in self._is_refreshing_remote_profiles_for_servers:
|
||||||
|
return
|
||||||
|
|
||||||
|
async def process() -> None:
|
||||||
|
try:
|
||||||
|
await self._unsafe_refresh_remote_profiles_for_remote_server(
|
||||||
|
server_name
|
||||||
|
)
|
||||||
|
finally:
|
||||||
|
self._is_refreshing_remote_profiles_for_servers.remove(server_name)
|
||||||
|
|
||||||
|
self._is_refreshing_remote_profiles_for_servers.add(server_name)
|
||||||
|
run_as_background_process(
|
||||||
|
"user_directory.refresh_remote_profiles_for_remote_server", process
|
||||||
|
)
|
||||||
|
|
||||||
|
async def _unsafe_refresh_remote_profiles_for_remote_server(
|
||||||
|
self, server_name: str
|
||||||
|
) -> None:
|
||||||
|
logger.info("Refreshing profiles in user directory for %s", server_name)
|
||||||
|
|
||||||
|
while True:
|
||||||
|
# Get a handful of users to process.
|
||||||
|
next_batch = await self.store.get_remote_users_to_refresh_on_server(
|
||||||
|
server_name, now_ts=self.clock.time_msec(), limit=10
|
||||||
|
)
|
||||||
|
if not next_batch:
|
||||||
|
# Finished for now
|
||||||
|
return
|
||||||
|
|
||||||
|
for user_id, retry_counter, _ in next_batch:
|
||||||
|
# Request the profile of the user.
|
||||||
|
try:
|
||||||
|
profile = await self._hs.get_profile_handler().get_profile(
|
||||||
|
user_id, ignore_backoff=False
|
||||||
|
)
|
||||||
|
except NotRetryingDestination as e:
|
||||||
|
logger.info(
|
||||||
|
"Failed to refresh profile for %r because the destination is undergoing backoff",
|
||||||
|
user_id,
|
||||||
|
)
|
||||||
|
# As a special-case, we back off until the destination is no longer
|
||||||
|
# backed off from.
|
||||||
|
await self.store.set_remote_user_profile_in_user_dir_stale(
|
||||||
|
user_id,
|
||||||
|
e.retry_last_ts + e.retry_interval,
|
||||||
|
retry_counter=retry_counter + 1,
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
except SynapseError as e:
|
||||||
|
if e.code == HTTPStatus.NOT_FOUND and e.errcode == Codes.NOT_FOUND:
|
||||||
|
# The profile doesn't exist.
|
||||||
|
# TODO Does this mean we should clear it from our user
|
||||||
|
# directory?
|
||||||
|
await self.store.clear_remote_user_profile_in_user_dir_stale(
|
||||||
|
user_id
|
||||||
|
)
|
||||||
|
logger.warning(
|
||||||
|
"Refresh of remote profile %r: not found (%r)",
|
||||||
|
user_id,
|
||||||
|
e.msg,
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
logger.warning(
|
||||||
|
"Failed to refresh profile for %r because %r", user_id, e
|
||||||
|
)
|
||||||
|
await self.store.set_remote_user_profile_in_user_dir_stale(
|
||||||
|
user_id,
|
||||||
|
calculate_time_of_next_retry(
|
||||||
|
self.clock.time_msec(), retry_counter + 1
|
||||||
|
),
|
||||||
|
retry_counter=retry_counter + 1,
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
except Exception:
|
||||||
|
logger.error(
|
||||||
|
"Failed to refresh profile for %r due to unhandled exception",
|
||||||
|
user_id,
|
||||||
|
exc_info=True,
|
||||||
|
)
|
||||||
|
await self.store.set_remote_user_profile_in_user_dir_stale(
|
||||||
|
user_id,
|
||||||
|
calculate_time_of_next_retry(
|
||||||
|
self.clock.time_msec(), retry_counter + 1
|
||||||
|
),
|
||||||
|
retry_counter=retry_counter + 1,
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
await self.store.update_profile_in_user_dir(
|
||||||
|
user_id,
|
||||||
|
display_name=non_null_str_or_none(profile.get("displayname")),
|
||||||
|
avatar_url=non_null_str_or_none(profile.get("avatar_url")),
|
||||||
|
)
|
||||||
|
|
|
@@ -268,8 +268,8 @@ class BlacklistingAgentWrapper(Agent):
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
agent: IAgent,
|
agent: IAgent,
|
||||||
|
ip_blacklist: IPSet,
|
||||||
ip_whitelist: Optional[IPSet] = None,
|
ip_whitelist: Optional[IPSet] = None,
|
||||||
ip_blacklist: Optional[IPSet] = None,
|
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
Args:
|
Args:
|
||||||
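A hedged sketch (the agent and IPSet values below are assumed, not from this diff) of how a caller constructs the wrapper after this signature change, with `ip_blacklist` now a required argument and `ip_whitelist` still optional:

    # Illustrative only: `base_agent` stands in for whatever twisted IAgent is
    # being wrapped; the address ranges are made-up examples.
    from netaddr import IPSet

    wrapped_agent = BlacklistingAgentWrapper(
        base_agent,
        ip_blacklist=IPSet(["10.0.0.0/8", "192.168.0.0/16"]),
        ip_whitelist=IPSet(["10.1.2.3"]),  # optional carve-out from the blacklist
    )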
|
@@ -291,7 +291,9 @@ class BlacklistingAgentWrapper(Agent):
|
||||||
h = urllib.parse.urlparse(uri.decode("ascii"))
|
h = urllib.parse.urlparse(uri.decode("ascii"))
|
||||||
|
|
||||||
try:
|
try:
|
||||||
ip_address = IPAddress(h.hostname)
|
# h.hostname is Optional[str], None raises an AddrFormatError, so
|
||||||
|
# this is safe even though IPAddress requires a str.
|
||||||
|
ip_address = IPAddress(h.hostname) # type: ignore[arg-type]
|
||||||
except AddrFormatError:
|
except AddrFormatError:
|
||||||
# Not an IP
|
# Not an IP
|
||||||
pass
|
pass
|
||||||
|
@@ -388,8 +390,8 @@ class SimpleHttpClient:
|
||||||
# by the DNS resolution.
|
# by the DNS resolution.
|
||||||
self.agent = BlacklistingAgentWrapper(
|
self.agent = BlacklistingAgentWrapper(
|
||||||
self.agent,
|
self.agent,
|
||||||
ip_whitelist=self._ip_whitelist,
|
|
||||||
ip_blacklist=self._ip_blacklist,
|
ip_blacklist=self._ip_blacklist,
|
||||||
|
ip_whitelist=self._ip_whitelist,
|
||||||
)
|
)
|
||||||
|
|
||||||
async def request(
|
async def request(
|
||||||
|
|
|
@@ -87,7 +87,7 @@ class MatrixFederationAgent:
|
||||||
reactor: ISynapseReactor,
|
reactor: ISynapseReactor,
|
||||||
tls_client_options_factory: Optional[FederationPolicyForHTTPS],
|
tls_client_options_factory: Optional[FederationPolicyForHTTPS],
|
||||||
user_agent: bytes,
|
user_agent: bytes,
|
||||||
ip_whitelist: IPSet,
|
ip_whitelist: Optional[IPSet],
|
||||||
ip_blacklist: IPSet,
|
ip_blacklist: IPSet,
|
||||||
_srv_resolver: Optional[SrvResolver] = None,
|
_srv_resolver: Optional[SrvResolver] = None,
|
||||||
_well_known_resolver: Optional[WellKnownResolver] = None,
|
_well_known_resolver: Optional[WellKnownResolver] = None,
|
||||||
|
|
|
@@ -892,6 +892,10 @@ def set_cors_headers(request: SynapseRequest) -> None:
|
||||||
b"Access-Control-Allow-Headers",
|
b"Access-Control-Allow-Headers",
|
||||||
b"X-Requested-With, Content-Type, Authorization, Date",
|
b"X-Requested-With, Content-Type, Authorization, Date",
|
||||||
)
|
)
|
||||||
|
request.setHeader(
|
||||||
|
b"Access-Control-Expose-Headers",
|
||||||
|
b"Synapse-Trace-Id",
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def set_corp_headers(request: Request) -> None:
|
def set_corp_headers(request: Request) -> None:
|
||||||
|
|
|
@@ -0,0 +1,833 @@
|
||||||
|
# Copyright 2016 OpenMarket Ltd
|
||||||
|
# Copyright 2020-2023 The Matrix.org Foundation C.I.C.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
import datetime
|
||||||
|
import errno
|
||||||
|
import fnmatch
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import shutil
|
||||||
|
import sys
|
||||||
|
import traceback
|
||||||
|
from typing import TYPE_CHECKING, BinaryIO, Iterable, Optional, Tuple
|
||||||
|
from urllib.parse import urljoin, urlparse, urlsplit
|
||||||
|
from urllib.request import urlopen
|
||||||
|
|
||||||
|
import attr
|
||||||
|
|
||||||
|
from twisted.internet.defer import Deferred
|
||||||
|
from twisted.internet.error import DNSLookupError
|
||||||
|
|
||||||
|
from synapse.api.errors import Codes, SynapseError
|
||||||
|
from synapse.http.client import SimpleHttpClient
|
||||||
|
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||||
|
from synapse.media._base import FileInfo, get_filename_from_headers
|
||||||
|
from synapse.media.media_storage import MediaStorage
|
||||||
|
from synapse.media.oembed import OEmbedProvider
|
||||||
|
from synapse.media.preview_html import decode_body, parse_html_to_open_graph
|
||||||
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
|
from synapse.types import JsonDict, UserID
|
||||||
|
from synapse.util import json_encoder
|
||||||
|
from synapse.util.async_helpers import ObservableDeferred
|
||||||
|
from synapse.util.caches.expiringcache import ExpiringCache
|
||||||
|
from synapse.util.stringutils import random_string
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from synapse.media.media_repository import MediaRepository
|
||||||
|
from synapse.server import HomeServer
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
OG_TAG_NAME_MAXLEN = 50
|
||||||
|
OG_TAG_VALUE_MAXLEN = 1000
|
||||||
|
|
||||||
|
ONE_HOUR = 60 * 60 * 1000
|
||||||
|
ONE_DAY = 24 * ONE_HOUR
|
||||||
|
IMAGE_CACHE_EXPIRY_MS = 2 * ONE_DAY
|
||||||
|
|
||||||
|
|
||||||
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||||
|
class DownloadResult:
|
||||||
|
length: int
|
||||||
|
uri: str
|
||||||
|
response_code: int
|
||||||
|
media_type: str
|
||||||
|
download_name: Optional[str]
|
||||||
|
expires: int
|
||||||
|
etag: Optional[str]
|
||||||
|
|
||||||
|
|
||||||
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||||
|
class MediaInfo:
|
||||||
|
"""
|
||||||
|
Information parsed from downloading media being previewed.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# The Content-Type header of the response.
|
||||||
|
media_type: str
|
||||||
|
# The length (in bytes) of the downloaded media.
|
||||||
|
media_length: int
|
||||||
|
# The media filename, according to the server. This is parsed from the
|
||||||
|
# returned headers, if possible.
|
||||||
|
download_name: Optional[str]
|
||||||
|
# The time of the preview.
|
||||||
|
created_ts_ms: int
|
||||||
|
# Information from the media storage provider about where the file is stored
|
||||||
|
# on disk.
|
||||||
|
filesystem_id: str
|
||||||
|
filename: str
|
||||||
|
# The URI being previewed.
|
||||||
|
uri: str
|
||||||
|
# The HTTP response code.
|
||||||
|
response_code: int
|
||||||
|
# The timestamp (in milliseconds) of when this preview expires.
|
||||||
|
expires: int
|
||||||
|
# The ETag header of the response.
|
||||||
|
etag: Optional[str]
|
||||||
|
|
||||||
|
|
||||||
|
class UrlPreviewer:
|
||||||
|
"""
|
||||||
|
Generates an Open Graph (https://ogp.me/) response (with some Matrix
|
||||||
|
specific additions) for a given URL.
|
||||||
|
|
||||||
|
When Synapse is asked to preview a URL it does the following:
|
||||||
|
|
||||||
|
1. Checks against a URL blacklist (defined as `url_preview_url_blacklist` in the
|
||||||
|
config).
|
||||||
|
2. Checks the URL against an in-memory cache and returns the result if it exists. (This
|
||||||
|
is also used to de-duplicate processing of multiple in-flight requests at once.)
|
||||||
|
3. Kicks off a background process to generate a preview:
|
||||||
|
1. Checks URL and timestamp against the database cache and returns the result if it
|
||||||
|
has not expired and was successful (a 2xx return code).
|
||||||
|
2. Checks if the URL matches an oEmbed (https://oembed.com/) pattern. If it
|
||||||
|
does, update the URL to download.
|
||||||
|
3. Downloads the URL and stores it into a file via the media storage provider
|
||||||
|
and saves the local media metadata.
|
||||||
|
4. If the media is an image:
|
||||||
|
1. Generates thumbnails.
|
||||||
|
2. Generates an Open Graph response based on image properties.
|
||||||
|
5. If the media is HTML:
|
||||||
|
1. Decodes the HTML via the stored file.
|
||||||
|
2. Generates an Open Graph response from the HTML.
|
||||||
|
3. If a JSON oEmbed URL was found in the HTML via autodiscovery:
|
||||||
|
1. Downloads the URL and stores it into a file via the media storage provider
|
||||||
|
and saves the local media metadata.
|
||||||
|
2. Converts the oEmbed response to an Open Graph response.
|
||||||
|
3. Overrides any Open Graph data from the HTML with data from oEmbed.
|
||||||
|
4. If an image exists in the Open Graph response:
|
||||||
|
1. Downloads the URL and stores it into a file via the media storage
|
||||||
|
provider and saves the local media metadata.
|
||||||
|
2. Generates thumbnails.
|
||||||
|
3. Updates the Open Graph response based on image properties.
|
||||||
|
6. If the media is JSON and an oEmbed URL was found:
|
||||||
|
1. Converts the oEmbed response to an Open Graph response.
|
||||||
|
2. If a thumbnail or image is in the oEmbed response:
|
||||||
|
1. Downloads the URL and stores it into a file via the media storage
|
||||||
|
provider and saves the local media metadata.
|
||||||
|
2. Generates thumbnails.
|
||||||
|
3. Updates the Open Graph response based on image properties.
|
||||||
|
7. Stores the result in the database cache.
|
||||||
|
4. Returns the result.
|
||||||
|
|
||||||
|
If any additional requests (e.g. from oEmbed autodiscovery, step 5.3 or
|
||||||
|
image thumbnailing, step 5.4 or 6.4) fail then the URL preview as a whole
|
||||||
|
does not fail. As much information as possible is returned.
|
||||||
|
|
||||||
|
The in-memory cache expires after 1 hour.
|
||||||
|
|
||||||
|
Expired entries in the database cache (and their associated media files) are
|
||||||
|
deleted every 10 seconds. The default expiration time is 1 hour from download.
|
||||||
|
"""
|
||||||
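For illustration only (assumed configuration values, not part of this diff): step 1 above consults `url_preview_url_blacklist`, whose entries map urlsplit() attributes to glob patterns, or to regexes when a pattern starts with "^"; a URL is blocked when every attribute named in one entry matches, as in this sketch:

    # Illustrative sketch of blacklist entries in the shape the matching loop in
    # preview() below expects (dict of urlsplit attribute name -> pattern).
    example_url_preview_url_blacklist = [
        {"netloc": "*.internal.example.com"},          # glob match on the host
        {"scheme": "http", "path": "^/admin(/.*)?$"},  # "^" switches to regex matching
    ]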
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
hs: "HomeServer",
|
||||||
|
media_repo: "MediaRepository",
|
||||||
|
media_storage: MediaStorage,
|
||||||
|
):
|
||||||
|
self.clock = hs.get_clock()
|
||||||
|
self.filepaths = media_repo.filepaths
|
||||||
|
self.max_spider_size = hs.config.media.max_spider_size
|
||||||
|
self.server_name = hs.hostname
|
||||||
|
self.store = hs.get_datastores().main
|
||||||
|
self.client = SimpleHttpClient(
|
||||||
|
hs,
|
||||||
|
treq_args={"browser_like_redirects": True},
|
||||||
|
ip_whitelist=hs.config.media.url_preview_ip_range_whitelist,
|
||||||
|
ip_blacklist=hs.config.media.url_preview_ip_range_blacklist,
|
||||||
|
use_proxy=True,
|
||||||
|
)
|
||||||
|
self.media_repo = media_repo
|
||||||
|
self.primary_base_path = media_repo.primary_base_path
|
||||||
|
self.media_storage = media_storage
|
||||||
|
|
||||||
|
self._oembed = OEmbedProvider(hs)
|
||||||
|
|
||||||
|
# We run the background jobs if we're the instance specified (or no
|
||||||
|
# instance is specified, where we assume there is only one instance
|
||||||
|
# serving media).
|
||||||
|
instance_running_jobs = hs.config.media.media_instance_running_background_jobs
|
||||||
|
self._worker_run_media_background_jobs = (
|
||||||
|
instance_running_jobs is None
|
||||||
|
or instance_running_jobs == hs.get_instance_name()
|
||||||
|
)
|
||||||
|
|
||||||
|
self.url_preview_url_blacklist = hs.config.media.url_preview_url_blacklist
|
||||||
|
self.url_preview_accept_language = hs.config.media.url_preview_accept_language
|
||||||
|
|
||||||
|
# memory cache mapping urls to an ObservableDeferred returning
|
||||||
|
# JSON-encoded OG metadata
|
||||||
|
self._cache: ExpiringCache[str, ObservableDeferred] = ExpiringCache(
|
||||||
|
cache_name="url_previews",
|
||||||
|
clock=self.clock,
|
||||||
|
# don't spider URLs more often than once an hour
|
||||||
|
expiry_ms=ONE_HOUR,
|
||||||
|
)
|
||||||
|
|
||||||
|
if self._worker_run_media_background_jobs:
|
||||||
|
self._cleaner_loop = self.clock.looping_call(
|
||||||
|
self._start_expire_url_cache_data, 10 * 1000
|
||||||
|
)
|
||||||
|
|
||||||
|
async def preview(self, url: str, user: UserID, ts: int) -> bytes:
|
||||||
|
# XXX: we could move this into _do_preview if we wanted.
|
||||||
|
url_tuple = urlsplit(url)
|
||||||
|
for entry in self.url_preview_url_blacklist:
|
||||||
|
match = True
|
||||||
|
for attrib in entry:
|
||||||
|
pattern = entry[attrib]
|
||||||
|
value = getattr(url_tuple, attrib)
|
||||||
|
logger.debug(
|
||||||
|
"Matching attrib '%s' with value '%s' against pattern '%s'",
|
||||||
|
attrib,
|
||||||
|
value,
|
||||||
|
pattern,
|
||||||
|
)
|
||||||
|
|
||||||
|
if value is None:
|
||||||
|
match = False
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Some attributes might not be parsed as strings by urlsplit (such as the
|
||||||
|
# port, which is parsed as an int). Because we use match functions that
|
||||||
|
# expect strings, we want to make sure that's what we give them.
|
||||||
|
value_str = str(value)
|
||||||
|
|
||||||
|
if pattern.startswith("^"):
|
||||||
|
if not re.match(pattern, value_str):
|
||||||
|
match = False
|
||||||
|
continue
|
||||||
|
else:
|
||||||
|
if not fnmatch.fnmatch(value_str, pattern):
|
||||||
|
match = False
|
||||||
|
continue
|
||||||
|
if match:
|
||||||
|
logger.warning("URL %s blocked by url_blacklist entry %s", url, entry)
|
||||||
|
raise SynapseError(
|
||||||
|
403, "URL blocked by url pattern blacklist entry", Codes.UNKNOWN
|
||||||
|
)
|
||||||
|
|
||||||
|
# the in-memory cache:
|
||||||
|
# * ensures that only one request is active at a time
|
||||||
|
# * takes load off the DB for the thundering herds
|
||||||
|
# * also caches any failures (unlike the DB) so we don't keep
|
||||||
|
# requesting the same endpoint
|
||||||
|
|
||||||
|
observable = self._cache.get(url)
|
||||||
|
|
||||||
|
if not observable:
|
||||||
|
download = run_in_background(self._do_preview, url, user, ts)
|
||||||
|
observable = ObservableDeferred(download, consumeErrors=True)
|
||||||
|
self._cache[url] = observable
|
||||||
|
else:
|
||||||
|
logger.info("Returning cached response")
|
||||||
|
|
||||||
|
return await make_deferred_yieldable(observable.observe())
|
||||||
|
|
||||||
|
async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes:
|
||||||
|
"""Check the db, and download the URL and build a preview
|
||||||
|
|
||||||
|
Args:
|
||||||
|
url: The URL to preview.
|
||||||
|
user: The user requesting the preview.
|
||||||
|
ts: The timestamp requested for the preview.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
json-encoded og data
|
||||||
|
"""
|
||||||
|
# check the URL cache in the DB (which will also provide us with
|
||||||
|
# historical previews, if we have any)
|
||||||
|
cache_result = await self.store.get_url_cache(url, ts)
|
||||||
|
if (
|
||||||
|
cache_result
|
||||||
|
and cache_result["expires_ts"] > ts
|
||||||
|
and cache_result["response_code"] / 100 == 2
|
||||||
|
):
|
||||||
|
# It may be stored as text in the database, not as bytes (such as
|
||||||
|
# PostgreSQL). If so, encode it back before handing it on.
|
||||||
|
og = cache_result["og"]
|
||||||
|
if isinstance(og, str):
|
||||||
|
og = og.encode("utf8")
|
||||||
|
return og
|
||||||
|
|
||||||
|
# If this URL can be accessed via oEmbed, use that instead.
|
||||||
|
url_to_download = url
|
||||||
|
oembed_url = self._oembed.get_oembed_url(url)
|
||||||
|
if oembed_url:
|
||||||
|
url_to_download = oembed_url
|
||||||
|
|
||||||
|
media_info = await self._handle_url(url_to_download, user)
|
||||||
|
|
||||||
|
logger.debug("got media_info of '%s'", media_info)
|
||||||
|
|
||||||
|
# The number of milliseconds that the response should be considered valid.
|
||||||
|
expiration_ms = media_info.expires
|
||||||
|
author_name: Optional[str] = None
|
||||||
|
|
||||||
|
if _is_media(media_info.media_type):
|
||||||
|
file_id = media_info.filesystem_id
|
||||||
|
dims = await self.media_repo._generate_thumbnails(
|
||||||
|
None, file_id, file_id, media_info.media_type, url_cache=True
|
||||||
|
)
|
||||||
|
|
||||||
|
og = {
|
||||||
|
"og:description": media_info.download_name,
|
||||||
|
"og:image": f"mxc://{self.server_name}/{media_info.filesystem_id}",
|
||||||
|
"og:image:type": media_info.media_type,
|
||||||
|
"matrix:image:size": media_info.media_length,
|
||||||
|
}
|
||||||
|
|
||||||
|
if dims:
|
||||||
|
og["og:image:width"] = dims["width"]
|
||||||
|
og["og:image:height"] = dims["height"]
|
||||||
|
else:
|
||||||
|
logger.warning("Couldn't get dims for %s" % url)
|
||||||
|
|
||||||
|
# define our OG response for this media
|
||||||
|
elif _is_html(media_info.media_type):
|
||||||
|
# TODO: somehow stop a big HTML tree from exploding synapse's RAM
|
||||||
|
|
||||||
|
with open(media_info.filename, "rb") as file:
|
||||||
|
body = file.read()
|
||||||
|
|
||||||
|
tree = decode_body(body, media_info.uri, media_info.media_type)
|
||||||
|
if tree is not None:
|
||||||
|
# Check if this HTML document points to oEmbed information and
|
||||||
|
# defer to that.
|
||||||
|
oembed_url = self._oembed.autodiscover_from_html(tree)
|
||||||
|
og_from_oembed: JsonDict = {}
|
||||||
|
if oembed_url:
|
||||||
|
try:
|
||||||
|
oembed_info = await self._handle_url(
|
||||||
|
oembed_url, user, allow_data_urls=True
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
# Fetching the oEmbed info failed, don't block the entire URL preview.
|
||||||
|
logger.warning(
|
||||||
|
"oEmbed fetch failed during URL preview: %s errored with %s",
|
||||||
|
oembed_url,
|
||||||
|
e,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
(
|
||||||
|
og_from_oembed,
|
||||||
|
author_name,
|
||||||
|
expiration_ms,
|
||||||
|
) = await self._handle_oembed_response(
|
||||||
|
url, oembed_info, expiration_ms
|
||||||
|
)
|
||||||
|
|
||||||
|
# Parse Open Graph information from the HTML in case the oEmbed
|
||||||
|
# response failed or is incomplete.
|
||||||
|
og_from_html = parse_html_to_open_graph(tree)
|
||||||
|
|
||||||
|
# Compile the Open Graph response by using the scraped
|
||||||
|
# information from the HTML and overlaying any information
|
||||||
|
# from the oEmbed response.
|
||||||
|
og = {**og_from_html, **og_from_oembed}
|
||||||
|
|
||||||
|
await self._precache_image_url(user, media_info, og)
|
||||||
|
else:
|
||||||
|
og = {}
|
||||||
|
|
||||||
|
elif oembed_url:
|
||||||
|
# Handle the oEmbed information.
|
||||||
|
og, author_name, expiration_ms = await self._handle_oembed_response(
|
||||||
|
url, media_info, expiration_ms
|
||||||
|
)
|
||||||
|
await self._precache_image_url(user, media_info, og)
|
||||||
|
|
||||||
|
else:
|
||||||
|
logger.warning("Failed to find any OG data in %s", url)
|
||||||
|
og = {}
|
||||||
|
|
||||||
|
# If we don't have a title but we have author_name, copy it as
|
||||||
|
# title
|
||||||
|
if not og.get("og:title") and author_name:
|
||||||
|
og["og:title"] = author_name
|
||||||
|
|
||||||
|
# filter out any stupidly long values
|
||||||
|
keys_to_remove = []
|
||||||
|
for k, v in og.items():
|
||||||
|
# values can be numeric as well as strings, hence the cast to str
|
||||||
|
if len(k) > OG_TAG_NAME_MAXLEN or len(str(v)) > OG_TAG_VALUE_MAXLEN:
|
||||||
|
logger.warning(
|
||||||
|
"Pruning overlong tag %s from OG data", k[:OG_TAG_NAME_MAXLEN]
|
||||||
|
)
|
||||||
|
keys_to_remove.append(k)
|
||||||
|
for k in keys_to_remove:
|
||||||
|
del og[k]
|
||||||
|
|
||||||
|
logger.debug("Calculated OG for %s as %s", url, og)
|
||||||
|
|
||||||
|
jsonog = json_encoder.encode(og)
|
||||||
|
|
||||||
|
# Cap the amount of time to consider a response valid.
|
||||||
|
expiration_ms = min(expiration_ms, ONE_DAY)
|
||||||
|
|
||||||
|
# store OG in history-aware DB cache
|
||||||
|
await self.store.store_url_cache(
|
||||||
|
url,
|
||||||
|
media_info.response_code,
|
||||||
|
media_info.etag,
|
||||||
|
media_info.created_ts_ms + expiration_ms,
|
||||||
|
jsonog,
|
||||||
|
media_info.filesystem_id,
|
||||||
|
media_info.created_ts_ms,
|
||||||
|
)
|
||||||
|
|
||||||
|
return jsonog.encode("utf8")
|
||||||
|
|
||||||
|
async def _download_url(self, url: str, output_stream: BinaryIO) -> DownloadResult:
|
||||||
|
"""
|
||||||
|
Fetches a remote URL and parses the headers.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
url: The URL to fetch.
|
||||||
|
output_stream: The stream to write the content to.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
A DownloadResult containing:
|
||||||
|
Media length, URL downloaded, the HTTP response code,
|
||||||
|
the media type, the downloaded file name, the number of
|
||||||
|
milliseconds the result is valid for, the etag header.
|
||||||
|
"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
logger.debug("Trying to get preview for url '%s'", url)
|
||||||
|
length, headers, uri, code = await self.client.get_file(
|
||||||
|
url,
|
||||||
|
output_stream=output_stream,
|
||||||
|
max_size=self.max_spider_size,
|
||||||
|
headers={
|
||||||
|
b"Accept-Language": self.url_preview_accept_language,
|
||||||
|
# Use a custom user agent for the preview because some sites will only return
|
||||||
|
# Open Graph metadata to crawler user agents. Omit the Synapse version
|
||||||
|
# string to avoid leaking information.
|
||||||
|
b"User-Agent": [
|
||||||
|
"Synapse (bot; +https://github.com/matrix-org/synapse)"
|
||||||
|
],
|
||||||
|
},
|
||||||
|
is_allowed_content_type=_is_previewable,
|
||||||
|
)
|
||||||
|
except SynapseError:
|
||||||
|
# Pass SynapseErrors through directly, so that the servlet
|
||||||
|
# handler will return a SynapseError to the client instead of
|
||||||
|
# blank data or a 500.
|
||||||
|
raise
|
||||||
|
except DNSLookupError:
|
||||||
|
# DNS lookup returned no results
|
||||||
|
# Note: This will also be the case if one of the resolved IP
|
||||||
|
# addresses is blacklisted
|
||||||
|
raise SynapseError(
|
||||||
|
502,
|
||||||
|
"DNS resolution failure during URL preview generation",
|
||||||
|
Codes.UNKNOWN,
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
# FIXME: pass through 404s and other error messages nicely
|
||||||
|
logger.warning("Error downloading %s: %r", url, e)
|
||||||
|
|
||||||
|
raise SynapseError(
|
||||||
|
500,
|
||||||
|
"Failed to download content: %s"
|
||||||
|
% (traceback.format_exception_only(sys.exc_info()[0], e),),
|
||||||
|
Codes.UNKNOWN,
|
||||||
|
)
|
||||||
|
|
||||||
|
if b"Content-Type" in headers:
|
||||||
|
media_type = headers[b"Content-Type"][0].decode("ascii")
|
||||||
|
else:
|
||||||
|
media_type = "application/octet-stream"
|
||||||
|
|
||||||
|
download_name = get_filename_from_headers(headers)
|
||||||
|
|
||||||
|
# FIXME: we should calculate a proper expiration based on the
|
||||||
|
# Cache-Control and Expire headers. But for now, assume 1 hour.
|
||||||
|
expires = ONE_HOUR
|
||||||
|
etag = headers[b"ETag"][0].decode("ascii") if b"ETag" in headers else None
|
||||||
|
|
||||||
|
return DownloadResult(
|
||||||
|
length, uri, code, media_type, download_name, expires, etag
|
||||||
|
)
|
||||||
|
|
||||||
|
async def _parse_data_url(
|
||||||
|
self, url: str, output_stream: BinaryIO
|
||||||
|
) -> DownloadResult:
|
||||||
|
"""
|
||||||
|
Parses a data: URL.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
url: The URL to parse.
|
||||||
|
output_stream: The stream to write the content to.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
A DownloadResult containing:
|
||||||
|
Media length, URL downloaded, the HTTP response code,
|
||||||
|
the media type, the downloaded file name, the number of
|
||||||
|
milliseconds the result is valid for, the etag header.
|
||||||
|
"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
logger.debug("Trying to parse data url '%s'", url)
|
||||||
|
with urlopen(url) as url_info:
|
||||||
|
# TODO: Can this be made more efficient?
|
||||||
|
output_stream.write(url_info.read())
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning("Error parsing data: URL %s: %r", url, e)
|
||||||
|
|
||||||
|
raise SynapseError(
|
||||||
|
500,
|
||||||
|
"Failed to parse data URL: %s"
|
||||||
|
% (traceback.format_exception_only(sys.exc_info()[0], e),),
|
||||||
|
Codes.UNKNOWN,
|
||||||
|
)
|
||||||
|
|
||||||
|
return DownloadResult(
|
||||||
|
# Read back the length that has been written.
|
||||||
|
length=output_stream.tell(),
|
||||||
|
uri=url,
|
||||||
|
# If it was parsed, consider this a 200 OK.
|
||||||
|
response_code=200,
|
||||||
|
# urlopen shoves the media-type from the data URL into the content type
|
||||||
|
# header object.
|
||||||
|
media_type=url_info.headers.get_content_type(),
|
||||||
|
# Some features are not supported by data: URLs.
|
||||||
|
download_name=None,
|
||||||
|
expires=ONE_HOUR,
|
||||||
|
etag=None,
|
||||||
|
)
|
||||||
|
|
||||||
|
async def _handle_url(
|
||||||
|
self, url: str, user: UserID, allow_data_urls: bool = False
|
||||||
|
) -> MediaInfo:
|
||||||
|
"""
|
||||||
|
Fetches content from a URL and parses the result to generate a MediaInfo.
|
||||||
|
|
||||||
|
It uses the media storage provider to persist the fetched content and
|
||||||
|
stores the mapping into the database.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
url: The URL to fetch.
|
||||||
|
user: The user who has requested this URL.
|
||||||
|
allow_data_urls: True if data URLs should be allowed.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
A MediaInfo object describing the fetched content.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# TODO: we should probably honour robots.txt... except in practice
|
||||||
|
# we're most likely being explicitly triggered by a human rather than a
|
||||||
|
# bot, so are we really a robot?
|
||||||
|
|
||||||
|
file_id = datetime.date.today().isoformat() + "_" + random_string(16)
|
||||||
|
|
||||||
|
file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True)
|
||||||
|
|
||||||
|
with self.media_storage.store_into_file(file_info) as (f, fname, finish):
|
||||||
|
if url.startswith("data:"):
|
||||||
|
if not allow_data_urls:
|
||||||
|
raise SynapseError(
|
||||||
|
500, "Previewing of data: URLs is forbidden", Codes.UNKNOWN
|
||||||
|
)
|
||||||
|
|
||||||
|
download_result = await self._parse_data_url(url, f)
|
||||||
|
else:
|
||||||
|
download_result = await self._download_url(url, f)
|
||||||
|
|
||||||
|
await finish()
|
||||||
|
|
||||||
|
try:
|
||||||
|
time_now_ms = self.clock.time_msec()
|
||||||
|
|
||||||
|
await self.store.store_local_media(
|
||||||
|
media_id=file_id,
|
||||||
|
media_type=download_result.media_type,
|
||||||
|
time_now_ms=time_now_ms,
|
||||||
|
upload_name=download_result.download_name,
|
||||||
|
media_length=download_result.length,
|
||||||
|
user_id=user,
|
||||||
|
url_cache=url,
|
||||||
|
)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error("Error handling downloaded %s: %r", url, e)
|
||||||
|
# TODO: we really ought to delete the downloaded file in this
|
||||||
|
# case, since we won't have recorded it in the db, and will
|
||||||
|
# therefore not expire it.
|
||||||
|
raise
|
||||||
|
|
||||||
|
return MediaInfo(
|
||||||
|
media_type=download_result.media_type,
|
||||||
|
media_length=download_result.length,
|
||||||
|
download_name=download_result.download_name,
|
||||||
|
created_ts_ms=time_now_ms,
|
||||||
|
filesystem_id=file_id,
|
||||||
|
filename=fname,
|
||||||
|
uri=download_result.uri,
|
||||||
|
response_code=download_result.response_code,
|
||||||
|
expires=download_result.expires,
|
||||||
|
etag=download_result.etag,
|
||||||
|
)
|
||||||
|
|
||||||
|
async def _precache_image_url(
|
||||||
|
self, user: UserID, media_info: MediaInfo, og: JsonDict
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Pre-cache the image (if one exists) for posterity
|
||||||
|
|
||||||
|
Args:
|
||||||
|
user: The user requesting the preview.
|
||||||
|
media_info: The media being previewed.
|
||||||
|
og: The Open Graph dictionary. This is modified with image information.
|
||||||
|
"""
|
||||||
|
# If there's no image or it is blank, there's nothing to do.
|
||||||
|
if "og:image" not in og:
|
||||||
|
return
|
||||||
|
|
||||||
|
# Remove the raw image URL; this will be replaced with an MXC URL, if successful.
|
||||||
|
image_url = og.pop("og:image")
|
||||||
|
if not image_url:
|
||||||
|
return
|
||||||
|
|
||||||
|
# The image URL from the HTML might be relative to the previewed page,
|
||||||
|
# convert it to an URL which can be requested directly.
|
||||||
|
url_parts = urlparse(image_url)
|
||||||
|
if url_parts.scheme != "data":
|
||||||
|
image_url = urljoin(media_info.uri, image_url)
|
||||||
|
|
||||||
|
# FIXME: it might be cleaner to use the same flow as the main /preview_url
|
||||||
|
# request itself and benefit from the same caching etc. But for now we
|
||||||
|
# just rely on the caching on the master request to speed things up.
|
||||||
|
try:
|
||||||
|
image_info = await self._handle_url(image_url, user, allow_data_urls=True)
|
||||||
|
except Exception as e:
|
||||||
|
# Pre-caching the image failed, don't block the entire URL preview.
|
||||||
|
logger.warning(
|
||||||
|
"Pre-caching image failed during URL preview: %s errored with %s",
|
||||||
|
image_url,
|
||||||
|
e,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
|
||||||
|
if _is_media(image_info.media_type):
|
||||||
|
# TODO: make sure we don't choke on white-on-transparent images
|
||||||
|
file_id = image_info.filesystem_id
|
||||||
|
dims = await self.media_repo._generate_thumbnails(
|
||||||
|
None, file_id, file_id, image_info.media_type, url_cache=True
|
||||||
|
)
|
||||||
|
if dims:
|
||||||
|
og["og:image:width"] = dims["width"]
|
||||||
|
og["og:image:height"] = dims["height"]
|
||||||
|
else:
|
||||||
|
logger.warning("Couldn't get dims for %s", image_url)
|
||||||
|
|
||||||
|
og["og:image"] = f"mxc://{self.server_name}/{image_info.filesystem_id}"
|
||||||
|
og["og:image:type"] = image_info.media_type
|
||||||
|
og["matrix:image:size"] = image_info.media_length
|
||||||
|
|
||||||
|
async def _handle_oembed_response(
|
||||||
|
self, url: str, media_info: MediaInfo, expiration_ms: int
|
||||||
|
) -> Tuple[JsonDict, Optional[str], int]:
|
||||||
|
"""
|
||||||
|
Parse the downloaded oEmbed info.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
url: The URL which is being previewed (not the one which was
|
||||||
|
requested).
|
||||||
|
media_info: The media being previewed.
|
||||||
|
expiration_ms: The length of time, in milliseconds, the media is valid for.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
A tuple of:
|
||||||
|
The Open Graph dictionary, if the oEmbed info can be parsed.
|
||||||
|
The author name if it could be retrieved from oEmbed.
|
||||||
|
The (possibly updated) length of time, in milliseconds, the media is valid for.
|
||||||
|
"""
|
||||||
|
# If JSON was not returned, there's nothing to do.
|
||||||
|
if not _is_json(media_info.media_type):
|
||||||
|
return {}, None, expiration_ms
|
||||||
|
|
||||||
|
with open(media_info.filename, "rb") as file:
|
||||||
|
body = file.read()
|
||||||
|
|
||||||
|
oembed_response = self._oembed.parse_oembed_response(url, body)
|
||||||
|
open_graph_result = oembed_response.open_graph_result
|
||||||
|
|
||||||
|
# Use the cache age from the oEmbed result, if one was given.
|
||||||
|
if open_graph_result and oembed_response.cache_age is not None:
|
||||||
|
expiration_ms = oembed_response.cache_age
|
||||||
|
|
||||||
|
return open_graph_result, oembed_response.author_name, expiration_ms
|
||||||
|
|
||||||
|
def _start_expire_url_cache_data(self) -> Deferred:
|
||||||
|
return run_as_background_process(
|
||||||
|
"expire_url_cache_data", self._expire_url_cache_data
|
||||||
|
)
|
||||||
|
|
||||||
|
async def _expire_url_cache_data(self) -> None:
|
||||||
|
"""Clean up expired url cache content, media and thumbnails."""
|
||||||
|
|
||||||
|
assert self._worker_run_media_background_jobs
|
||||||
|
|
||||||
|
now = self.clock.time_msec()
|
||||||
|
|
||||||
|
logger.debug("Running url preview cache expiry")
|
||||||
|
|
||||||
|
def try_remove_parent_dirs(dirs: Iterable[str]) -> None:
|
||||||
|
"""Attempt to remove the given chain of parent directories
|
||||||
|
|
||||||
|
Args:
|
||||||
|
dirs: The list of directory paths to delete, with children appearing
|
||||||
|
before their parents.
|
||||||
|
"""
|
||||||
|
for dir in dirs:
|
||||||
|
try:
|
||||||
|
os.rmdir(dir)
|
||||||
|
except FileNotFoundError:
|
||||||
|
# Already deleted, continue with deleting the rest
|
||||||
|
pass
|
||||||
|
except OSError as e:
|
||||||
|
# Failed, skip deleting the rest of the parent dirs
|
||||||
|
if e.errno != errno.ENOTEMPTY:
|
||||||
|
logger.warning(
|
||||||
|
"Failed to remove media directory while clearing url preview cache: %r: %s",
|
||||||
|
dir,
|
||||||
|
e,
|
||||||
|
)
|
||||||
|
break
|
||||||
|
|
||||||
|
# First we delete expired url cache entries
|
||||||
|
media_ids = await self.store.get_expired_url_cache(now)
|
||||||
|
|
||||||
|
removed_media = []
|
||||||
|
for media_id in media_ids:
|
||||||
|
fname = self.filepaths.url_cache_filepath(media_id)
|
||||||
|
try:
|
||||||
|
os.remove(fname)
|
||||||
|
except FileNotFoundError:
|
||||||
|
pass # If the path doesn't exist, meh
|
||||||
|
except OSError as e:
|
||||||
|
logger.warning(
|
||||||
|
"Failed to remove media while clearing url preview cache: %r: %s",
|
||||||
|
media_id,
|
||||||
|
e,
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
removed_media.append(media_id)
|
||||||
|
|
||||||
|
dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
|
||||||
|
try_remove_parent_dirs(dirs)
|
||||||
|
|
||||||
|
await self.store.delete_url_cache(removed_media)
|
||||||
|
|
||||||
|
if removed_media:
|
||||||
|
logger.debug(
|
||||||
|
"Deleted %d entries from url preview cache", len(removed_media)
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logger.debug("No entries removed from url preview cache")
|
||||||
|
|
||||||
|
# Now we delete old images associated with the url cache.
|
||||||
|
# These may be cached for a bit on the client (i.e., they
|
||||||
|
# may have a room open with a preview url thing open).
|
||||||
|
# So we wait a couple of days before deleting, just in case.
|
||||||
|
expire_before = now - IMAGE_CACHE_EXPIRY_MS
|
||||||
|
media_ids = await self.store.get_url_cache_media_before(expire_before)
|
||||||
|
|
||||||
|
removed_media = []
|
||||||
|
for media_id in media_ids:
|
||||||
|
fname = self.filepaths.url_cache_filepath(media_id)
|
||||||
|
try:
|
||||||
|
os.remove(fname)
|
||||||
|
except FileNotFoundError:
|
||||||
|
pass # If the path doesn't exist, meh
|
||||||
|
except OSError as e:
|
||||||
|
logger.warning(
|
||||||
|
"Failed to remove media from url preview cache: %r: %s", media_id, e
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
|
||||||
|
try_remove_parent_dirs(dirs)
|
||||||
|
|
||||||
|
thumbnail_dir = self.filepaths.url_cache_thumbnail_directory(media_id)
|
||||||
|
try:
|
||||||
|
shutil.rmtree(thumbnail_dir)
|
||||||
|
except FileNotFoundError:
|
||||||
|
pass # If the path doesn't exist, meh
|
||||||
|
except OSError as e:
|
||||||
|
logger.warning(
|
||||||
|
"Failed to remove media from url preview cache: %r: %s", media_id, e
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
removed_media.append(media_id)
|
||||||
|
|
||||||
|
dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(media_id)
|
||||||
|
# Note that one of the directories to be deleted has already been
|
||||||
|
# removed by the `rmtree` above.
|
||||||
|
try_remove_parent_dirs(dirs)
|
||||||
|
|
||||||
|
await self.store.delete_url_cache_media(removed_media)
|
||||||
|
|
||||||
|
if removed_media:
|
||||||
|
logger.debug("Deleted %d media from url preview cache", len(removed_media))
|
||||||
|
else:
|
||||||
|
logger.debug("No media removed from url preview cache")
|
||||||
|
|
||||||
|
|
||||||
|
def _is_media(content_type: str) -> bool:
|
||||||
|
return content_type.lower().startswith("image/")
|
||||||
|
|
||||||
|
|
||||||
|
def _is_html(content_type: str) -> bool:
|
||||||
|
content_type = content_type.lower()
|
||||||
|
return content_type.startswith("text/html") or content_type.startswith(
|
||||||
|
"application/xhtml"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _is_json(content_type: str) -> bool:
|
||||||
|
return content_type.lower().startswith("application/json")
|
||||||
|
|
||||||
|
|
||||||
|
def _is_previewable(content_type: str) -> bool:
|
||||||
|
"""Returns True for content types for which we will perform URL preview and False
|
||||||
|
otherwise."""
|
||||||
|
|
||||||
|
return _is_html(content_type) or _is_media(content_type) or _is_json(content_type)
|
|
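A minimal, illustrative exercise of the module-level helpers defined above; the content types below are made-up examples, not values taken from this change:

# Illustrative only: shows which Content-Type values the helpers accept.
assert _is_html("text/html; charset=utf-8")
assert _is_json("application/json")
assert _is_media("image/png")
assert not _is_previewable("application/pdf")  # no preview is generated for this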
@@ -73,13 +73,6 @@ from synapse.events.third_party_rules import (
    ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK,
)
from synapse.handlers.account_data import ON_ACCOUNT_DATA_UPDATED_CALLBACK
-from synapse.handlers.account_validity import (
-    IS_USER_EXPIRED_CALLBACK,
-    ON_LEGACY_ADMIN_REQUEST,
-    ON_LEGACY_RENEW_CALLBACK,
-    ON_LEGACY_SEND_MAIL_CALLBACK,
-    ON_USER_REGISTRATION_CALLBACK,
-)
from synapse.handlers.auth import (
    CHECK_3PID_AUTH_CALLBACK,
    CHECK_AUTH_CALLBACK,
@@ -105,6 +98,13 @@ from synapse.logging.context import (
    run_in_background,
)
from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.module_api.callbacks.account_validity_callbacks import (
+    IS_USER_EXPIRED_CALLBACK,
+    ON_LEGACY_ADMIN_REQUEST,
+    ON_LEGACY_RENEW_CALLBACK,
+    ON_LEGACY_SEND_MAIL_CALLBACK,
+    ON_USER_REGISTRATION_CALLBACK,
+)
from synapse.rest.client.login import LoginResponse
from synapse.storage import DataStore
from synapse.storage.background_updates import (
@@ -250,6 +250,7 @@ class ModuleApi:
        self._push_rules_handler = hs.get_push_rules_handler()
        self._device_handler = hs.get_device_handler()
        self.custom_template_dir = hs.config.server.custom_template_directory
+        self._callbacks = hs.get_module_api_callbacks()

        try:
            app_name = self._hs.config.email.email_app_name
@@ -271,7 +272,6 @@ class ModuleApi:
        self._account_data_manager = AccountDataManager(hs)

        self._spam_checker = hs.get_spam_checker()
-        self._account_validity_handler = hs.get_account_validity_handler()
        self._third_party_event_rules = hs.get_third_party_event_rules()
        self._password_auth_provider = hs.get_password_auth_provider()
        self._presence_router = hs.get_presence_router()
@@ -332,7 +332,7 @@ class ModuleApi:

        Added in Synapse v1.39.0.
        """
-        return self._account_validity_handler.register_account_validity_callbacks(
+        return self._callbacks.account_validity.register_callbacks(
            is_user_expired=is_user_expired,
            on_user_registration=on_user_registration,
            on_legacy_send_mail=on_legacy_send_mail,
@@ -0,0 +1,22 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.module_api.callbacks.account_validity_callbacks import (
+    AccountValidityModuleApiCallbacks,
+)
+
+
+class ModuleApiCallbacks:
+    def __init__(self) -> None:
+        self.account_validity = AccountValidityModuleApiCallbacks()
@@ -0,0 +1,93 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Awaitable, Callable, List, Optional, Tuple
+
+from twisted.web.http import Request
+
+logger = logging.getLogger(__name__)
+
+# Types for callbacks to be registered via the module api
+IS_USER_EXPIRED_CALLBACK = Callable[[str], Awaitable[Optional[bool]]]
+ON_USER_REGISTRATION_CALLBACK = Callable[[str], Awaitable]
+# Temporary hooks to allow for a transition from `/_matrix/client` endpoints
+# to `/_synapse/client/account_validity`. See `register_callbacks` below.
+ON_LEGACY_SEND_MAIL_CALLBACK = Callable[[str], Awaitable]
+ON_LEGACY_RENEW_CALLBACK = Callable[[str], Awaitable[Tuple[bool, bool, int]]]
+ON_LEGACY_ADMIN_REQUEST = Callable[[Request], Awaitable]
+
+
+class AccountValidityModuleApiCallbacks:
+    def __init__(self) -> None:
+        self.is_user_expired_callbacks: List[IS_USER_EXPIRED_CALLBACK] = []
+        self.on_user_registration_callbacks: List[ON_USER_REGISTRATION_CALLBACK] = []
+        self.on_legacy_send_mail_callback: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None
+        self.on_legacy_renew_callback: Optional[ON_LEGACY_RENEW_CALLBACK] = None
+
+        # The legacy admin requests callback isn't a protected attribute because we need
+        # to access it from the admin servlet, which is outside of this handler.
+        self.on_legacy_admin_request_callback: Optional[ON_LEGACY_ADMIN_REQUEST] = None
+
+    def register_callbacks(
+        self,
+        is_user_expired: Optional[IS_USER_EXPIRED_CALLBACK] = None,
+        on_user_registration: Optional[ON_USER_REGISTRATION_CALLBACK] = None,
+        on_legacy_send_mail: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None,
+        on_legacy_renew: Optional[ON_LEGACY_RENEW_CALLBACK] = None,
+        on_legacy_admin_request: Optional[ON_LEGACY_ADMIN_REQUEST] = None,
+    ) -> None:
+        """Register callbacks from module for each hook."""
+        if is_user_expired is not None:
+            self.is_user_expired_callbacks.append(is_user_expired)
+
+        if on_user_registration is not None:
+            self.on_user_registration_callbacks.append(on_user_registration)
+
+        # The builtin account validity feature exposes 3 endpoints (send_mail, renew, and
+        # an admin one). As part of moving the feature into a module, we need to change
+        # the path from /_matrix/client/unstable/account_validity/... to
+        # /_synapse/client/account_validity, because:
+        #
+        # * the feature isn't part of the Matrix spec thus shouldn't live under /_matrix
+        # * the way we register servlets means that modules can't register resources
+        #   under /_matrix/client
+        #
+        # We need to allow for a transition period between the old and new endpoints
+        # in order to allow for clients to update (and for emails to be processed).
+        #
+        # Once the email-account-validity module is loaded, it will take control of account
+        # validity by moving the rows from our `account_validity` table into its own table.
+        #
+        # Therefore, we need to allow modules (in practice just the one implementing the
+        # email-based account validity) to temporarily hook into the legacy endpoints so we
+        # can route the traffic coming into the old endpoints into the module, which is
+        # why we have the following three temporary hooks.
+        if on_legacy_send_mail is not None:
+            if self.on_legacy_send_mail_callback is not None:
+                raise RuntimeError("Tried to register on_legacy_send_mail twice")
+
+            self.on_legacy_send_mail_callback = on_legacy_send_mail
+
+        if on_legacy_renew is not None:
+            if self.on_legacy_renew_callback is not None:
+                raise RuntimeError("Tried to register on_legacy_renew twice")
+
+            self.on_legacy_renew_callback = on_legacy_renew
+
+        if on_legacy_admin_request is not None:
+            if self.on_legacy_admin_request_callback is not None:
+                raise RuntimeError("Tried to register on_legacy_admin_request twice")
+
+            self.on_legacy_admin_request_callback = on_legacy_admin_request
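For context, a minimal sketch of how a third-party module would reach the hooks above through the public module API; the module class and callback bodies are hypothetical, only `register_account_validity_callbacks` and its keyword arguments come from the code in this change:

# Hypothetical module showing the registration path:
# ModuleApi.register_account_validity_callbacks -> AccountValidityModuleApiCallbacks.register_callbacks
from typing import Optional

from synapse.module_api import ModuleApi


class ExampleAccountValidityModule:
    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        api.register_account_validity_callbacks(
            is_user_expired=self.is_user_expired,
            on_user_registration=self.on_user_registration,
        )

    async def is_user_expired(self, user_id: str) -> Optional[bool]:
        # Return None to let other callbacks (or the default) decide.
        return None

    async def on_user_registration(self, user_id: str) -> None:
        # e.g. start the validity period for the newly registered user.
        pass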
@@ -273,10 +273,7 @@ class BulkPushRuleEvaluator:
                related_event_id, allow_none=True
            )
            if related_event is not None:
-                related_events[relation_type] = _flatten_dict(
-                    related_event,
-                    msc3873_escape_event_match_key=self.hs.config.experimental.msc3873_escape_event_match_key,
-                )
+                related_events[relation_type] = _flatten_dict(related_event)

        reply_event_id = (
            event.content.get("m.relates_to", {})
@@ -291,10 +288,7 @@ class BulkPushRuleEvaluator:
            )

            if related_event is not None:
-                related_events["m.in_reply_to"] = _flatten_dict(
-                    related_event,
-                    msc3873_escape_event_match_key=self.hs.config.experimental.msc3873_escape_event_match_key,
-                )
+                related_events["m.in_reply_to"] = _flatten_dict(related_event)

                # indicate that this is from a fallback relation.
                if relation_type == "m.thread" and event.content.get(
@@ -401,10 +395,7 @@ class BulkPushRuleEvaluator:
        )

        evaluator = PushRuleEvaluator(
-            _flatten_dict(
-                event,
-                msc3873_escape_event_match_key=self.hs.config.experimental.msc3873_escape_event_match_key,
-            ),
+            _flatten_dict(event),
            has_mentions,
            room_member_count,
            sender_power_level,
@@ -413,7 +404,6 @@ class BulkPushRuleEvaluator:
            self._related_event_match_enabled,
            event.room_version.msc3931_push_features,
            self.hs.config.experimental.msc1767_enabled,  # MSC3931 flag
-            self.hs.config.experimental.msc3966_exact_event_property_contains,
        )

        users = rules_by_user.keys()
@@ -495,8 +485,6 @@ def _flatten_dict(
    d: Union[EventBase, Mapping[str, Any]],
    prefix: Optional[List[str]] = None,
    result: Optional[Dict[str, JsonValue]] = None,
-    *,
-    msc3873_escape_event_match_key: bool = False,
) -> Dict[str, JsonValue]:
    """
    Given a JSON dictionary (or event) which might contain sub dictionaries,
@@ -525,7 +513,6 @@ def _flatten_dict(
    if result is None:
        result = {}
    for key, value in d.items():
-        if msc3873_escape_event_match_key:
        # Escape periods in the key with a backslash (and backslashes with an
        # extra backslash). This is since a period is used as a separator between
        # nested fields.
@@ -537,12 +524,7 @@ def _flatten_dict(
            result[".".join(prefix + [key])] = [v for v in value if _is_simple_value(v)]
        elif isinstance(value, Mapping):
            # do not set `room_version` due to recursion considerations below
-            _flatten_dict(
-                value,
-                prefix=(prefix + [key]),
-                result=result,
-                msc3873_escape_event_match_key=msc3873_escape_event_match_key,
-            )
+            _flatten_dict(value, prefix=(prefix + [key]), result=result)

    # `room_version` should only ever be set when looking at the top level of an event
    if (
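For context on the `_flatten_dict` calls being simplified above, a rough sketch of the flattening it performs; the event content below is illustrative, and the exact keys depend on the event fields being flattened:

# Illustrative only: nested event content becomes dotted keys, with periods in
# the original keys escaped (now unconditionally) so they can't be confused
# with the separator. The push rule evaluator matches against these keys.
event_content = {
    "msgtype": "m.text",
    "m.relates_to": {"rel_type": "m.thread", "event_id": "$parent"},
}
# Flattening an event with this content would yield keys along the lines of:
#   "content.msgtype"                      -> "m.text"
#   "content.m\\.relates_to.rel_type"      -> "m.thread"
#   "content.m\\.relates_to.event_id"      -> "$parent"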
@@ -18,16 +18,12 @@ from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple

from twisted.internet import defer
from twisted.internet.defer import Deferred
-from twisted.internet.interfaces import IAddress, IConnector
-from twisted.internet.protocol import ReconnectingClientFactory
-from twisted.python.failure import Failure

from synapse.api.constants import EventTypes, Membership, ReceiptTypes
from synapse.federation import send_queue
from synapse.federation.sender import FederationSender
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol
from synapse.replication.tcp.streams import (
    AccountDataStream,
    DeviceListsStream,
@@ -53,7 +49,6 @@ from synapse.util.async_helpers import Linearizer, timeout_deferred
from synapse.util.metrics import Measure

if TYPE_CHECKING:
-    from synapse.replication.tcp.handler import ReplicationCommandHandler
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)
@@ -62,52 +57,6 @@ logger = logging.getLogger(__name__)
_WAIT_FOR_REPLICATION_TIMEOUT_SECONDS = 5


-class DirectTcpReplicationClientFactory(ReconnectingClientFactory):
-    """Factory for building connections to the master. Will reconnect if the
-    connection is lost.
-
-    Accepts a handler that is passed to `ClientReplicationStreamProtocol`.
-    """
-
-    initialDelay = 0.1
-    maxDelay = 1  # Try at least once every N seconds
-
-    def __init__(
-        self,
-        hs: "HomeServer",
-        client_name: str,
-        command_handler: "ReplicationCommandHandler",
-    ):
-        self.client_name = client_name
-        self.command_handler = command_handler
-        self.server_name = hs.config.server.server_name
-        self.hs = hs
-        self._clock = hs.get_clock()  # As self.clock is defined in super class
-
-        hs.get_reactor().addSystemEventTrigger("before", "shutdown", self.stopTrying)
-
-    def startedConnecting(self, connector: IConnector) -> None:
-        logger.info("Connecting to replication: %r", connector.getDestination())
-
-    def buildProtocol(self, addr: IAddress) -> ClientReplicationStreamProtocol:
-        logger.info("Connected to replication: %r", addr)
-        return ClientReplicationStreamProtocol(
-            self.hs,
-            self.client_name,
-            self.server_name,
-            self._clock,
-            self.command_handler,
-        )
-
-    def clientConnectionLost(self, connector: IConnector, reason: Failure) -> None:
-        logger.error("Lost replication conn: %r", reason)
-        ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
-
-    def clientConnectionFailed(self, connector: IConnector, reason: Failure) -> None:
-        logger.error("Failed to connect to replication: %r", reason)
-        ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
-
-
class ReplicationDataHandler:
    """Handles incoming stream updates from replication.

@@ -625,23 +625,6 @@ class ReplicationCommandHandler:

        self._notifier.notify_remote_server_up(cmd.data)

-        # We relay to all other connections to ensure every instance gets the
-        # notification.
-        #
-        # When configured to use redis we'll always only have one connection and
-        # so this is a no-op (all instances will have already received the same
-        # REMOTE_SERVER_UP command).
-        #
-        # For direct TCP connections this will relay to all other connections
-        # connected to us. When on master this will correctly fan out to all
-        # other direct TCP clients and on workers there'll only be the one
-        # connection to master.
-        #
-        # (The logic here should also be sound if we have a mix of Redis and
-        # direct TCP connections so long as there is only one traffic route
-        # between two instances, but that is not currently supported).
-        self.send_command(cmd, ignore_conn=conn)
-
    def new_connection(self, connection: IReplicationConnection) -> None:
        """Called when we have a new connection."""
        self._connections.append(connection)
@@ -689,21 +672,14 @@ class ReplicationCommandHandler:
        """
        return bool(self._connections)

-    def send_command(
-        self, cmd: Command, ignore_conn: Optional[IReplicationConnection] = None
-    ) -> None:
+    def send_command(self, cmd: Command) -> None:
        """Send a command to all connected connections.

        Args:
            cmd
-            ignore_conn: If set don't send command to the given connection.
-                Used when relaying commands from one connection to all others.
        """
        if self._connections:
            for connection in self._connections:
-                if connection == ignore_conn:
-                    continue
-
                try:
                    connection.send_command(cmd)
                except Exception:
@@ -20,6 +20,7 @@ from synapse.rest.client import (
    account,
    account_data,
    account_validity,
+    appservice_ping,
    auth,
    capabilities,
    devices,
@@ -140,6 +141,7 @@ class ClientRestResource(JsonResource):
        if is_main_process:
            password_policy.register_servlets(hs, client_resource)
            knock.register_servlets(hs, client_resource)
+        appservice_ping.register_servlets(hs, client_resource)

        # moving to /_synapse/admin
        if is_main_process:
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from http import HTTPStatus
-from typing import TYPE_CHECKING, Awaitable, Optional, Tuple
+from typing import TYPE_CHECKING, Optional, Tuple

from synapse.api.constants import EventTypes
from synapse.api.errors import NotFoundError, SynapseError
@@ -23,10 +23,10 @@ from synapse.http.servlet import (
    parse_json_object_from_request,
)
from synapse.http.site import SynapseRequest
-from synapse.rest.admin import assert_requester_is_admin
-from synapse.rest.admin._base import admin_patterns
+from synapse.logging.opentracing import set_tag
+from synapse.rest.admin._base import admin_patterns, assert_user_is_admin
from synapse.rest.client.transactions import HttpTransactionCache
-from synapse.types import JsonDict, UserID
+from synapse.types import JsonDict, Requester, UserID

if TYPE_CHECKING:
    from synapse.server import HomeServer
@@ -70,10 +70,13 @@ class SendServerNoticeServlet(RestServlet):
            self.__class__.__name__,
        )

-    async def on_POST(
-        self, request: SynapseRequest, txn_id: Optional[str] = None
+    async def _do(
+        self,
+        request: SynapseRequest,
+        requester: Requester,
+        txn_id: Optional[str],
    ) -> Tuple[int, JsonDict]:
-        await assert_requester_is_admin(self.auth, request)
+        await assert_user_is_admin(self.auth, requester)
        body = parse_json_object_from_request(request)
        assert_params_in_dict(body, ("user_id", "content"))
        event_type = body.get("type", EventTypes.Message)
@@ -106,9 +109,18 @@ class SendServerNoticeServlet(RestServlet):

        return HTTPStatus.OK, {"event_id": event.event_id}

-    def on_PUT(
+    async def on_POST(
+        self,
+        request: SynapseRequest,
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request)
+        return await self._do(request, requester, None)
+
+    async def on_PUT(
        self, request: SynapseRequest, txn_id: str
-    ) -> Awaitable[Tuple[int, JsonDict]]:
-        return self.txns.fetch_or_execute_request(
-            request, self.on_POST, request, txn_id
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request)
+        set_tag("txn_id", txn_id)
+        return await self.txns.fetch_or_execute_request(
+            request, requester, self._do, request, requester, txn_id
        )
@@ -683,20 +683,19 @@ class AccountValidityRenewServlet(RestServlet):
    PATTERNS = admin_patterns("/account_validity/validity$")

    def __init__(self, hs: "HomeServer"):
-        self.account_activity_handler = hs.get_account_validity_handler()
+        self.account_validity_handler = hs.get_account_validity_handler()
+        self.account_validity_module_callbacks = (
+            hs.get_module_api_callbacks().account_validity
+        )
        self.auth = hs.get_auth()

    async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
        await assert_requester_is_admin(self.auth, request)

-        if self.account_activity_handler.on_legacy_admin_request_callback:
-            expiration_ts = (
-                await (
-                    self.account_activity_handler.on_legacy_admin_request_callback(
-                        request
-                    )
-                )
-            )
+        if self.account_validity_module_callbacks.on_legacy_admin_request_callback:
+            expiration_ts = await self.account_validity_module_callbacks.on_legacy_admin_request_callback(
+                request
+            )
        else:
            body = parse_json_object_from_request(request)

@@ -706,7 +705,7 @@ class AccountValidityRenewServlet(RestServlet):
                    "Missing property 'user_id' in the request body",
                )

-            expiration_ts = await self.account_activity_handler.renew_account_for_user(
+            expiration_ts = await self.account_validity_handler.renew_account_for_user(
                body["user_id"],
                body.get("expiration_ts"),
                not body.get("enable_renewal_emails", True),
@@ -0,0 +1,115 @@
+# Copyright 2023 Tulir Asokan
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+from http import HTTPStatus
+from typing import TYPE_CHECKING, Any, Dict, Tuple
+
+from synapse.api.errors import (
+    CodeMessageException,
+    Codes,
+    HttpResponseException,
+    SynapseError,
+)
+from synapse.http import RequestTimedOutError
+from synapse.http.server import HttpServer
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
+
+from ._base import client_patterns
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class AppservicePingRestServlet(RestServlet):
+    PATTERNS = client_patterns(
+        "/fi.mau.msc2659/appservice/(?P<appservice_id>[^/]*)/ping",
+        unstable=True,
+        releases=(),
+    )
+
+    def __init__(self, hs: "HomeServer"):
+        super().__init__()
+        self.as_api = hs.get_application_service_api()
+        self.auth = hs.get_auth()
+
+    async def on_POST(
+        self, request: SynapseRequest, appservice_id: str
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request)
+
+        if not requester.app_service:
+            raise SynapseError(
+                HTTPStatus.FORBIDDEN,
+                "Only application services can use the /appservice/ping endpoint",
+                Codes.FORBIDDEN,
+            )
+        elif requester.app_service.id != appservice_id:
+            raise SynapseError(
+                HTTPStatus.FORBIDDEN,
+                "Mismatching application service ID in path",
+                Codes.FORBIDDEN,
+            )
+        elif not requester.app_service.url:
+            raise SynapseError(
+                HTTPStatus.BAD_REQUEST,
+                "The application service does not have a URL set",
+                Codes.AS_PING_URL_NOT_SET,
+            )
+
+        content = parse_json_object_from_request(request)
+        txn_id = content.get("transaction_id", None)
+
+        start = time.monotonic()
+        try:
+            await self.as_api.ping(requester.app_service, txn_id)
+        except RequestTimedOutError as e:
+            raise SynapseError(
+                HTTPStatus.GATEWAY_TIMEOUT,
+                e.msg,
+                Codes.AS_PING_CONNECTION_TIMEOUT,
+            )
+        except CodeMessageException as e:
+            additional_fields: Dict[str, Any] = {"status": e.code}
+            if isinstance(e, HttpResponseException):
+                try:
+                    additional_fields["body"] = e.response.decode("utf-8")
+                except UnicodeDecodeError:
+                    pass
+            raise SynapseError(
+                HTTPStatus.BAD_GATEWAY,
+                f"HTTP {e.code} {e.msg}",
+                Codes.AS_PING_BAD_STATUS,
+                additional_fields=additional_fields,
+            )
+        except Exception as e:
+            raise SynapseError(
+                HTTPStatus.BAD_GATEWAY,
+                f"{type(e).__name__}: {e}",
+                Codes.AS_PING_CONNECTION_FAILED,
+            )
+
+        duration = time.monotonic() - start
+
+        return HTTPStatus.OK, {"duration": int(duration * 1000)}
+
+
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+    if hs.config.experimental.msc2659_enabled:
+        AppservicePingRestServlet(hs).register(http_server)
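For reference, a minimal sketch of how an application service might call the unstable MSC2659 endpoint registered above; the homeserver URL, appservice ID and token are placeholders, and the path prefix follows from `client_patterns(..., unstable=True)` mapping to `/_matrix/client/unstable`:

# Illustrative only: placeholder server, appservice id and token.
import requests

resp = requests.post(
    "https://homeserver.example/_matrix/client/unstable/fi.mau.msc2659"
    "/appservice/my_appservice_id/ping",
    headers={"Authorization": "Bearer <as_token>"},
    json={"transaction_id": "ping-1"},
)
print(resp.status_code, resp.json())  # e.g. 200 {"duration": 123} on success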
@@ -956,7 +956,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
    if hs.config.worker.worker_app is None:
        EmailRegisterRequestTokenRestServlet(hs).register(http_server)
        MsisdnRegisterRequestTokenRestServlet(hs).register(http_server)
-        UsernameAvailabilityRestServlet(hs).register(http_server)
        RegistrationSubmitTokenServlet(hs).register(http_server)
+    UsernameAvailabilityRestServlet(hs).register(http_server)
    RegistrationTokenValidityRestServlet(hs).register(http_server)
    RegisterRestServlet(hs).register(http_server)
@@ -57,7 +57,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.rest.client._base import client_patterns
from synapse.rest.client.transactions import HttpTransactionCache
from synapse.streams.config import PaginationConfig
-from synapse.types import JsonDict, StreamToken, ThirdPartyInstanceID, UserID
+from synapse.types import JsonDict, Requester, StreamToken, ThirdPartyInstanceID, UserID
from synapse.types.state import StateFilter
from synapse.util import json_decoder
from synapse.util.cancellation import cancellable
@@ -151,15 +151,22 @@ class RoomCreateRestServlet(TransactionRestServlet):
        PATTERNS = "/createRoom"
        register_txn_path(self, PATTERNS, http_server)

-    def on_PUT(
+    async def on_PUT(
        self, request: SynapseRequest, txn_id: str
-    ) -> Awaitable[Tuple[int, JsonDict]]:
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request)
        set_tag("txn_id", txn_id)
-        return self.txns.fetch_or_execute_request(request, self.on_POST, request)
+        return await self.txns.fetch_or_execute_request(
+            request, requester, self._do, request, requester
+        )

    async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
        requester = await self.auth.get_user_by_req(request)
+        return await self._do(request, requester)
+
+    async def _do(
+        self, request: SynapseRequest, requester: Requester
+    ) -> Tuple[int, JsonDict]:
        room_id, _, _ = await self._room_creation_handler.create_room(
            requester, self.get_room_config(request)
        )
@@ -172,9 +179,9 @@ class RoomCreateRestServlet(TransactionRestServlet):


# TODO: Needs unit testing for generic events
-class RoomStateEventRestServlet(TransactionRestServlet):
+class RoomStateEventRestServlet(RestServlet):
    def __init__(self, hs: "HomeServer"):
-        super().__init__(hs)
+        super().__init__()
        self.event_creation_handler = hs.get_event_creation_handler()
        self.room_member_handler = hs.get_room_member_handler()
        self.message_handler = hs.get_message_handler()
@@ -324,16 +331,16 @@ class RoomSendEventRestServlet(TransactionRestServlet):
    def register(self, http_server: HttpServer) -> None:
        # /rooms/$roomid/send/$event_type[/$txn_id]
        PATTERNS = "/rooms/(?P<room_id>[^/]*)/send/(?P<event_type>[^/]*)"
-        register_txn_path(self, PATTERNS, http_server, with_get=True)
+        register_txn_path(self, PATTERNS, http_server)

-    async def on_POST(
+    async def _do(
        self,
        request: SynapseRequest,
+        requester: Requester,
        room_id: str,
        event_type: str,
-        txn_id: Optional[str] = None,
+        txn_id: Optional[str],
    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        content = parse_json_object_from_request(request)

        event_dict: JsonDict = {
@@ -362,18 +369,30 @@ class RoomSendEventRestServlet(TransactionRestServlet):
        set_tag("event_id", event_id)
        return 200, {"event_id": event_id}

-    def on_GET(
-        self, request: SynapseRequest, room_id: str, event_type: str, txn_id: str
-    ) -> Tuple[int, str]:
-        return 200, "Not implemented"
+    async def on_POST(
+        self,
+        request: SynapseRequest,
+        room_id: str,
+        event_type: str,
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
+        return await self._do(request, requester, room_id, event_type, None)

-    def on_PUT(
+    async def on_PUT(
        self, request: SynapseRequest, room_id: str, event_type: str, txn_id: str
-    ) -> Awaitable[Tuple[int, JsonDict]]:
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        set_tag("txn_id", txn_id)

-        return self.txns.fetch_or_execute_request(
-            request, self.on_POST, request, room_id, event_type, txn_id
+        return await self.txns.fetch_or_execute_request(
+            request,
+            requester,
+            self._do,
+            request,
+            requester,
+            room_id,
+            event_type,
+            txn_id,
        )


@@ -389,14 +408,13 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
        PATTERNS = "/join/(?P<room_identifier>[^/]*)"
        register_txn_path(self, PATTERNS, http_server)

-    async def on_POST(
+    async def _do(
        self,
        request: SynapseRequest,
+        requester: Requester,
        room_identifier: str,
-        txn_id: Optional[str] = None,
+        txn_id: Optional[str],
    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-
        content = parse_json_object_from_request(request, allow_empty_body=True)

        # twisted.web.server.Request.args is incorrectly defined as Optional[Any]
@@ -420,22 +438,31 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):

        return 200, {"room_id": room_id}

-    def on_PUT(
+    async def on_POST(
+        self,
+        request: SynapseRequest,
+        room_identifier: str,
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
+        return await self._do(request, requester, room_identifier, None)
+
+    async def on_PUT(
        self, request: SynapseRequest, room_identifier: str, txn_id: str
-    ) -> Awaitable[Tuple[int, JsonDict]]:
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        set_tag("txn_id", txn_id)

-        return self.txns.fetch_or_execute_request(
-            request, self.on_POST, request, room_identifier, txn_id
+        return await self.txns.fetch_or_execute_request(
+            request, requester, self._do, request, requester, room_identifier, txn_id
        )


# TODO: Needs unit testing
-class PublicRoomListRestServlet(TransactionRestServlet):
+class PublicRoomListRestServlet(RestServlet):
    PATTERNS = client_patterns("/publicRooms$", v1=True)

    def __init__(self, hs: "HomeServer"):
-        super().__init__(hs)
+        super().__init__()
        self.hs = hs
        self.auth = hs.get_auth()

@@ -907,22 +934,25 @@ class RoomForgetRestServlet(TransactionRestServlet):
        PATTERNS = "/rooms/(?P<room_id>[^/]*)/forget"
        register_txn_path(self, PATTERNS, http_server)

-    async def on_POST(
-        self, request: SynapseRequest, room_id: str, txn_id: Optional[str] = None
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=False)
-
+    async def _do(self, requester: Requester, room_id: str) -> Tuple[int, JsonDict]:
        await self.room_member_handler.forget(user=requester.user, room_id=room_id)

        return 200, {}

-    def on_PUT(
+    async def on_POST(
+        self, request: SynapseRequest, room_id: str
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request, allow_guest=False)
+        return await self._do(requester, room_id)
+
+    async def on_PUT(
        self, request: SynapseRequest, room_id: str, txn_id: str
-    ) -> Awaitable[Tuple[int, JsonDict]]:
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request, allow_guest=False)
        set_tag("txn_id", txn_id)

-        return self.txns.fetch_or_execute_request(
-            request, self.on_POST, request, room_id, txn_id
+        return await self.txns.fetch_or_execute_request(
+            request, requester, self._do, requester, room_id
        )


@@ -941,15 +971,14 @@ class RoomMembershipRestServlet(TransactionRestServlet):
        )
        register_txn_path(self, PATTERNS, http_server)

-    async def on_POST(
+    async def _do(
        self,
        request: SynapseRequest,
+        requester: Requester,
        room_id: str,
        membership_action: str,
-        txn_id: Optional[str] = None,
+        txn_id: Optional[str],
    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-
        if requester.is_guest and membership_action not in {
            Membership.JOIN,
            Membership.LEAVE,
@@ -1014,13 +1043,30 @@ class RoomMembershipRestServlet(TransactionRestServlet):

        return 200, return_value

-    def on_PUT(
+    async def on_POST(
+        self,
+        request: SynapseRequest,
+        room_id: str,
+        membership_action: str,
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
+        return await self._do(request, requester, room_id, membership_action, None)
+
+    async def on_PUT(
        self, request: SynapseRequest, room_id: str, membership_action: str, txn_id: str
-    ) -> Awaitable[Tuple[int, JsonDict]]:
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        set_tag("txn_id", txn_id)

-        return self.txns.fetch_or_execute_request(
-            request, self.on_POST, request, room_id, membership_action, txn_id
+        return await self.txns.fetch_or_execute_request(
+            request,
+            requester,
+            self._do,
+            request,
+            requester,
+            room_id,
+            membership_action,
+            txn_id,
        )


@@ -1036,14 +1082,14 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
        PATTERNS = "/rooms/(?P<room_id>[^/]*)/redact/(?P<event_id>[^/]*)"
        register_txn_path(self, PATTERNS, http_server)

-    async def on_POST(
+    async def _do(
        self,
        request: SynapseRequest,
+        requester: Requester,
        room_id: str,
        event_id: str,
-        txn_id: Optional[str] = None,
+        txn_id: Optional[str],
    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
        content = parse_json_object_from_request(request)

        try:
@@ -1094,13 +1140,23 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
        set_tag("event_id", event_id)
        return 200, {"event_id": event_id}

-    def on_PUT(
+    async def on_POST(
+        self,
+        request: SynapseRequest,
+        room_id: str,
+        event_id: str,
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request)
+        return await self._do(request, requester, room_id, event_id, None)
+
+    async def on_PUT(
        self, request: SynapseRequest, room_id: str, event_id: str, txn_id: str
-    ) -> Awaitable[Tuple[int, JsonDict]]:
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request)
        set_tag("txn_id", txn_id)

-        return self.txns.fetch_or_execute_request(
-            request, self.on_POST, request, room_id, event_id, txn_id
+        return await self.txns.fetch_or_execute_request(
+            request, requester, self._do, request, requester, room_id, event_id, txn_id
        )


@@ -1224,7 +1280,6 @@ def register_txn_path(
    servlet: RestServlet,
    regex_string: str,
    http_server: HttpServer,
-    with_get: bool = False,
) -> None:
    """Registers a transaction-based path.

@@ -1236,7 +1291,6 @@ def register_txn_path(
        regex_string: The regex string to register. Must NOT have a
            trailing $ as this string will be appended to.
        http_server: The http_server to register paths with.
-        with_get: True to also register respective GET paths for the PUTs.
    """
    on_POST = getattr(servlet, "on_POST", None)
    on_PUT = getattr(servlet, "on_PUT", None)
@@ -1254,18 +1308,6 @@ def register_txn_path(
            on_PUT,
            servlet.__class__.__name__,
        )
-    on_GET = getattr(servlet, "on_GET", None)
-    if with_get:
-        if on_GET is None:
-            raise RuntimeError(
-                "register_txn_path called with with_get = True, but no on_GET method exists"
-            )
-        http_server.register_paths(
-            "GET",
-            client_patterns(regex_string + "/(?P<txn_id>[^/]*)$", v1=True),
-            on_GET,
-            servlet.__class__.__name__,
-        )


class TimestampLookupRestServlet(RestServlet):
@@ -13,7 +13,7 @@
# limitations under the License.

import logging
-from typing import TYPE_CHECKING, Awaitable, Tuple
+from typing import TYPE_CHECKING, Tuple

from synapse.http import servlet
from synapse.http.server import HttpServer
@@ -21,7 +21,7 @@ from synapse.http.servlet import assert_params_in_dict, parse_json_object_from_r
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import set_tag
from synapse.rest.client.transactions import HttpTransactionCache
-from synapse.types import JsonDict
+from synapse.types import JsonDict, Requester

from ._base import client_patterns

@@ -43,19 +43,26 @@ class SendToDeviceRestServlet(servlet.RestServlet):
        self.txns = HttpTransactionCache(hs)
        self.device_message_handler = hs.get_device_message_handler()

-    def on_PUT(
-        self, request: SynapseRequest, message_type: str, txn_id: str
-    ) -> Awaitable[Tuple[int, JsonDict]]:
-        set_tag("txn_id", txn_id)
-        return self.txns.fetch_or_execute_request(
-            request, self._put, request, message_type, txn_id
-        )
-
-    async def _put(
+    async def on_PUT(
        self, request: SynapseRequest, message_type: str, txn_id: str
    ) -> Tuple[int, JsonDict]:
        requester = await self.auth.get_user_by_req(request, allow_guest=True)
+        set_tag("txn_id", txn_id)
+        return await self.txns.fetch_or_execute_request(
+            request,
+            requester,
+            self._put,
+            request,
+            requester,
+            message_type,
+        )
+
+    async def _put(
+        self,
+        request: SynapseRequest,
+        requester: Requester,
+        message_type: str,
+    ) -> Tuple[int, JsonDict]:
        content = parse_json_object_from_request(request)
        assert_params_in_dict(content, ("messages",))

@@ -15,16 +15,16 @@
 """This module contains logic for storing HTTP PUT transactions. This is used
 to ensure idempotency when performing PUTs using the REST API."""
 import logging
-from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Tuple
+from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Hashable, Tuple

 from typing_extensions import ParamSpec

 from twisted.internet.defer import Deferred
 from twisted.python.failure import Failure
-from twisted.web.server import Request
+from twisted.web.iweb import IRequest

 from synapse.logging.context import make_deferred_yieldable, run_in_background
-from synapse.types import JsonDict
+from synapse.types import JsonDict, Requester
 from synapse.util.async_helpers import ObservableDeferred

 if TYPE_CHECKING:
@@ -41,53 +41,47 @@ P = ParamSpec("P")
 class HttpTransactionCache:
     def __init__(self, hs: "HomeServer"):
         self.hs = hs
-        self.auth = self.hs.get_auth()
         self.clock = self.hs.get_clock()
         # $txn_key: (ObservableDeferred<(res_code, res_json_body)>, timestamp)
         self.transactions: Dict[
-            str, Tuple[ObservableDeferred[Tuple[int, JsonDict]], int]
+            Hashable, Tuple[ObservableDeferred[Tuple[int, JsonDict]], int]
         ] = {}
         # Try to clean entries every 30 mins. This means entries will exist
         # for at *LEAST* 30 mins, and at *MOST* 60 mins.
         self.cleaner = self.clock.looping_call(self._cleanup, CLEANUP_PERIOD_MS)

-    def _get_transaction_key(self, request: Request) -> str:
+    def _get_transaction_key(self, request: IRequest, requester: Requester) -> Hashable:
         """A helper function which returns a transaction key that can be used
         with TransactionCache for idempotent requests.

         Idempotency is based on the returned key being the same for separate
         requests to the same endpoint. The key is formed from the HTTP request
-        path and the access_token for the requesting user.
+        path and attributes from the requester: the access_token_id for regular users,
+        the user ID for guest users, and the appservice ID for appservice users.

         Args:
-            request: The incoming request. Must contain an access_token.
+            request: The incoming request.
+            requester: The requester doing the request.
         Returns:
             A transaction key
         """
         assert request.path is not None
-        token = self.auth.get_access_token_from_request(request)
-        return request.path.decode("utf8") + "/" + token
+        path: str = request.path.decode("utf8")
+        if requester.is_guest:
+            assert requester.user is not None, "Guest requester must have a user ID set"
+            return (path, "guest", requester.user)
+        elif requester.app_service is not None:
+            return (path, "appservice", requester.app_service.id)
+        else:
+            assert (
+                requester.access_token_id is not None
+            ), "Requester must have an access_token_id"
+            return (path, "user", requester.access_token_id)

     def fetch_or_execute_request(
         self,
-        request: Request,
-        fn: Callable[P, Awaitable[Tuple[int, JsonDict]]],
-        *args: P.args,
-        **kwargs: P.kwargs,
-    ) -> Awaitable[Tuple[int, JsonDict]]:
-        """A helper function for fetch_or_execute which extracts
-        a transaction key from the given request.
-
-        See:
-            fetch_or_execute
-        """
-        return self.fetch_or_execute(
-            self._get_transaction_key(request), fn, *args, **kwargs
-        )
-
-    def fetch_or_execute(
-        self,
-        txn_key: str,
+        request: IRequest,
+        requester: Requester,
         fn: Callable[P, Awaitable[Tuple[int, JsonDict]]],
         *args: P.args,
         **kwargs: P.kwargs,
@@ -96,14 +90,15 @@ class HttpTransactionCache:
         to produce a response for this transaction.

         Args:
-            txn_key: A key to ensure idempotency should fetch_or_execute be
-                called again at a later point in time.
+            request:
+            requester:
             fn: A function which returns a tuple of (response_code, response_dict).
             *args: Arguments to pass to fn.
             **kwargs: Keyword arguments to pass to fn.
         Returns:
             Deferred which resolves to a tuple of (response_code, response_dict).
         """
+        txn_key = self._get_transaction_key(request, requester)
         if txn_key in self.transactions:
             observable = self.transactions[txn_key][0]
         else:
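
A minimal sketch of the new tuple-based keying scheme, using a hypothetical `FakeRequester` stand-in rather than the real `synapse.types.Requester`; it only illustrates how guest, appservice and regular users end up with distinct, hashable keys for the same path.

    from typing import Hashable, NamedTuple, Optional

    class FakeRequester(NamedTuple):
        # Hypothetical stand-in for synapse.types.Requester, for illustration only.
        user: str
        access_token_id: Optional[int]
        app_service_id: Optional[str]
        is_guest: bool

    def transaction_key(path: str, requester: FakeRequester) -> Hashable:
        # Mirrors the keying above: guests keyed by user ID, appservices by their
        # ID, regular users by access token ID -- all scoped to the request path.
        if requester.is_guest:
            return (path, "guest", requester.user)
        elif requester.app_service_id is not None:
            return (path, "appservice", requester.app_service_id)
        else:
            assert requester.access_token_id is not None
            return (path, "user", requester.access_token_id)

    alice = FakeRequester("@alice:example.org", access_token_id=42, app_service_id=None, is_guest=False)
    guest = FakeRequester("@guest:example.org", access_token_id=None, app_service_id=None, is_guest=True)

    # Retrying the same PUT from the same session reuses the key...
    assert transaction_key("/sendToDevice/m.test/txn1", alice) == transaction_key("/sendToDevice/m.test/txn1", alice)
    # ...while a guest hitting the same path gets a different key.
    assert transaction_key("/sendToDevice/m.test/txn1", guest) != transaction_key("/sendToDevice/m.test/txn1", alice)
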
@@ -109,6 +109,8 @@ class VersionsRestServlet(RestServlet):
                     "org.matrix.msc3773": self.config.experimental.msc3773_enabled,
                     # Allows moderators to fetch redacted event content as described in MSC2815
                     "fi.mau.msc2815": self.config.experimental.msc2815_enabled,
+                    # Adds a ping endpoint for appservices to check HS->AS connection
+                    "fi.mau.msc2659": self.config.experimental.msc2659_enabled,
                     # Adds support for login token requests as per MSC3882
                     "org.matrix.msc3882": self.config.experimental.msc3882_enabled,
                     # Adds support for remotely enabling/disabling pushers, as per MSC3881
@@ -120,6 +122,8 @@ class VersionsRestServlet(RestServlet):
                     is not None,
                     # Adds support for relation-based redactions as per MSC3912.
                     "org.matrix.msc3912": self.config.experimental.msc3912_enabled,
+                    # Adds support for unstable "intentional mentions" behaviour.
+                    "org.matrix.msc3952_intentional_mentions": self.config.experimental.msc3952_intentional_mentions,
                 },
             },
         )
@@ -12,26 +12,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import datetime
-import errno
-import fnmatch
-import logging
-import os
-import re
-import shutil
-import sys
-import traceback
-from typing import TYPE_CHECKING, BinaryIO, Iterable, Optional, Tuple
-from urllib.parse import urljoin, urlparse, urlsplit
-from urllib.request import urlopen
-
-import attr
+from typing import TYPE_CHECKING

-from twisted.internet.defer import Deferred
-from twisted.internet.error import DNSLookupError
-
-from synapse.api.errors import Codes, SynapseError
-from synapse.http.client import SimpleHttpClient
 from synapse.http.server import (
     DirectServeJsonResource,
     respond_with_json,
@@ -39,71 +22,13 @@ from synapse.http.server import (
 )
 from synapse.http.servlet import parse_integer, parse_string
 from synapse.http.site import SynapseRequest
-from synapse.logging.context import make_deferred_yieldable, run_in_background
-from synapse.media._base import FileInfo, get_filename_from_headers
 from synapse.media.media_storage import MediaStorage
-from synapse.media.oembed import OEmbedProvider
-from synapse.media.preview_html import decode_body, parse_html_to_open_graph
-from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.types import JsonDict, UserID
-from synapse.util import json_encoder
-from synapse.util.async_helpers import ObservableDeferred
-from synapse.util.caches.expiringcache import ExpiringCache
-from synapse.util.stringutils import random_string
+from synapse.media.url_previewer import UrlPreviewer

 if TYPE_CHECKING:
     from synapse.media.media_repository import MediaRepository
     from synapse.server import HomeServer

-logger = logging.getLogger(__name__)
-
-OG_TAG_NAME_MAXLEN = 50
-OG_TAG_VALUE_MAXLEN = 1000
-
-ONE_HOUR = 60 * 60 * 1000
-ONE_DAY = 24 * ONE_HOUR
-IMAGE_CACHE_EXPIRY_MS = 2 * ONE_DAY
-
-
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class DownloadResult:
-    length: int
-    uri: str
-    response_code: int
-    media_type: str
-    download_name: Optional[str]
-    expires: int
-    etag: Optional[str]
-
-
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class MediaInfo:
-    """
-    Information parsed from downloading media being previewed.
-    """
-
-    # The Content-Type header of the response.
-    media_type: str
-    # The length (in bytes) of the downloaded media.
-    media_length: int
-    # The media filename, according to the server. This is parsed from the
-    # returned headers, if possible.
-    download_name: Optional[str]
-    # The time of the preview.
-    created_ts_ms: int
-    # Information from the media storage provider about where the file is stored
-    # on disk.
-    filesystem_id: str
-    filename: str
-    # The URI being previewed.
-    uri: str
-    # The HTTP response code.
-    response_code: int
-    # The timestamp (in milliseconds) of when this preview expires.
-    expires: int
-    # The ETag header of the response.
-    etag: Optional[str]
-

 class PreviewUrlResource(DirectServeJsonResource):
     """
@@ -121,54 +46,6 @@ class PreviewUrlResource(DirectServeJsonResource):
     * The URL metadata must be stored somewhere, rather than just using Matrix
       itself to store the media.
     * Matrix cannot be used to distribute the metadata between homeservers.
-
-    When Synapse is asked to preview a URL it does the following:
-
-    1. Checks against a URL blacklist (defined as `url_preview_url_blacklist` in the
-       config).
-    2. Checks the URL against an in-memory cache and returns the result if it exists. (This
-       is also used to de-duplicate processing of multiple in-flight requests at once.)
-    3. Kicks off a background process to generate a preview:
-       1. Checks URL and timestamp against the database cache and returns the result if it
-          has not expired and was successful (a 2xx return code).
-       2. Checks if the URL matches an oEmbed (https://oembed.com/) pattern. If it
-          does, update the URL to download.
-       3. Downloads the URL and stores it into a file via the media storage provider
-          and saves the local media metadata.
-       4. If the media is an image:
-          1. Generates thumbnails.
-          2. Generates an Open Graph response based on image properties.
-       5. If the media is HTML:
-          1. Decodes the HTML via the stored file.
-          2. Generates an Open Graph response from the HTML.
-          3. If a JSON oEmbed URL was found in the HTML via autodiscovery:
-             1. Downloads the URL and stores it into a file via the media storage provider
-                and saves the local media metadata.
-             2. Convert the oEmbed response to an Open Graph response.
-             3. Override any Open Graph data from the HTML with data from oEmbed.
-          4. If an image exists in the Open Graph response:
-             1. Downloads the URL and stores it into a file via the media storage
-                provider and saves the local media metadata.
-             2. Generates thumbnails.
-             3. Updates the Open Graph response based on image properties.
-       6. If the media is JSON and an oEmbed URL was found:
-          1. Convert the oEmbed response to an Open Graph response.
-          2. If a thumbnail or image is in the oEmbed response:
-             1. Downloads the URL and stores it into a file via the media storage
-                provider and saves the local media metadata.
-             2. Generates thumbnails.
-             3. Updates the Open Graph response based on image properties.
-       7. Stores the result in the database cache.
-    4. Returns the result.
-
-    If any additional requests (e.g. from oEmbed autodiscovery, step 5.3 or
-    image thumbnailing, step 5.4 or 6.4) fails then the URL preview as a whole
-    does not fail. As much information as possible is returned.
-
-    The in-memory cache expires after 1 hour.
-
-    Expired entries in the database cache (and their associated media files) are
-    deleted every 10 seconds. The default expiration time is 1 hour from download.
     """

     isLeaf = True
@@ -183,48 +60,10 @@ class PreviewUrlResource(DirectServeJsonResource):

         self.auth = hs.get_auth()
         self.clock = hs.get_clock()
-        self.filepaths = media_repo.filepaths
-        self.max_spider_size = hs.config.media.max_spider_size
-        self.server_name = hs.hostname
-        self.store = hs.get_datastores().main
-        self.client = SimpleHttpClient(
-            hs,
-            treq_args={"browser_like_redirects": True},
-            ip_whitelist=hs.config.media.url_preview_ip_range_whitelist,
-            ip_blacklist=hs.config.media.url_preview_ip_range_blacklist,
-            use_proxy=True,
-        )
         self.media_repo = media_repo
-        self.primary_base_path = media_repo.primary_base_path
         self.media_storage = media_storage

-        self._oembed = OEmbedProvider(hs)
+        self._url_previewer = UrlPreviewer(hs, media_repo, media_storage)

-        # We run the background jobs if we're the instance specified (or no
-        # instance is specified, where we assume there is only one instance
-        # serving media).
-        instance_running_jobs = hs.config.media.media_instance_running_background_jobs
-        self._worker_run_media_background_jobs = (
-            instance_running_jobs is None
-            or instance_running_jobs == hs.get_instance_name()
-        )
-
-        self.url_preview_url_blacklist = hs.config.media.url_preview_url_blacklist
-        self.url_preview_accept_language = hs.config.media.url_preview_accept_language
-
-        # memory cache mapping urls to an ObservableDeferred returning
-        # JSON-encoded OG metadata
-        self._cache: ExpiringCache[str, ObservableDeferred] = ExpiringCache(
-            cache_name="url_previews",
-            clock=self.clock,
-            # don't spider URLs more often than once an hour
-            expiry_ms=ONE_HOUR,
-        )
-
-        if self._worker_run_media_background_jobs:
-            self._cleaner_loop = self.clock.looping_call(
-                self._start_expire_url_cache_data, 10 * 1000
-            )
-
     async def _async_render_OPTIONS(self, request: SynapseRequest) -> None:
         request.setHeader(b"Allow", b"OPTIONS, GET")
@@ -238,632 +77,5 @@ class PreviewUrlResource(DirectServeJsonResource):
         if ts is None:
             ts = self.clock.time_msec()

-        # XXX: we could move this into _do_preview if we wanted.
-        url_tuple = urlsplit(url)
-        for entry in self.url_preview_url_blacklist:
-            match = True
-            for attrib in entry:
-                pattern = entry[attrib]
-                value = getattr(url_tuple, attrib)
-                logger.debug(
-                    "Matching attrib '%s' with value '%s' against pattern '%s'",
-                    attrib,
-                    value,
-                    pattern,
-                )
-
-                if value is None:
-                    match = False
-                    continue
-
-                # Some attributes might not be parsed as strings by urlsplit (such as the
-                # port, which is parsed as an int). Because we use match functions that
-                # expect strings, we want to make sure that's what we give them.
-                value_str = str(value)
-
-                if pattern.startswith("^"):
-                    if not re.match(pattern, value_str):
-                        match = False
-                        continue
-                else:
-                    if not fnmatch.fnmatch(value_str, pattern):
-                        match = False
-                        continue
-            if match:
-                logger.warning("URL %s blocked by url_blacklist entry %s", url, entry)
-                raise SynapseError(
-                    403, "URL blocked by url pattern blacklist entry", Codes.UNKNOWN
-                )
-
-        # the in-memory cache:
-        # * ensures that only one request is active at a time
-        # * takes load off the DB for the thundering herds
-        # * also caches any failures (unlike the DB) so we don't keep
-        #   requesting the same endpoint
-
-        observable = self._cache.get(url)
-
-        if not observable:
-            download = run_in_background(self._do_preview, url, requester.user, ts)
-            observable = ObservableDeferred(download, consumeErrors=True)
-            self._cache[url] = observable
-        else:
-            logger.info("Returning cached response")
-
-        og = await make_deferred_yieldable(observable.observe())
+        og = await self._url_previewer.preview(url, requester.user, ts)

         respond_with_json_bytes(request, 200, og, send_cors=True)
async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes:
|
|
||||||
"""Check the db, and download the URL and build a preview
|
|
||||||
|
|
||||||
Args:
|
|
||||||
url: The URL to preview.
|
|
||||||
user: The user requesting the preview.
|
|
||||||
ts: The timestamp requested for the preview.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
json-encoded og data
|
|
||||||
"""
|
|
||||||
# check the URL cache in the DB (which will also provide us with
|
|
||||||
# historical previews, if we have any)
|
|
||||||
cache_result = await self.store.get_url_cache(url, ts)
|
|
||||||
if (
|
|
||||||
cache_result
|
|
||||||
and cache_result["expires_ts"] > ts
|
|
||||||
and cache_result["response_code"] / 100 == 2
|
|
||||||
):
|
|
||||||
# It may be stored as text in the database, not as bytes (such as
|
|
||||||
# PostgreSQL). If so, encode it back before handing it on.
|
|
||||||
og = cache_result["og"]
|
|
||||||
if isinstance(og, str):
|
|
||||||
og = og.encode("utf8")
|
|
||||||
return og
|
|
||||||
|
|
||||||
# If this URL can be accessed via oEmbed, use that instead.
|
|
||||||
url_to_download = url
|
|
||||||
oembed_url = self._oembed.get_oembed_url(url)
|
|
||||||
if oembed_url:
|
|
||||||
url_to_download = oembed_url
|
|
||||||
|
|
||||||
media_info = await self._handle_url(url_to_download, user)
|
|
||||||
|
|
||||||
logger.debug("got media_info of '%s'", media_info)
|
|
||||||
|
|
||||||
# The number of milliseconds that the response should be considered valid.
|
|
||||||
expiration_ms = media_info.expires
|
|
||||||
author_name: Optional[str] = None
|
|
||||||
|
|
||||||
if _is_media(media_info.media_type):
|
|
||||||
file_id = media_info.filesystem_id
|
|
||||||
dims = await self.media_repo._generate_thumbnails(
|
|
||||||
None, file_id, file_id, media_info.media_type, url_cache=True
|
|
||||||
)
|
|
||||||
|
|
||||||
og = {
|
|
||||||
"og:description": media_info.download_name,
|
|
||||||
"og:image": f"mxc://{self.server_name}/{media_info.filesystem_id}",
|
|
||||||
"og:image:type": media_info.media_type,
|
|
||||||
"matrix:image:size": media_info.media_length,
|
|
||||||
}
|
|
||||||
|
|
||||||
if dims:
|
|
||||||
og["og:image:width"] = dims["width"]
|
|
||||||
og["og:image:height"] = dims["height"]
|
|
||||||
else:
|
|
||||||
logger.warning("Couldn't get dims for %s" % url)
|
|
||||||
|
|
||||||
# define our OG response for this media
|
|
||||||
elif _is_html(media_info.media_type):
|
|
||||||
# TODO: somehow stop a big HTML tree from exploding synapse's RAM
|
|
||||||
|
|
||||||
with open(media_info.filename, "rb") as file:
|
|
||||||
body = file.read()
|
|
||||||
|
|
||||||
tree = decode_body(body, media_info.uri, media_info.media_type)
|
|
||||||
if tree is not None:
|
|
||||||
# Check if this HTML document points to oEmbed information and
|
|
||||||
# defer to that.
|
|
||||||
oembed_url = self._oembed.autodiscover_from_html(tree)
|
|
||||||
og_from_oembed: JsonDict = {}
|
|
||||||
if oembed_url:
|
|
||||||
try:
|
|
||||||
oembed_info = await self._handle_url(
|
|
||||||
oembed_url, user, allow_data_urls=True
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
# Fetching the oEmbed info failed, don't block the entire URL preview.
|
|
||||||
logger.warning(
|
|
||||||
"oEmbed fetch failed during URL preview: %s errored with %s",
|
|
||||||
oembed_url,
|
|
||||||
e,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
(
|
|
||||||
og_from_oembed,
|
|
||||||
author_name,
|
|
||||||
expiration_ms,
|
|
||||||
) = await self._handle_oembed_response(
|
|
||||||
url, oembed_info, expiration_ms
|
|
||||||
)
|
|
||||||
|
|
||||||
# Parse Open Graph information from the HTML in case the oEmbed
|
|
||||||
# response failed or is incomplete.
|
|
||||||
og_from_html = parse_html_to_open_graph(tree)
|
|
||||||
|
|
||||||
# Compile the Open Graph response by using the scraped
|
|
||||||
# information from the HTML and overlaying any information
|
|
||||||
# from the oEmbed response.
|
|
||||||
og = {**og_from_html, **og_from_oembed}
|
|
||||||
|
|
||||||
await self._precache_image_url(user, media_info, og)
|
|
||||||
else:
|
|
||||||
og = {}
|
|
||||||
|
|
||||||
elif oembed_url:
|
|
||||||
# Handle the oEmbed information.
|
|
||||||
og, author_name, expiration_ms = await self._handle_oembed_response(
|
|
||||||
url, media_info, expiration_ms
|
|
||||||
)
|
|
||||||
await self._precache_image_url(user, media_info, og)
|
|
||||||
|
|
||||||
else:
|
|
||||||
logger.warning("Failed to find any OG data in %s", url)
|
|
||||||
og = {}
|
|
||||||
|
|
||||||
# If we don't have a title but we have author_name, copy it as
|
|
||||||
# title
|
|
||||||
if not og.get("og:title") and author_name:
|
|
||||||
og["og:title"] = author_name
|
|
||||||
|
|
||||||
# filter out any stupidly long values
|
|
||||||
keys_to_remove = []
|
|
||||||
for k, v in og.items():
|
|
||||||
# values can be numeric as well as strings, hence the cast to str
|
|
||||||
if len(k) > OG_TAG_NAME_MAXLEN or len(str(v)) > OG_TAG_VALUE_MAXLEN:
|
|
||||||
logger.warning(
|
|
||||||
"Pruning overlong tag %s from OG data", k[:OG_TAG_NAME_MAXLEN]
|
|
||||||
)
|
|
||||||
keys_to_remove.append(k)
|
|
||||||
for k in keys_to_remove:
|
|
||||||
del og[k]
|
|
||||||
|
|
||||||
logger.debug("Calculated OG for %s as %s", url, og)
|
|
||||||
|
|
||||||
jsonog = json_encoder.encode(og)
|
|
||||||
|
|
||||||
# Cap the amount of time to consider a response valid.
|
|
||||||
expiration_ms = min(expiration_ms, ONE_DAY)
|
|
||||||
|
|
||||||
# store OG in history-aware DB cache
|
|
||||||
await self.store.store_url_cache(
|
|
||||||
url,
|
|
||||||
media_info.response_code,
|
|
||||||
media_info.etag,
|
|
||||||
media_info.created_ts_ms + expiration_ms,
|
|
||||||
jsonog,
|
|
||||||
media_info.filesystem_id,
|
|
||||||
media_info.created_ts_ms,
|
|
||||||
)
|
|
||||||
|
|
||||||
return jsonog.encode("utf8")
|
|
||||||
|
|
||||||
async def _download_url(self, url: str, output_stream: BinaryIO) -> DownloadResult:
|
|
||||||
"""
|
|
||||||
Fetches a remote URL and parses the headers.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
url: The URL to fetch.
|
|
||||||
output_stream: The stream to write the content to.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
A tuple of:
|
|
||||||
Media length, URL downloaded, the HTTP response code,
|
|
||||||
the media type, the downloaded file name, the number of
|
|
||||||
milliseconds the result is valid for, the etag header.
|
|
||||||
"""
|
|
||||||
|
|
||||||
try:
|
|
||||||
logger.debug("Trying to get preview for url '%s'", url)
|
|
||||||
length, headers, uri, code = await self.client.get_file(
|
|
||||||
url,
|
|
||||||
output_stream=output_stream,
|
|
||||||
max_size=self.max_spider_size,
|
|
||||||
headers={
|
|
||||||
b"Accept-Language": self.url_preview_accept_language,
|
|
||||||
# Use a custom user agent for the preview because some sites will only return
|
|
||||||
# Open Graph metadata to crawler user agents. Omit the Synapse version
|
|
||||||
# string to avoid leaking information.
|
|
||||||
b"User-Agent": [
|
|
||||||
"Synapse (bot; +https://github.com/matrix-org/synapse)"
|
|
||||||
],
|
|
||||||
},
|
|
||||||
is_allowed_content_type=_is_previewable,
|
|
||||||
)
|
|
||||||
except SynapseError:
|
|
||||||
# Pass SynapseErrors through directly, so that the servlet
|
|
||||||
# handler will return a SynapseError to the client instead of
|
|
||||||
# blank data or a 500.
|
|
||||||
raise
|
|
||||||
except DNSLookupError:
|
|
||||||
# DNS lookup returned no results
|
|
||||||
# Note: This will also be the case if one of the resolved IP
|
|
||||||
# addresses is blacklisted
|
|
||||||
raise SynapseError(
|
|
||||||
502,
|
|
||||||
"DNS resolution failure during URL preview generation",
|
|
||||||
Codes.UNKNOWN,
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
# FIXME: pass through 404s and other error messages nicely
|
|
||||||
logger.warning("Error downloading %s: %r", url, e)
|
|
||||||
|
|
||||||
raise SynapseError(
|
|
||||||
500,
|
|
||||||
"Failed to download content: %s"
|
|
||||||
% (traceback.format_exception_only(sys.exc_info()[0], e),),
|
|
||||||
Codes.UNKNOWN,
|
|
||||||
)
|
|
||||||
|
|
||||||
if b"Content-Type" in headers:
|
|
||||||
media_type = headers[b"Content-Type"][0].decode("ascii")
|
|
||||||
else:
|
|
||||||
media_type = "application/octet-stream"
|
|
||||||
|
|
||||||
download_name = get_filename_from_headers(headers)
|
|
||||||
|
|
||||||
# FIXME: we should calculate a proper expiration based on the
|
|
||||||
# Cache-Control and Expire headers. But for now, assume 1 hour.
|
|
||||||
expires = ONE_HOUR
|
|
||||||
etag = headers[b"ETag"][0].decode("ascii") if b"ETag" in headers else None
|
|
||||||
|
|
||||||
return DownloadResult(
|
|
||||||
length, uri, code, media_type, download_name, expires, etag
|
|
||||||
)
|
|
||||||
|
|
||||||
async def _parse_data_url(
|
|
||||||
self, url: str, output_stream: BinaryIO
|
|
||||||
) -> DownloadResult:
|
|
||||||
"""
|
|
||||||
Parses a data: URL.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
url: The URL to parse.
|
|
||||||
output_stream: The stream to write the content to.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
A tuple of:
|
|
||||||
Media length, URL downloaded, the HTTP response code,
|
|
||||||
the media type, the downloaded file name, the number of
|
|
||||||
milliseconds the result is valid for, the etag header.
|
|
||||||
"""
|
|
||||||
|
|
||||||
try:
|
|
||||||
logger.debug("Trying to parse data url '%s'", url)
|
|
||||||
with urlopen(url) as url_info:
|
|
||||||
# TODO Can this be more efficient.
|
|
||||||
output_stream.write(url_info.read())
|
|
||||||
except Exception as e:
|
|
||||||
logger.warning("Error parsing data: URL %s: %r", url, e)
|
|
||||||
|
|
||||||
raise SynapseError(
|
|
||||||
500,
|
|
||||||
"Failed to parse data URL: %s"
|
|
||||||
% (traceback.format_exception_only(sys.exc_info()[0], e),),
|
|
||||||
Codes.UNKNOWN,
|
|
||||||
)
|
|
||||||
|
|
||||||
return DownloadResult(
|
|
||||||
# Read back the length that has been written.
|
|
||||||
length=output_stream.tell(),
|
|
||||||
uri=url,
|
|
||||||
# If it was parsed, consider this a 200 OK.
|
|
||||||
response_code=200,
|
|
||||||
# urlopen shoves the media-type from the data URL into the content type
|
|
||||||
# header object.
|
|
||||||
media_type=url_info.headers.get_content_type(),
|
|
||||||
# Some features are not supported by data: URLs.
|
|
||||||
download_name=None,
|
|
||||||
expires=ONE_HOUR,
|
|
||||||
etag=None,
|
|
||||||
)
|
|
||||||
|
|
||||||
async def _handle_url(
|
|
||||||
self, url: str, user: UserID, allow_data_urls: bool = False
|
|
||||||
) -> MediaInfo:
|
|
||||||
"""
|
|
||||||
Fetches content from a URL and parses the result to generate a MediaInfo.
|
|
||||||
|
|
||||||
It uses the media storage provider to persist the fetched content and
|
|
||||||
stores the mapping into the database.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
url: The URL to fetch.
|
|
||||||
user: The user who has requested this URL.
|
|
||||||
allow_data_urls: True if data URLs should be allowed.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
A MediaInfo object describing the fetched content.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# TODO: we should probably honour robots.txt... except in practice
|
|
||||||
# we're most likely being explicitly triggered by a human rather than a
|
|
||||||
# bot, so are we really a robot?
|
|
||||||
|
|
||||||
file_id = datetime.date.today().isoformat() + "_" + random_string(16)
|
|
||||||
|
|
||||||
file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True)
|
|
||||||
|
|
||||||
with self.media_storage.store_into_file(file_info) as (f, fname, finish):
|
|
||||||
if url.startswith("data:"):
|
|
||||||
if not allow_data_urls:
|
|
||||||
raise SynapseError(
|
|
||||||
500, "Previewing of data: URLs is forbidden", Codes.UNKNOWN
|
|
||||||
)
|
|
||||||
|
|
||||||
download_result = await self._parse_data_url(url, f)
|
|
||||||
else:
|
|
||||||
download_result = await self._download_url(url, f)
|
|
||||||
|
|
||||||
await finish()
|
|
||||||
|
|
||||||
try:
|
|
||||||
time_now_ms = self.clock.time_msec()
|
|
||||||
|
|
||||||
await self.store.store_local_media(
|
|
||||||
media_id=file_id,
|
|
||||||
media_type=download_result.media_type,
|
|
||||||
time_now_ms=time_now_ms,
|
|
||||||
upload_name=download_result.download_name,
|
|
||||||
media_length=download_result.length,
|
|
||||||
user_id=user,
|
|
||||||
url_cache=url,
|
|
||||||
)
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
logger.error("Error handling downloaded %s: %r", url, e)
|
|
||||||
# TODO: we really ought to delete the downloaded file in this
|
|
||||||
# case, since we won't have recorded it in the db, and will
|
|
||||||
# therefore not expire it.
|
|
||||||
raise
|
|
||||||
|
|
||||||
return MediaInfo(
|
|
||||||
media_type=download_result.media_type,
|
|
||||||
media_length=download_result.length,
|
|
||||||
download_name=download_result.download_name,
|
|
||||||
created_ts_ms=time_now_ms,
|
|
||||||
filesystem_id=file_id,
|
|
||||||
filename=fname,
|
|
||||||
uri=download_result.uri,
|
|
||||||
response_code=download_result.response_code,
|
|
||||||
expires=download_result.expires,
|
|
||||||
etag=download_result.etag,
|
|
||||||
)
|
|
||||||
|
|
||||||
async def _precache_image_url(
|
|
||||||
self, user: UserID, media_info: MediaInfo, og: JsonDict
|
|
||||||
) -> None:
|
|
||||||
"""
|
|
||||||
Pre-cache the image (if one exists) for posterity
|
|
||||||
|
|
||||||
Args:
|
|
||||||
user: The user requesting the preview.
|
|
||||||
media_info: The media being previewed.
|
|
||||||
og: The Open Graph dictionary. This is modified with image information.
|
|
||||||
"""
|
|
||||||
# If there's no image or it is blank, there's nothing to do.
|
|
||||||
if "og:image" not in og:
|
|
||||||
return
|
|
||||||
|
|
||||||
# Remove the raw image URL, this will be replaced with an MXC URL, if successful.
|
|
||||||
image_url = og.pop("og:image")
|
|
||||||
if not image_url:
|
|
||||||
return
|
|
||||||
|
|
||||||
# The image URL from the HTML might be relative to the previewed page,
|
|
||||||
# convert it to an URL which can be requested directly.
|
|
||||||
url_parts = urlparse(image_url)
|
|
||||||
if url_parts.scheme != "data":
|
|
||||||
image_url = urljoin(media_info.uri, image_url)
|
|
||||||
|
|
||||||
# FIXME: it might be cleaner to use the same flow as the main /preview_url
|
|
||||||
# request itself and benefit from the same caching etc. But for now we
|
|
||||||
# just rely on the caching on the master request to speed things up.
|
|
||||||
try:
|
|
||||||
image_info = await self._handle_url(image_url, user, allow_data_urls=True)
|
|
||||||
except Exception as e:
|
|
||||||
# Pre-caching the image failed, don't block the entire URL preview.
|
|
||||||
logger.warning(
|
|
||||||
"Pre-caching image failed during URL preview: %s errored with %s",
|
|
||||||
image_url,
|
|
||||||
e,
|
|
||||||
)
|
|
||||||
return
|
|
||||||
|
|
||||||
if _is_media(image_info.media_type):
|
|
||||||
# TODO: make sure we don't choke on white-on-transparent images
|
|
||||||
file_id = image_info.filesystem_id
|
|
||||||
dims = await self.media_repo._generate_thumbnails(
|
|
||||||
None, file_id, file_id, image_info.media_type, url_cache=True
|
|
||||||
)
|
|
||||||
if dims:
|
|
||||||
og["og:image:width"] = dims["width"]
|
|
||||||
og["og:image:height"] = dims["height"]
|
|
||||||
else:
|
|
||||||
logger.warning("Couldn't get dims for %s", image_url)
|
|
||||||
|
|
||||||
og["og:image"] = f"mxc://{self.server_name}/{image_info.filesystem_id}"
|
|
||||||
og["og:image:type"] = image_info.media_type
|
|
||||||
og["matrix:image:size"] = image_info.media_length
|
|
||||||
|
|
||||||
async def _handle_oembed_response(
|
|
||||||
self, url: str, media_info: MediaInfo, expiration_ms: int
|
|
||||||
) -> Tuple[JsonDict, Optional[str], int]:
|
|
||||||
"""
|
|
||||||
Parse the downloaded oEmbed info.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
url: The URL which is being previewed (not the one which was
|
|
||||||
requested).
|
|
||||||
media_info: The media being previewed.
|
|
||||||
expiration_ms: The length of time, in milliseconds, the media is valid for.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
A tuple of:
|
|
||||||
The Open Graph dictionary, if the oEmbed info can be parsed.
|
|
||||||
The author name if it could be retrieved from oEmbed.
|
|
||||||
The (possibly updated) length of time, in milliseconds, the media is valid for.
|
|
||||||
"""
|
|
||||||
# If JSON was not returned, there's nothing to do.
|
|
||||||
if not _is_json(media_info.media_type):
|
|
||||||
return {}, None, expiration_ms
|
|
||||||
|
|
||||||
with open(media_info.filename, "rb") as file:
|
|
||||||
body = file.read()
|
|
||||||
|
|
||||||
oembed_response = self._oembed.parse_oembed_response(url, body)
|
|
||||||
open_graph_result = oembed_response.open_graph_result
|
|
||||||
|
|
||||||
# Use the cache age from the oEmbed result, if one was given.
|
|
||||||
if open_graph_result and oembed_response.cache_age is not None:
|
|
||||||
expiration_ms = oembed_response.cache_age
|
|
||||||
|
|
||||||
return open_graph_result, oembed_response.author_name, expiration_ms
|
|
||||||
|
|
||||||
def _start_expire_url_cache_data(self) -> Deferred:
|
|
||||||
return run_as_background_process(
|
|
||||||
"expire_url_cache_data", self._expire_url_cache_data
|
|
||||||
)
|
|
||||||
|
|
||||||
async def _expire_url_cache_data(self) -> None:
|
|
||||||
"""Clean up expired url cache content, media and thumbnails."""
|
|
||||||
|
|
||||||
assert self._worker_run_media_background_jobs
|
|
||||||
|
|
||||||
now = self.clock.time_msec()
|
|
||||||
|
|
||||||
logger.debug("Running url preview cache expiry")
|
|
||||||
|
|
||||||
def try_remove_parent_dirs(dirs: Iterable[str]) -> None:
|
|
||||||
"""Attempt to remove the given chain of parent directories
|
|
||||||
|
|
||||||
Args:
|
|
||||||
dirs: The list of directory paths to delete, with children appearing
|
|
||||||
before their parents.
|
|
||||||
"""
|
|
||||||
for dir in dirs:
|
|
||||||
try:
|
|
||||||
os.rmdir(dir)
|
|
||||||
except FileNotFoundError:
|
|
||||||
# Already deleted, continue with deleting the rest
|
|
||||||
pass
|
|
||||||
except OSError as e:
|
|
||||||
# Failed, skip deleting the rest of the parent dirs
|
|
||||||
if e.errno != errno.ENOTEMPTY:
|
|
||||||
logger.warning(
|
|
||||||
"Failed to remove media directory while clearing url preview cache: %r: %s",
|
|
||||||
dir,
|
|
||||||
e,
|
|
||||||
)
|
|
||||||
break
|
|
||||||
|
|
||||||
# First we delete expired url cache entries
|
|
||||||
media_ids = await self.store.get_expired_url_cache(now)
|
|
||||||
|
|
||||||
removed_media = []
|
|
||||||
for media_id in media_ids:
|
|
||||||
fname = self.filepaths.url_cache_filepath(media_id)
|
|
||||||
try:
|
|
||||||
os.remove(fname)
|
|
||||||
except FileNotFoundError:
|
|
||||||
pass # If the path doesn't exist, meh
|
|
||||||
except OSError as e:
|
|
||||||
logger.warning(
|
|
||||||
"Failed to remove media while clearing url preview cache: %r: %s",
|
|
||||||
media_id,
|
|
||||||
e,
|
|
||||||
)
|
|
||||||
continue
|
|
||||||
|
|
||||||
removed_media.append(media_id)
|
|
||||||
|
|
||||||
dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
|
|
||||||
try_remove_parent_dirs(dirs)
|
|
||||||
|
|
||||||
await self.store.delete_url_cache(removed_media)
|
|
||||||
|
|
||||||
if removed_media:
|
|
||||||
logger.debug(
|
|
||||||
"Deleted %d entries from url preview cache", len(removed_media)
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
logger.debug("No entries removed from url preview cache")
|
|
||||||
|
|
||||||
# Now we delete old images associated with the url cache.
|
|
||||||
# These may be cached for a bit on the client (i.e., they
|
|
||||||
# may have a room open with a preview url thing open).
|
|
||||||
# So we wait a couple of days before deleting, just in case.
|
|
||||||
expire_before = now - IMAGE_CACHE_EXPIRY_MS
|
|
||||||
media_ids = await self.store.get_url_cache_media_before(expire_before)
|
|
||||||
|
|
||||||
removed_media = []
|
|
||||||
for media_id in media_ids:
|
|
||||||
fname = self.filepaths.url_cache_filepath(media_id)
|
|
||||||
try:
|
|
||||||
os.remove(fname)
|
|
||||||
except FileNotFoundError:
|
|
||||||
pass # If the path doesn't exist, meh
|
|
||||||
except OSError as e:
|
|
||||||
logger.warning(
|
|
||||||
"Failed to remove media from url preview cache: %r: %s", media_id, e
|
|
||||||
)
|
|
||||||
continue
|
|
||||||
|
|
||||||
dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id)
|
|
||||||
try_remove_parent_dirs(dirs)
|
|
||||||
|
|
||||||
thumbnail_dir = self.filepaths.url_cache_thumbnail_directory(media_id)
|
|
||||||
try:
|
|
||||||
shutil.rmtree(thumbnail_dir)
|
|
||||||
except FileNotFoundError:
|
|
||||||
pass # If the path doesn't exist, meh
|
|
||||||
except OSError as e:
|
|
||||||
logger.warning(
|
|
||||||
"Failed to remove media from url preview cache: %r: %s", media_id, e
|
|
||||||
)
|
|
||||||
continue
|
|
||||||
|
|
||||||
removed_media.append(media_id)
|
|
||||||
|
|
||||||
dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(media_id)
|
|
||||||
# Note that one of the directories to be deleted has already been
|
|
||||||
# removed by the `rmtree` above.
|
|
||||||
try_remove_parent_dirs(dirs)
|
|
||||||
|
|
||||||
await self.store.delete_url_cache_media(removed_media)
|
|
||||||
|
|
||||||
if removed_media:
|
|
||||||
logger.debug("Deleted %d media from url preview cache", len(removed_media))
|
|
||||||
else:
|
|
||||||
logger.debug("No media removed from url preview cache")
|
|
||||||
|
|
||||||
|
|
||||||
def _is_media(content_type: str) -> bool:
|
|
||||||
return content_type.lower().startswith("image/")
|
|
||||||
|
|
||||||
|
|
||||||
def _is_html(content_type: str) -> bool:
|
|
||||||
content_type = content_type.lower()
|
|
||||||
return content_type.startswith("text/html") or content_type.startswith(
|
|
||||||
"application/xhtml"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def _is_json(content_type: str) -> bool:
|
|
||||||
return content_type.lower().startswith("application/json")
|
|
||||||
|
|
||||||
|
|
||||||
def _is_previewable(content_type: str) -> bool:
|
|
||||||
"""Returns True for content types for which we will perform URL preview and False
|
|
||||||
otherwise."""
|
|
||||||
|
|
||||||
return _is_html(content_type) or _is_media(content_type) or _is_json(content_type)
|
|
||||||
|
|
|
@@ -23,6 +23,8 @@ import functools
 import logging
 from typing import TYPE_CHECKING, Callable, Dict, List, Optional, TypeVar, cast

+from typing_extensions import TypeAlias
+
 from twisted.internet.interfaces import IOpenSSLContextFactory
 from twisted.internet.tcp import Port
 from twisted.web.iweb import IPolicyForHTTPS
@@ -108,6 +110,7 @@ from synapse.http.matrixfederationclient import MatrixFederationHttpClient
 from synapse.media.media_repository import MediaRepository
 from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager
 from synapse.module_api import ModuleApi
+from synapse.module_api.callbacks import ModuleApiCallbacks
 from synapse.notifier import Notifier, ReplicationNotifier
 from synapse.push.bulk_push_rule_evaluator import BulkPushRuleEvaluator
 from synapse.push.pusherpool import PusherPool
@@ -142,10 +145,31 @@ if TYPE_CHECKING:
     from synapse.handlers.saml import SamlHandler


-T = TypeVar("T")
+# The annotation for `cache_in_self` used to be
+#     def (builder: Callable[["HomeServer"],T]) -> Callable[["HomeServer"],T]
+# which mypy was happy with.
+#
+# But PyCharm was confused by this. If `foo` was decorated by `@cache_in_self`, then
+# an expression like `hs.foo()`
+#
+# - would erroneously warn that we hadn't provided a `hs` argument to foo (PyCharm
+#   confused about boundmethods and unbound methods?), and
+# - would be considered to have type `Any`, making for a poor autocomplete and
+#   cross-referencing experience.
+#
+# Instead, use a typevar `F` to express that `@cache_in_self` returns exactly the
+# same type it receives. This isn't strictly true [*], but it's more than good
+# enough to keep PyCharm and mypy happy.
+#
+# [*]: (e.g. `builder` could be an object with a __call__ attribute rather than a
+#      types.FunctionType instance, whereas the return value is always a
+#      types.FunctionType instance.)
+T: TypeAlias = object
+F = TypeVar("F", bound=Callable[["HomeServer"], T])


-def cache_in_self(builder: Callable[["HomeServer"], T]) -> Callable[["HomeServer"], T]:
+def cache_in_self(builder: F) -> F:
     """Wraps a function called e.g. `get_foo`, checking if `self.foo` exists and
     returning if so. If not, calls the given function and sets `self.foo` to it.

@@ -183,7 +207,7 @@ def cache_in_self(builder: Callable[["HomeServer"], T]) -> Callable[["HomeServer
         return dep

-    return _get
+    return cast(F, _get)


 class HomeServer(metaclass=abc.ABCMeta):
@@ -777,6 +801,10 @@ class HomeServer(metaclass=abc.ABCMeta):
     def get_module_api(self) -> ModuleApi:
         return ModuleApi(self, self.get_auth_handler())

+    @cache_in_self
+    def get_module_api_callbacks(self) -> ModuleApiCallbacks:
+        return ModuleApiCallbacks()
+
     @cache_in_self
     def get_account_data_handler(self) -> AccountDataHandler:
         return AccountDataHandler(self)
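
A standalone sketch of the typing trick described in the comment above, using a hypothetical `Server`/`MyServer` pair instead of `HomeServer`; it shows why the decorator can be annotated as returning exactly the callable type it receives, with a `cast` at the end.

    import functools
    from typing import Callable, TypeVar, cast

    from typing_extensions import TypeAlias

    class Server:
        # Hypothetical stand-in for HomeServer.
        pass

    # Same trick as above: a bound TypeVar so the decorator is typed as
    # returning exactly the callable type it was given.
    T: TypeAlias = object
    F = TypeVar("F", bound=Callable[[Server], T])

    def cache_in_self(builder: F) -> F:
        depname = builder.__name__.replace("get_", "")

        @functools.wraps(builder)
        def _get(self: Server) -> T:
            # Build once, then cache the result on the instance.
            attr = "_" + depname
            if not hasattr(self, attr):
                setattr(self, attr, builder(self))
            return getattr(self, attr)

        # `_get` is not literally of type F, hence the cast (as in the diff above).
        return cast(F, _get)

    class MyServer(Server):
        @cache_in_self
        def get_config(self) -> dict:
            return {"built": True}

    s = MyServer()
    assert s.get_config() is s.get_config()  # built once, then cached
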
@@ -16,6 +16,7 @@ import itertools
 import logging
 from typing import TYPE_CHECKING, Set

+from synapse.logging.context import nested_logging_context
 from synapse.storage.databases import Databases

 if TYPE_CHECKING:
@@ -33,6 +34,7 @@ class PurgeEventsStorageController:
     async def purge_room(self, room_id: str) -> None:
         """Deletes all record of a room"""

+        with nested_logging_context(room_id):
             state_groups_to_delete = await self.stores.main.purge_room(room_id)
             await self.stores.state.purge_room_state(room_id, state_groups_to_delete)

@@ -51,15 +53,17 @@ class PurgeEventsStorageController:
         (instead of just marking them as outliers and deleting their
         state groups).
         """
+        with nested_logging_context(room_id):
             state_groups = await self.stores.main.purge_history(
                 room_id, token, delete_local_events
             )

             logger.info("[purge] finding state groups that can be deleted")

             sg_to_delete = await self._find_unreferenced_groups(state_groups)

-        await self.stores.state.purge_unreferenced_state_groups(room_id, sg_to_delete)
+            await self.stores.state.purge_unreferenced_state_groups(
+                room_id, sg_to_delete
+            )

     async def _find_unreferenced_groups(self, state_groups: Set[int]) -> Set[int]:
         """Used when purging history to figure out which state groups can be
@@ -34,6 +34,7 @@ from typing import (
     Tuple,
     Type,
     TypeVar,
+    Union,
     cast,
     overload,
 )
@@ -100,6 +101,15 @@ UNIQUE_INDEX_BACKGROUND_UPDATES = {
 }


+class _PoolConnection(Connection):
+    """
+    A Connection from twisted.enterprise.adbapi.Connection.
+    """
+
+    def reconnect(self) -> None:
+        ...
+
+
 def make_pool(
     reactor: IReactorCore,
     db_config: DatabaseConnectionConfig,
@@ -856,7 +866,8 @@ class DatabasePool:
         try:
             with opentracing.start_active_span(f"db.{desc}"):
                 result = await self.runWithConnection(
-                    self.new_transaction,
+                    # mypy seems to have an issue with this, maybe a bug?
+                    self.new_transaction,  # type: ignore[arg-type]
                     desc,
                     after_callbacks,
                     async_after_callbacks,
@@ -892,7 +903,7 @@ class DatabasePool:

     async def runWithConnection(
         self,
-        func: Callable[..., R],
+        func: Callable[Concatenate[LoggingDatabaseConnection, P], R],
         *args: Any,
         db_autocommit: bool = False,
         isolation_level: Optional[int] = None,
@@ -926,7 +937,7 @@ class DatabasePool:

         start_time = monotonic_time()

-        def inner_func(conn, *args, **kwargs):
+        def inner_func(conn: _PoolConnection, *args: P.args, **kwargs: P.kwargs) -> R:
             # We shouldn't be in a transaction. If we are then something
             # somewhere hasn't committed after doing work. (This is likely only
             # possible during startup, as `run*` will ensure changes are
@@ -1019,7 +1030,7 @@ class DatabasePool:
         decoder: Optional[Callable[[Cursor], R]],
         query: str,
         *args: Any,
-    ) -> R:
+    ) -> Union[List[Tuple[Any, ...]], R]:
         """Runs a single query for a result set.

         Args:
@@ -1032,7 +1043,7 @@ class DatabasePool:
             The result of decoder(results)
         """

-        def interaction(txn):
+        def interaction(txn: LoggingTransaction) -> Union[List[Tuple[Any, ...]], R]:
             txn.execute(query, args)
             if decoder:
                 return decoder(txn)
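
A self-contained sketch of the `Concatenate`/`ParamSpec` pattern used for `runWithConnection` above; the `Connection` class and `run_with_connection` helper here are illustrative stand-ins, not Synapse or Twisted APIs.

    from typing import Callable, TypeVar

    from typing_extensions import Concatenate, ParamSpec

    P = ParamSpec("P")
    R = TypeVar("R")

    class Connection:
        # Illustrative stand-in for the pooled DB connection object.
        def cursor_name(self) -> str:
            return "cur0"

    def run_with_connection(
        func: Callable[Concatenate[Connection, P], R],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> R:
        # Concatenate says: `func` takes a Connection first, followed by exactly
        # the arguments the caller forwarded via *args/**kwargs.
        conn = Connection()
        return func(conn, *args, **kwargs)

    def count_rows(conn: Connection, table: str, *, limit: int = 10) -> int:
        # A toy callback; a real one would run SQL against `conn`.
        return min(limit, len(table))

    assert run_with_connection(count_rows, "events", limit=3) == 3
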
@@ -325,6 +325,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
         # We then run the same purge a second time without this isolation level to
         # purge any of those rows which were added during the first.

+        logger.info("[purge] Starting initial main purge of [1/2]")
         state_groups_to_delete = await self.db_pool.runInteraction(
             "purge_room",
             self._purge_room_txn,
@@ -332,6 +333,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
             isolation_level=IsolationLevel.READ_COMMITTED,
         )

+        logger.info("[purge] Starting secondary main purge of [2/2]")
         state_groups_to_delete.extend(
             await self.db_pool.runInteraction(
                 "purge_room",
@@ -339,6 +341,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
                 room_id=room_id,
             ),
         )
+        logger.info("[purge] Done with main purge")

         return state_groups_to_delete

@@ -376,7 +379,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
         )
         referenced_chain_id_tuples = list(txn)

-        logger.info("[purge] removing events from event_auth_chain_links")
+        logger.info("[purge] removing from event_auth_chain_links")
         txn.executemany(
             """
             DELETE FROM event_auth_chain_links WHERE
@@ -399,7 +402,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
             "rejections",
             "state_events",
         ):
-            logger.info("[purge] removing %s from %s", room_id, table)
+            logger.info("[purge] removing from %s", table)

             txn.execute(
                 """
@@ -454,7 +457,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
             # happy
             "rooms",
         ):
-            logger.info("[purge] removing %s from %s", room_id, table)
+            logger.info("[purge] removing from %s", table)
             txn.execute("DELETE FROM %s WHERE room_id=?" % (table,), (room_id,))

             # Other tables we do NOT need to clear out:
@@ -486,6 +489,4 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
             # that already exist.
             self._invalidate_cache_and_stream(txn, self.have_seen_event, (room_id,))

-        logger.info("[purge] done")
-
         return state_groups