Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes
commit 1ea1a0b251
@@ -13,9 +13,9 @@ dependencies = [

 [[package]]
 name = "anyhow"
-version = "1.0.71"
+version = "1.0.72"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"
+checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854"

 [[package]]
 name = "arc-swap"
@@ -229,9 +229,9 @@ dependencies = [

 [[package]]
 name = "pyo3-log"
-version = "0.8.2"
+version = "0.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c94ff6535a6bae58d7d0b85e60d4c53f7f84d0d0aa35d6a28c3f3e70bfe51444"
+checksum = "f47b0777feb17f61eea78667d61103758b243a871edc09a7786500a50467b605"
 dependencies = [
  "arc-swap",
  "log",
@@ -352,9 +352,9 @@ dependencies = [

 [[package]]
 name = "serde_json"
-version = "1.0.100"
+version = "1.0.103"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f1e14e89be7aa4c4b78bdbdc9eb5bf8517829a600ae8eaa39a6e1d960b5185c"
+checksum = "d03b412469450d4404fe8499a268edd7f8b79fecb074b0d812ad64ca21f4031b"
 dependencies = [
  "itoa",
  "ryu",
@@ -0,0 +1 @@
+Add Unix Socket support for HTTP Replication Listeners. Document and provide usage instructions for utilizing Unix sockets in Synapse. Contributed by Jason Little.
@@ -0,0 +1 @@
+Fix long-standing bug where remote invites weren't correctly pushed.
@@ -0,0 +1 @@
+Mark `get_user_in_directory` private since it is only used in tests. Also remove the cache from it.
@@ -0,0 +1 @@
+Fix background schema updates failing over a large upgrade gap.
@@ -0,0 +1 @@
+Document which Python version runs on a given Linux distribution so we can more easily clean up later.
@@ -0,0 +1 @@
+Allow `+` in Matrix IDs, per [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009).
@@ -0,0 +1 @@
+Support room version 11 from [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820).
@@ -0,0 +1 @@
+Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`.
@@ -0,0 +1 @@
+Better clarify how to run a worker instance (pass both configs).
@@ -0,0 +1 @@
+Add details to warning in log when we fail to fetch an alias.
@@ -0,0 +1 @@
+Add Unix Socket support for HTTP Replication Listeners. Document and provide usage instructions for utilizing Unix sockets in Synapse. Contributed by Jason Little.
@@ -0,0 +1 @@
+Fix a bug introduced in 1.86.0 where Synapse would fail to start with an empty `experimental_features` configuration setting.
@@ -0,0 +1 @@
+Remove unneeded `__init__`.
@@ -0,0 +1 @@
+Remove support for calling the `/register` endpoint with an unspecced `user` property for application services.
@@ -0,0 +1 @@
+Fix bug with read/write lock implementation. This is currently unused so has no observable effects.
@@ -0,0 +1 @@
+Improve the documentation for the "login as a user" admin API.
@@ -0,0 +1 @@
+Unbreak the nix development environment by pinning the Rust version to 1.70.0.
@@ -0,0 +1 @@
+Update presence metrics to differentiate remote vs local users.
@@ -0,0 +1 @@
+Fix bug with read/write lock implementation. This is currently unused so has no observable effects.
@@ -0,0 +1 @@
+Ensure a long state res does not starve CPU by occasionally yielding to the reactor.
@@ -0,0 +1 @@
+Reduce the amount of state we pull out.
@@ -35,7 +35,11 @@ server {

     # Send all other traffic to the main process
     location ~* ^(\/_matrix|\/_synapse) {
+{% if using_unix_sockets %}
+        proxy_pass http://unix:/run/main_public.sock;
+{% else %}
         proxy_pass http://localhost:8080;
+{% endif %}
         proxy_set_header X-Forwarded-For $remote_addr;
         proxy_set_header X-Forwarded-Proto $scheme;
         proxy_set_header Host $host;
@@ -6,6 +6,9 @@
 {% if enable_redis %}
 redis:
     enabled: true
+{% if using_unix_sockets %}
+    path: /tmp/redis.sock
+{% endif %}
 {% endif %}

 {% if appservice_registrations is not none %}
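For context, these docker config files are Jinja2 templates rendered by the image's startup scripts. A minimal sketch of how the new conditional behaves, assuming only the `using_unix_sockets` variable shown above (the standalone rendering here is illustrative, not the actual Synapse code path):

```python
from jinja2 import Template

# Illustrative only: render the redis block from the template above with the
# Unix-socket flag on. trim_blocks=True drops the newlines left behind by
# the {% if %} tags so the output matches the intended YAML.
snippet = """\
redis:
    enabled: true
{% if using_unix_sockets %}
    path: /tmp/redis.sock
{% endif %}
"""

print(Template(snippet, trim_blocks=True).render(using_unix_sockets=True))
# redis:
#     enabled: true
#     path: /tmp/redis.sock
```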
@@ -19,7 +19,11 @@ username=www-data
 autorestart=true

 [program:redis]
+{% if using_unix_sockets %}
+command=/usr/local/bin/prefix-log /usr/local/bin/redis-server --unixsocket /tmp/redis.sock
+{% else %}
 command=/usr/local/bin/prefix-log /usr/local/bin/redis-server
+{% endif %}
 priority=1
 stdout_logfile=/dev/stdout
 stdout_logfile_maxbytes=0
@@ -8,7 +8,11 @@ worker_name: "{{ name }}"

 worker_listeners:
   - type: http
+{% if using_unix_sockets %}
+    path: "/run/worker.{{ port }}"
+{% else %}
     port: {{ port }}
+{% endif %}
 {% if listener_resources %}
     resources:
       - names:
@@ -36,12 +36,17 @@ listeners:

   # Allow configuring in case we want to reverse proxy 8008
   # using another process in the same container
+{% if SYNAPSE_USE_UNIX_SOCKET %}
+  # Unix sockets don't care about TLS or IP addresses or ports
+  - path: '/run/main_public.sock'
+    type: http
+{% else %}
   - port: {{ SYNAPSE_HTTP_PORT or 8008 }}
     tls: false
     bind_addresses: ['::']
     type: http
     x_forwarded: false
+{% endif %}
     resources:
       - names: [client]
         compress: true
@@ -57,8 +62,11 @@ database:
     user: "{{ POSTGRES_USER or "synapse" }}"
     password: "{{ POSTGRES_PASSWORD }}"
     database: "{{ POSTGRES_DB or "synapse" }}"
+{% if not SYNAPSE_USE_UNIX_SOCKET %}
+    {# Synapse will use a default unix socket for Postgres when host/port is not specified (behavior from `psycopg2`). #}
     host: "{{ POSTGRES_HOST or "db" }}"
     port: "{{ POSTGRES_PORT or "5432" }}"
+{% endif %}
     cp_min: 5
     cp_max: 10
 {% else %}
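The template comment above leans on a psycopg2/libpq behaviour worth spelling out: when `host` and `port` are omitted, the client falls back to the default local Unix-domain socket rather than TCP. A hedged sketch (the connection parameters are placeholders, not Synapse's real values):

```python
import psycopg2

# Placeholder credentials for illustration. With `host` and `port` omitted,
# psycopg2 (via libpq) connects over the default local Unix socket instead
# of TCP -- the behaviour the template above relies on.
conn = psycopg2.connect(dbname="synapse", user="synapse")
print(conn.get_dsn_parameters())  # inspect the effective connection settings
conn.close()
```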
@@ -74,6 +74,9 @@ MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
 MAIN_PROCESS_INSTANCE_NAME = "main"
 MAIN_PROCESS_LOCALHOST_ADDRESS = "127.0.0.1"
 MAIN_PROCESS_REPLICATION_PORT = 9093
+# Obviously, these would only be used with the UNIX socket option
+MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH = "/run/main_public.sock"
+MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH = "/run/main_private.sock"

 # A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced
 # during processing with the name of the worker.
@@ -407,11 +410,15 @@ def add_worker_roles_to_shared_config(
         )

         # Map of stream writer instance names to host/ports combos
-        instance_map[worker_name] = {
-            "host": "localhost",
-            "port": worker_port,
-        }
-
+        if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
+            instance_map[worker_name] = {
+                "path": f"/run/worker.{worker_port}",
+            }
+        else:
+            instance_map[worker_name] = {
+                "host": "localhost",
+                "port": worker_port,
+            }
         # Update the list of stream writers. It's convenient that the name of the worker
         # type is the same as the stream to write. Iterate over the whole list in case there
         # is more than one.
@@ -423,10 +430,15 @@ def add_worker_roles_to_shared_config(

         # Map of stream writer instance names to host/ports combos
         # For now, all stream writers need http replication ports
-        instance_map[worker_name] = {
-            "host": "localhost",
-            "port": worker_port,
-        }
+        if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
+            instance_map[worker_name] = {
+                "path": f"/run/worker.{worker_port}",
+            }
+        else:
+            instance_map[worker_name] = {
+                "host": "localhost",
+                "port": worker_port,
+            }


 def merge_worker_template_configs(
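One subtlety in both hunks above: `os.environ.get()` returns the variable's *string* value when it is set, so the Unix-socket branch is taken for any non-empty setting, including `SYNAPSE_USE_UNIX_SOCKET=0`. A small sketch of the toggle in isolation (the helper name is ours, not Synapse's):

```python
import os

def instance_entry(worker_port: int) -> dict:
    # os.environ.get() returns a string when the variable is set, so any
    # non-empty value (even "0" or "false") selects the Unix-socket branch.
    if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
        return {"path": f"/run/worker.{worker_port}"}
    return {"host": "localhost", "port": worker_port}

os.environ["SYNAPSE_USE_UNIX_SOCKET"] = "1"
assert instance_entry(18009) == {"path": "/run/worker.18009"}
```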
@@ -718,17 +730,29 @@ def generate_worker_files(
     # Note that yaml cares about indentation, so care should be taken to insert lines
     # into files at the correct indentation below.

+    # Convenience helper for if using unix sockets instead of host:port
+    using_unix_sockets = environ.get("SYNAPSE_USE_UNIX_SOCKET", False)
     # First read the original config file and extract the listeners block. Then we'll
     # add another listener for replication. Later we'll write out the result to the
     # shared config file.
-    listeners = [
-        {
-            "port": MAIN_PROCESS_REPLICATION_PORT,
-            "bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
-            "type": "http",
-            "resources": [{"names": ["replication"]}],
-        }
-    ]
+    listeners: List[Any]
+    if using_unix_sockets:
+        listeners = [
+            {
+                "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
+                "type": "http",
+                "resources": [{"names": ["replication"]}],
+            }
+        ]
+    else:
+        listeners = [
+            {
+                "port": MAIN_PROCESS_REPLICATION_PORT,
+                "bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
+                "type": "http",
+                "resources": [{"names": ["replication"]}],
+            }
+        ]
     with open(config_path) as file_stream:
         original_config = yaml.safe_load(file_stream)
         original_listeners = original_config.get("listeners")
@@ -769,7 +793,17 @@ def generate_worker_files(

     # A list of internal endpoints to healthcheck, starting with the main process
     # which exists even if no workers do.
-    healthcheck_urls = ["http://localhost:8080/health"]
+    # This list ends up being part of the command line to curl (curl added support
+    # for Unix sockets in version 7.40).
+    if using_unix_sockets:
+        healthcheck_urls = [
+            f"--unix-socket {MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH} "
+            # The scheme and hostname from the following URL are ignored.
+            # The only thing that matters is the path `/health`
+            "http://localhost/health"
+        ]
+    else:
+        healthcheck_urls = ["http://localhost:8080/health"]

     # Get the set of all worker types that we have configured
     all_worker_types_in_use = set(chain(*requested_worker_types.values()))
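The generated healthcheck shells out to curl (`curl --unix-socket /run/main_public.sock http://localhost/health`). If you wanted to probe the same socket from Python instead, the standard library can do it by pointing an `HTTPConnection` at an `AF_UNIX` socket — a hedged sketch, not part of Synapse:

```python
import http.client
import socket

class UnixHTTPConnection(http.client.HTTPConnection):
    """HTTPConnection that talks to a Unix domain socket instead of TCP."""

    def __init__(self, socket_path: str):
        # The hostname is a dummy; only the request path matters, just as
        # with the curl command assembled above.
        super().__init__("localhost")
        self.socket_path = socket_path

    def connect(self) -> None:
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(self.socket_path)
        self.sock = sock

conn = UnixHTTPConnection("/run/main_public.sock")
conn.request("GET", "/health")
print(conn.getresponse().status)  # expect 200 when Synapse is healthy
```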
@@ -806,8 +840,12 @@ def generate_worker_files(
         # given worker_type needs to stay assigned and not be replaced.
         worker_config["shared_extra_conf"].update(shared_config)
         shared_config = worker_config["shared_extra_conf"]

-        healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
+        if using_unix_sockets:
+            healthcheck_urls.append(
+                f"--unix-socket /run/worker.{worker_port} http://localhost/health"
+            )
+        else:
+            healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))

         # Update the shared config with sharding-related options if necessary
         add_worker_roles_to_shared_config(
@@ -826,6 +864,7 @@ def generate_worker_files(
             "/conf/workers/{name}.yaml".format(name=worker_name),
             **worker_config,
             worker_log_config_filepath=log_config_filepath,
+            using_unix_sockets=using_unix_sockets,
         )

         # Save this worker's port number to the correct nginx upstreams
@@ -846,8 +885,13 @@ def generate_worker_files(
     nginx_upstream_config = ""
     for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
         body = ""
-        for port in upstream_worker_ports:
-            body += f"    server localhost:{port};\n"
+        if using_unix_sockets:
+            for port in upstream_worker_ports:
+                body += f"    server unix:/run/worker.{port};\n"
+        else:
+            for port in upstream_worker_ports:
+                body += f"    server localhost:{port};\n"

         # Add to the list of configured upstreams
         nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
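A toy illustration of the two upstream styles the loop above emits (the ports are made up; this is not the real generator):

```python
# Toy illustration of the generated nginx upstream bodies; ports are made up.
ports = [18009, 18010]
for use_sockets in (False, True):
    body = ""
    for port in ports:
        if use_sockets:
            body += f"    server unix:/run/worker.{port};\n"
        else:
            body += f"    server localhost:{port};\n"
    print(body)
# Prints the TCP form ("server localhost:<port>;") first, then the
# Unix-socket form ("server unix:/run/worker.<port>;"), matching the
# two branches above.
```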
@@ -877,10 +921,15 @@ def generate_worker_files(
     # If there are workers, add the main process to the instance_map too.
     if workers_in_use:
         instance_map = shared_config.setdefault("instance_map", {})
-        instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
-            "host": MAIN_PROCESS_LOCALHOST_ADDRESS,
-            "port": MAIN_PROCESS_REPLICATION_PORT,
-        }
+        if using_unix_sockets:
+            instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
+                "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
+            }
+        else:
+            instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
+                "host": MAIN_PROCESS_LOCALHOST_ADDRESS,
+                "port": MAIN_PROCESS_REPLICATION_PORT,
+            }

     # Shared homeserver config
     convert(
@@ -890,6 +939,7 @@ def generate_worker_files(
         appservice_registrations=appservice_registrations,
         enable_redis=workers_in_use,
         workers_in_use=workers_in_use,
+        using_unix_sockets=using_unix_sockets,
     )

     # Nginx config
@@ -900,6 +950,7 @@ def generate_worker_files(
         upstream_directives=nginx_upstream_config,
         tls_cert_path=os.environ.get("SYNAPSE_TLS_CERT"),
         tls_key_path=os.environ.get("SYNAPSE_TLS_KEY"),
+        using_unix_sockets=using_unix_sockets,
     )

     # Supervisord config
@@ -909,6 +960,7 @@ def generate_worker_files(
         "/etc/supervisor/supervisord.conf",
         main_config_path=config_path,
         enable_redis=workers_in_use,
+        using_unix_sockets=using_unix_sockets,
     )

     convert(
@@ -732,7 +732,8 @@ POST /_synapse/admin/v1/users/<user_id>/login

 An optional `valid_until_ms` field can be specified in the request body as an
 integer timestamp that specifies when the token should expire. By default tokens
-do not expire.
+do not expire. Note that this API does not allow a user to login as themselves
+(to create more tokens).

 A response body like the following is returned:
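As a concrete illustration of the documented behaviour, a hedged Python sketch of calling this admin API with `valid_until_ms` (the homeserver URL, admin token, and user ID are placeholders):

```python
import time

import requests

# Placeholders: substitute your homeserver, an admin access token, and the
# target user. valid_until_ms is a millisecond timestamp; here, one day out.
HOMESERVER = "https://synapse.example.com"
ADMIN_TOKEN = "syt_admin_token"
USER_ID = "@alice:example.com"

resp = requests.post(
    f"{HOMESERVER}/_synapse/admin/v1/users/{USER_ID}/login",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    json={"valid_until_ms": int(time.time() * 1000) + 24 * 60 * 60 * 1000},
)
print(resp.json())  # e.g. {"access_token": "<opaque token>"}
```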
@@ -370,6 +370,7 @@ The above will run a monolithic (single-process) Synapse with SQLite as the database.
 See the [worker documentation](../workers.md) for additional information on workers.
 - Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one.
 - Passing `PODMAN=1` will use the [podman](https://podman.io/) container runtime, instead of docker.
+- Passing `UNIX_SOCKETS=1` will utilise Unix socket functionality for Synapse, Redis, and Postgres (when applicable).

 To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g.:
 ```sh
@@ -88,6 +88,16 @@ process, for example:
 dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 ```

+# Upgrading to v1.89.0
+
+## Removal of unspecced `user` property for `/register`
+
+Application services can no longer call `/register` with a `user` property to create new users.
+The standard `username` property should be used instead. See the
+[Application Service specification](https://spec.matrix.org/v1.7/application-service-api/#server-admin-style-permissions)
+for more information.
+
+
 # Upgrading to v1.88.0

 ## Minimum supported Python version
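To make the migration concrete, a hedged sketch of an application-service registration call using the spec's `username` property (the homeserver URL and token are placeholders):

```python
import requests

# Placeholders for illustration; use your homeserver and AS token.
HOMESERVER = "https://synapse.example.com"
AS_TOKEN = "as_token_placeholder"

resp = requests.post(
    f"{HOMESERVER}/_matrix/client/v3/register",
    headers={"Authorization": f"Bearer {AS_TOKEN}"},
    # The unspecced "user" key is rejected from v1.89.0 onwards;
    # application services must send the specced "username" instead.
    json={"type": "m.login.application_service", "username": "bridged_alice"},
)
print(resp.status_code, resp.json())
```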
@@ -462,6 +462,20 @@ See the docs [request log format](../administration/request_log.md).
 * `additional_resources`: Only valid for an 'http' listener. A map of
   additional endpoints which should be loaded via dynamic modules.

+Unix socket support (_Added in Synapse 1.89.0_):
+* `path`: A path and filename for a Unix socket. Make sure it is located in a
+  directory with read and write permissions, and that it already exists (the directory
+  will not be created). Defaults to `None`.
+  * **Note**: The `path` and `port` options are mutually exclusive and cannot be used
+    on the same listener.
+  * The `x_forwarded` option defaults to true when using Unix sockets and can be omitted.
+  * Other options that would not make sense to use with a UNIX socket, such as
+    `bind_addresses` and `tls`, will be ignored and can be removed.
+* `mode`: The file permissions to set on the UNIX socket. Defaults to `666`.
+  * **Note:** Must be set as `type: http` (does not support `metrics` and `manhole`).
+    Also make sure that `metrics` is not included in `resources` -> `names`.

 Valid resource names are:

 * `client`: the client-server API (/_matrix/client), and the synapse admin API (/_synapse/admin). Also implies `media` and `static`.
@@ -474,7 +488,7 @@ Valid resource names are:

 * `media`: the media API (/_matrix/media).

-* `metrics`: the metrics interface. See [here](../../metrics-howto.md).
+* `metrics`: the metrics interface. See [here](../../metrics-howto.md). (Not compatible with Unix sockets)

 * `openid`: OpenID authentication. See [here](../../openid.md).
@@ -533,6 +547,22 @@ listeners:
     bind_addresses: ['::1', '127.0.0.1']
     type: manhole
 ```
+Example configuration #3:
+```yaml
+listeners:
+  # Unix socket listener: Ideal for Synapse deployments behind a reverse proxy, offering
+  # lightweight interprocess communication without TCP/IP overhead, avoiding port
+  # conflicts, and providing enhanced security through system file permissions.
+  #
+  # Note that x_forwarded will default to true, when using a UNIX socket. Please see
+  # https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
+  #
+  - path: /var/run/synapse/main_public.sock
+    type: http
+    resources:
+      - names: [client, federation]
+```

 ---
 ### `manhole_settings`
@@ -3930,13 +3960,14 @@ federation_sender_instances:
 ---
 ### `instance_map`

-When using workers this should be a map from [`worker_name`](#worker_name) to the
-HTTP replication listener of the worker, if configured, and to the main process.
-Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
-a HTTP replication listener, and that listener should be included in the `instance_map`.
-The main process also needs an entry on the `instance_map`, and it should be listed under
-`main` **if even one other worker exists**. Ensure the port matches with what is declared
-inside the `listener` block for a `replication` listener.
+When using workers this should be a map from [`worker_name`](#worker_name) to the HTTP
+replication listener of the worker, if configured, and to the main process. Each worker
+declared under [`stream_writers`](../../workers.md#stream-writers) and
+[`outbound_federation_restricted_to`](#outbound_federation_restricted_to) needs an HTTP
+replication listener, and that listener should be included in the `instance_map`. The
+main process also needs an entry on the `instance_map`, and it should be listed under
+`main` **if even one other worker exists**. Ensure the port matches what is
+declared inside the `listener` block for a `replication` listener.


 Example configuration:
@@ -3949,6 +3980,14 @@ instance_map:
     host: localhost
     port: 8034
 ```
+Example configuration (#2, for UNIX sockets):
+```yaml
+instance_map:
+  main:
+    path: /var/run/synapse/main_replication.sock
+  worker1:
+    path: /var/run/synapse/worker1_replication.sock
+```
 ---
 ### `stream_writers`
@@ -3966,6 +4005,24 @@ stream_writers:
   typing: worker1
 ```
 ---
+### `outbound_federation_restricted_to`
+
+When using workers, you can restrict outbound federation traffic to only go through a
+specific subset of workers. Any worker specified here must also be in the
+[`instance_map`](#instance_map).
+[`worker_replication_secret`](#worker_replication_secret) must also be configured to
+authorize inter-worker communication.
+
+```yaml
+outbound_federation_restricted_to:
+  - federation_sender1
+  - federation_sender2
+```
+
+Also see the [worker
+documentation](../../workers.md#restrict-outbound-federation-traffic-to-a-specific-set-of-workers)
+for more info.
+---
 ### `run_background_tasks_on`

 The [worker](../../workers.md#background-tasks) that is used to run
@@ -4108,6 +4165,18 @@ worker_listeners:
   resources:
     - names: [client, federation]
 ```
+Example configuration (#2, using UNIX sockets with a `replication` listener):
+```yaml
+worker_listeners:
+  - type: http
+    path: /var/run/synapse/worker_public.sock
+    resources:
+      - names: [client, federation]
+  - type: http
+    path: /var/run/synapse/worker_replication.sock
+    resources:
+      - names: [replication]
+```
 ---
 ### `worker_manhole`
|
@ -95,9 +95,12 @@ for the main process
|
|||
* Secondly, you need to enable
|
||||
[redis-based replication](usage/configuration/config_documentation.md#redis)
|
||||
* You will need to add an [`instance_map`](usage/configuration/config_documentation.md#instance_map)
|
||||
with the `main` process defined, as well as the relevant connection information from
|
||||
it's HTTP `replication` listener (defined in step 1 above). Note that the `host` defined
|
||||
is the address the worker needs to look for the `main` process at, not necessarily the same address that is bound to.
|
||||
with the `main` process defined, as well as the relevant connection information from
|
||||
it's HTTP `replication` listener (defined in step 1 above).
|
||||
* Note that the `host` defined is the address the worker needs to look for the `main`
|
||||
process at, not necessarily the same address that is bound to.
|
||||
* If you are using Unix sockets for the `replication` resource, make sure to
|
||||
use a `path` to the socket file instead of a `port`.
|
||||
* Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
|
||||
can be used to authenticate HTTP traffic between workers. For example:
|
||||
|
||||
|
@@ -174,11 +177,11 @@ The following applies to Synapse installations that have been installed from source

 You can start the main Synapse process with Poetry by running the following command:
 ```console
-poetry run synapse_homeserver -c [your homeserver.yaml]
+poetry run synapse_homeserver --config-file [your homeserver.yaml]
 ```
 For worker setups, you can run the following command
 ```console
-poetry run synapse_worker -c [your worker.yaml]
+poetry run synapse_worker --config-file [your homeserver.yaml] --config-file [your worker.yaml]
 ```
 ## Available worker applications
|
@ -528,6 +531,30 @@ the stream writer for the `presence` stream:
|
|||
|
||||
^/_matrix/client/(api/v1|r0|v3|unstable)/presence/
|
||||
|
||||
#### Restrict outbound federation traffic to a specific set of workers
|
||||
|
||||
The
|
||||
[`outbound_federation_restricted_to`](usage/configuration/config_documentation.md#outbound_federation_restricted_to)
|
||||
configuration is useful to make sure outbound federation traffic only goes through a
|
||||
specified subset of workers. This allows you to set more strict access controls (like a
|
||||
firewall) for all workers and only allow the `federation_sender`'s to contact the
|
||||
outside world.
|
||||
|
||||
```yaml
|
||||
instance_map:
|
||||
main:
|
||||
host: localhost
|
||||
port: 8030
|
||||
federation_sender1:
|
||||
host: localhost
|
||||
port: 8034
|
||||
|
||||
outbound_federation_restricted_to:
|
||||
- federation_sender1
|
||||
|
||||
worker_replication_secret: "secret_secret"
|
||||
```
|
||||
|
||||
#### Background tasks
|
||||
|
||||
There is also support for moving background tasks to a separate
|
||||
|
|
flake.lock (96 lines changed)
@@ -22,27 +22,6 @@
       "type": "github"
       }
     },
-    "fenix": {
-      "inputs": {
-        "nixpkgs": [
-          "nixpkgs"
-        ],
-        "rust-analyzer-src": "rust-analyzer-src"
-      },
-      "locked": {
-        "lastModified": 1682490133,
-        "narHash": "sha256-tR2Qx0uuk97WySpSSk4rGS/oH7xb5LykbjATcw1vw1I=",
-        "owner": "nix-community",
-        "repo": "fenix",
-        "rev": "4e9412753ab75ef0e038a5fe54a062fb44c27c6a",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nix-community",
-        "repo": "fenix",
-        "type": "github"
-      }
-    },
     "flake-compat": {
       "flake": false,
       "locked": {
@@ -74,6 +53,24 @@
       "type": "github"
       }
     },
+    "flake-utils_2": {
+      "inputs": {
+        "systems": "systems"
+      },
+      "locked": {
+        "lastModified": 1681202837,
+        "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "rev": "cfacdce06f30d2b68473a46042957675eebb3401",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "type": "github"
+      }
+    },
     "gitignore": {
       "inputs": {
         "nixpkgs": [
@@ -200,6 +197,22 @@
       "type": "github"
       }
     },
+    "nixpkgs_3": {
+      "locked": {
+        "lastModified": 1681358109,
+        "narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9",
+        "type": "github"
+      },
+      "original": {
+        "owner": "NixOS",
+        "ref": "nixpkgs-unstable",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
     "pre-commit-hooks": {
       "inputs": {
         "flake-compat": [
@@ -231,25 +244,27 @@
     "root": {
       "inputs": {
         "devenv": "devenv",
-        "fenix": "fenix",
         "nixpkgs": "nixpkgs_2",
-        "systems": "systems"
+        "rust-overlay": "rust-overlay",
+        "systems": "systems_2"
       }
     },
-    "rust-analyzer-src": {
-      "flake": false,
+    "rust-overlay": {
+      "inputs": {
+        "flake-utils": "flake-utils_2",
+        "nixpkgs": "nixpkgs_3"
+      },
       "locked": {
-        "lastModified": 1682426789,
-        "narHash": "sha256-UqnLmJESRZE0tTEaGbRAw05Hm19TWIPA+R3meqi5I4w=",
-        "owner": "rust-lang",
-        "repo": "rust-analyzer",
-        "rev": "943d2a8a1ca15e8b28a1f51f5a5c135e3728da04",
+        "lastModified": 1689302058,
+        "narHash": "sha256-yD74lcHTrw4niXcE9goJLbzsgyce48rQQoy5jK5ZK40=",
+        "owner": "oxalica",
+        "repo": "rust-overlay",
+        "rev": "7b8dbbf4c67ed05a9bf3d9e658c12d4108bc24c8",
         "type": "github"
       },
       "original": {
-        "owner": "rust-lang",
-        "ref": "nightly",
-        "repo": "rust-analyzer",
+        "owner": "oxalica",
+        "repo": "rust-overlay",
         "type": "github"
       }
     },
@@ -267,6 +282,21 @@
       "repo": "default",
       "type": "github"
       }
     },
+    "systems_2": {
+      "locked": {
+        "lastModified": 1681028828,
+        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+        "owner": "nix-systems",
+        "repo": "default",
+        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nix-systems",
+        "repo": "default",
+        "type": "github"
+      }
+    }
   },
   "root": "root",
flake.nix (33 lines changed)
@@ -46,20 +46,20 @@
       systems.url = "github:nix-systems/default";
       # A development environment manager built on Nix. See https://devenv.sh.
       devenv.url = "github:cachix/devenv/main";
-      # Rust toolchains and rust-analyzer nightly.
-      fenix = {
-        url = "github:nix-community/fenix";
-        inputs.nixpkgs.follows = "nixpkgs";
-      };
+      # Rust toolchain.
+      rust-overlay.url = "github:oxalica/rust-overlay";
     };

-  outputs = { self, nixpkgs, devenv, systems, ... } @ inputs:
+  outputs = { self, nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
     let
       forEachSystem = nixpkgs.lib.genAttrs (import systems);
     in {
       devShells = forEachSystem (system:
         let
-          pkgs = nixpkgs.legacyPackages.${system};
+          overlays = [ (import rust-overlay) ];
+          pkgs = import nixpkgs {
+            inherit system overlays;
+          };
         in {
           # Everything is configured via devenv - a Nix module for creating declarative
           # developer environments. See https://devenv.sh/reference/options/ for a list
@@ -76,6 +76,20 @@
             # Configure packages to install.
             # Search for package names at https://search.nixos.org/packages?channel=unstable
             packages = with pkgs; [
+              # The rust toolchain and related tools.
+              # This will install the "default" profile of rust components.
+              # https://rust-lang.github.io/rustup/concepts/profiles.html
+              #
+              # NOTE: We currently need to set the Rust version unnecessarily high
+              # in order to work around https://github.com/matrix-org/synapse/issues/15939
+              (rust-bin.stable."1.70.0".default.override {
+                # Additionally install the "rust-src" extension to allow diving into the
+                # Rust source code in an IDE (rust-analyzer will also make use of it).
+                extensions = [ "rust-src" ];
+              })
+              # The rust-analyzer language server implementation.
+              rust-analyzer
+
               # Native dependencies for running Synapse.
               icu
               libffi
@@ -124,12 +138,11 @@
             # Install dependencies for the additional programming languages
             # involved with Synapse development.
             #
-            # * Rust is used for developing and running Synapse.
             # * Golang is needed to run the Complement test suite.
             # * Perl is needed to run the SyTest test suite.
+            # * Rust is used for developing and running Synapse.
+            #   It is installed manually with `packages` above.
             languages.go.enable = true;
-            languages.rust.enable = true;
-            languages.rust.version = "stable";
             languages.perl.enable = true;

             # Postgres is needed to run Synapse with postgres support and
@@ -460,30 +460,34 @@ files = [

 [[package]]
 name = "cryptography"
-version = "41.0.1"
+version = "41.0.2"
 description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "cryptography-41.0.1-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:f73bff05db2a3e5974a6fd248af2566134d8981fd7ab012e5dd4ddb1d9a70699"},
-    {file = "cryptography-41.0.1-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:1a5472d40c8f8e91ff7a3d8ac6dfa363d8e3138b961529c996f3e2df0c7a411a"},
-    {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fa01527046ca5facdf973eef2535a27fec4cb651e4daec4d043ef63f6ecd4ca"},
-    {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b46e37db3cc267b4dea1f56da7346c9727e1209aa98487179ee8ebed09d21e43"},
-    {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d198820aba55660b4d74f7b5fd1f17db3aa5eb3e6893b0a41b75e84e4f9e0e4b"},
-    {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:948224d76c4b6457349d47c0c98657557f429b4e93057cf5a2f71d603e2fc3a3"},
-    {file = "cryptography-41.0.1-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:059e348f9a3c1950937e1b5d7ba1f8e968508ab181e75fc32b879452f08356db"},
-    {file = "cryptography-41.0.1-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:b4ceb5324b998ce2003bc17d519080b4ec8d5b7b70794cbd2836101406a9be31"},
-    {file = "cryptography-41.0.1-cp37-abi3-win32.whl", hash = "sha256:8f4ab7021127a9b4323537300a2acfb450124b2def3756f64dc3a3d2160ee4b5"},
-    {file = "cryptography-41.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:1fee5aacc7367487b4e22484d3c7e547992ed726d14864ee33c0176ae43b0d7c"},
-    {file = "cryptography-41.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9a6c7a3c87d595608a39980ebaa04d5a37f94024c9f24eb7d10262b92f739ddb"},
-    {file = "cryptography-41.0.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5d092fdfedaec4cbbffbf98cddc915ba145313a6fdaab83c6e67f4e6c218e6f3"},
-    {file = "cryptography-41.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a8e6c2de6fbbcc5e14fd27fb24414507cb3333198ea9ab1258d916f00bc3039"},
-    {file = "cryptography-41.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:cb33ccf15e89f7ed89b235cff9d49e2e62c6c981a6061c9c8bb47ed7951190bc"},
-    {file = "cryptography-41.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5f0ff6e18d13a3de56f609dd1fd11470918f770c6bd5d00d632076c727d35485"},
-    {file = "cryptography-41.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7bfc55a5eae8b86a287747053140ba221afc65eb06207bedf6e019b8934b477c"},
-    {file = "cryptography-41.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:eb8163f5e549a22888c18b0d53d6bb62a20510060a22fd5a995ec8a05268df8a"},
-    {file = "cryptography-41.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8dde71c4169ec5ccc1087bb7521d54251c016f126f922ab2dfe6649170a3b8c5"},
-    {file = "cryptography-41.0.1.tar.gz", hash = "sha256:d34579085401d3f49762d2f7d6634d6b6c2ae1242202e860f4d26b046e3a1006"},
+    {file = "cryptography-41.0.2-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:01f1d9e537f9a15b037d5d9ee442b8c22e3ae11ce65ea1f3316a41c78756b711"},
+    {file = "cryptography-41.0.2-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:079347de771f9282fbfe0e0236c716686950c19dee1b76240ab09ce1624d76d7"},
+    {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:439c3cc4c0d42fa999b83ded80a9a1fb54d53c58d6e59234cfe97f241e6c781d"},
+    {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f14ad275364c8b4e525d018f6716537ae7b6d369c094805cae45300847e0894f"},
+    {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:84609ade00a6ec59a89729e87a503c6e36af98ddcd566d5f3be52e29ba993182"},
+    {file = "cryptography-41.0.2-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:49c3222bb8f8e800aead2e376cbef687bc9e3cb9b58b29a261210456a7783d83"},
+    {file = "cryptography-41.0.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:d73f419a56d74fef257955f51b18d046f3506270a5fd2ac5febbfa259d6c0fa5"},
+    {file = "cryptography-41.0.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:2a034bf7d9ca894720f2ec1d8b7b5832d7e363571828037f9e0c4f18c1b58a58"},
+    {file = "cryptography-41.0.2-cp37-abi3-win32.whl", hash = "sha256:d124682c7a23c9764e54ca9ab5b308b14b18eba02722b8659fb238546de83a76"},
+    {file = "cryptography-41.0.2-cp37-abi3-win_amd64.whl", hash = "sha256:9c3fe6534d59d071ee82081ca3d71eed3210f76ebd0361798c74abc2bcf347d4"},
+    {file = "cryptography-41.0.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a719399b99377b218dac6cf547b6ec54e6ef20207b6165126a280b0ce97e0d2a"},
+    {file = "cryptography-41.0.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:182be4171f9332b6741ee818ec27daff9fb00349f706629f5cbf417bd50e66fd"},
+    {file = "cryptography-41.0.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:7a9a3bced53b7f09da251685224d6a260c3cb291768f54954e28f03ef14e3766"},
+    {file = "cryptography-41.0.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f0dc40e6f7aa37af01aba07277d3d64d5a03dc66d682097541ec4da03cc140ee"},
+    {file = "cryptography-41.0.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:674b669d5daa64206c38e507808aae49904c988fa0a71c935e7006a3e1e83831"},
+    {file = "cryptography-41.0.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7af244b012711a26196450d34f483357e42aeddb04128885d95a69bd8b14b69b"},
+    {file = "cryptography-41.0.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9b6d717393dbae53d4e52684ef4f022444fc1cce3c48c38cb74fca29e1f08eaa"},
+    {file = "cryptography-41.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:192255f539d7a89f2102d07d7375b1e0a81f7478925b3bc2e0549ebf739dae0e"},
+    {file = "cryptography-41.0.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f772610fe364372de33d76edcd313636a25684edb94cee53fd790195f5989d14"},
+    {file = "cryptography-41.0.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:b332cba64d99a70c1e0836902720887fb4529ea49ea7f5462cf6640e095e11d2"},
+    {file = "cryptography-41.0.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9a6673c1828db6270b76b22cc696f40cde9043eb90373da5c2f8f2158957f42f"},
+    {file = "cryptography-41.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:342f3767e25876751e14f8459ad85e77e660537ca0a066e10e75df9c9e9099f0"},
+    {file = "cryptography-41.0.2.tar.gz", hash = "sha256:7d230bf856164de164ecb615ccc14c7fc6de6906ddd5b491f3af90d3514c925c"},
 ]

 [package.dependencies]
@@ -969,25 +973,42 @@ i18n = ["Babel (>=2.7)"]

 [[package]]
 name = "jsonschema"
-version = "4.17.3"
+version = "4.18.3"
 description = "An implementation of JSON Schema validation for Python"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"},
-    {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"},
+    {file = "jsonschema-4.18.3-py3-none-any.whl", hash = "sha256:aab78b34c2de001c6b692232f08c21a97b436fe18e0b817bf0511046924fceef"},
+    {file = "jsonschema-4.18.3.tar.gz", hash = "sha256:64b7104d72efe856bea49ca4af37a14a9eba31b40bb7238179f3803130fd34d9"},
 ]

 [package.dependencies]
-attrs = ">=17.4.0"
+attrs = ">=22.2.0"
 importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
+jsonschema-specifications = ">=2023.03.6"
 pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""}
-pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2"
+referencing = ">=0.28.4"
+rpds-py = ">=0.7.1"

 [package.extras]
 format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
 format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]

+[[package]]
+name = "jsonschema-specifications"
+version = "2023.6.1"
+description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "jsonschema_specifications-2023.6.1-py3-none-any.whl", hash = "sha256:3d2b82663aff01815f744bb5c7887e2121a63399b49b104a3c96145474d091d7"},
+    {file = "jsonschema_specifications-2023.6.1.tar.gz", hash = "sha256:ca1c4dd059a9e7b34101cf5b3ab7ff1d18b139f35950d598d629837ef66e8f28"},
+]
+
+[package.dependencies]
+importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
+referencing = ">=0.28.0"
+
 [[package]]
 name = "keyring"
 version = "23.13.1"
@@ -1081,6 +1102,8 @@ files = [
     {file = "lxml-4.9.3-cp27-cp27m-macosx_11_0_x86_64.whl", hash = "sha256:b0a545b46b526d418eb91754565ba5b63b1c0b12f9bd2f808c852d9b4b2f9b5c"},
     {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:075b731ddd9e7f68ad24c635374211376aa05a281673ede86cbe1d1b3455279d"},
     {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1e224d5755dba2f4a9498e150c43792392ac9b5380aa1b845f98a1618c94eeef"},
+    {file = "lxml-4.9.3-cp27-cp27m-win32.whl", hash = "sha256:2c74524e179f2ad6d2a4f7caf70e2d96639c0954c943ad601a9e146c76408ed7"},
+    {file = "lxml-4.9.3-cp27-cp27m-win_amd64.whl", hash = "sha256:4f1026bc732b6a7f96369f7bfe1a4f2290fb34dce00d8644bc3036fb351a4ca1"},
     {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0781a98ff5e6586926293e59480b64ddd46282953203c76ae15dbbbf302e8bb"},
     {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cef2502e7e8a96fe5ad686d60b49e1ab03e438bd9123987994528febd569868e"},
     {file = "lxml-4.9.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b86164d2cff4d3aaa1f04a14685cbc072efd0b4f99ca5708b2ad1b9b5988a991"},
@@ -1728,13 +1751,13 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytes

 [[package]]
 name = "prometheus-client"
-version = "0.17.0"
+version = "0.17.1"
 description = "Python client for the Prometheus monitoring system."
 optional = false
 python-versions = ">=3.6"
 files = [
-    {file = "prometheus_client-0.17.0-py3-none-any.whl", hash = "sha256:a77b708cf083f4d1a3fb3ce5c95b4afa32b9c521ae363354a4a910204ea095ce"},
-    {file = "prometheus_client-0.17.0.tar.gz", hash = "sha256:9c3b26f1535945e85b8934fb374678d263137b78ef85f305b1156c7c881cd11b"},
+    {file = "prometheus_client-0.17.1-py3-none-any.whl", hash = "sha256:e537f37160f6807b8202a6fc4764cdd19bac5480ddd3e0d463c3002b34462101"},
+    {file = "prometheus_client-0.17.1.tar.gz", hash = "sha256:21e674f39831ae3f8acde238afd9a27a37d0d2fb5a28ea094f0ce25d2cbf2091"},
 ]

 [package.extras]
@@ -1827,47 +1850,47 @@ files = [

 [[package]]
 name = "pydantic"
-version = "1.10.10"
+version = "1.10.11"
 description = "Data validation and settings management using python type hints"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "pydantic-1.10.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:adad1ee4ab9888f12dac2529276704e719efcf472e38df7813f5284db699b4ec"},
-    {file = "pydantic-1.10.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7a7db03339893feef2092ff7b1afc9497beed15ebd4af84c3042a74abce02d48"},
-    {file = "pydantic-1.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67b3714b97ff84b2689654851c2426389bcabfac9080617bcf4306c69db606f6"},
-    {file = "pydantic-1.10.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edfdf0a5abc5c9bf2052ebaec20e67abd52e92d257e4f2d30e02c354ed3e6030"},
-    {file = "pydantic-1.10.10-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20a3b30fd255eeeb63caa9483502ba96b7795ce5bf895c6a179b3d909d9f53a6"},
-    {file = "pydantic-1.10.10-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:db4c7f7e60ca6f7d6c1785070f3e5771fcb9b2d88546e334d2f2c3934d949028"},
-    {file = "pydantic-1.10.10-cp310-cp310-win_amd64.whl", hash = "sha256:a2d5be50ac4a0976817144c7d653e34df2f9436d15555189f5b6f61161d64183"},
-    {file = "pydantic-1.10.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:566a04ba755e8f701b074ffb134ddb4d429f75d5dced3fbd829a527aafe74c71"},
-    {file = "pydantic-1.10.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f79db3652ed743309f116ba863dae0c974a41b688242482638b892246b7db21d"},
-    {file = "pydantic-1.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c62376890b819bebe3c717a9ac841a532988372b7e600e76f75c9f7c128219d5"},
-    {file = "pydantic-1.10.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4870f13a4fafd5bc3e93cff3169222534fad867918b188e83ee0496452978437"},
-    {file = "pydantic-1.10.10-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:990027e77cda6072a566e433b6962ca3b96b4f3ae8bd54748e9d62a58284d9d7"},
-    {file = "pydantic-1.10.10-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8c40964596809eb616d94f9c7944511f620a1103d63d5510440ed2908fc410af"},
-    {file = "pydantic-1.10.10-cp311-cp311-win_amd64.whl", hash = "sha256:ea9eebc2ebcba3717e77cdeee3f6203ffc0e78db5f7482c68b1293e8cc156e5e"},
-    {file = "pydantic-1.10.10-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:762aa598f79b4cac2f275d13336b2dd8662febee2a9c450a49a2ab3bec4b385f"},
-    {file = "pydantic-1.10.10-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dab5219659f95e357d98d70577b361383057fb4414cfdb587014a5f5c595f7b"},
-    {file = "pydantic-1.10.10-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3d4ee957a727ccb5a36f1b0a6dbd9fad5dedd2a41eada99a8df55c12896e18d"},
-    {file = "pydantic-1.10.10-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b69f9138dec566962ec65623c9d57bee44412d2fc71065a5f3ebb3820bdeee96"},
-    {file = "pydantic-1.10.10-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7aa75d1bd9cc275cf9782f50f60cddaf74cbaae19b6ada2a28e737edac420312"},
-    {file = "pydantic-1.10.10-cp37-cp37m-win_amd64.whl", hash = "sha256:9f62a727f5c590c78c2d12fda302d1895141b767c6488fe623098f8792255fe5"},
-    {file = "pydantic-1.10.10-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aac218feb4af73db8417ca7518fb3bade4534fcca6e3fb00f84966811dd94450"},
-    {file = "pydantic-1.10.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:88546dc10a40b5b52cae87d64666787aeb2878f9a9b37825aedc2f362e7ae1da"},
-    {file = "pydantic-1.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c41bbaae89e32fc582448e71974de738c055aef5ab474fb25692981a08df808a"},
-    {file = "pydantic-1.10.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b71bd504d1573b0b722ae536e8ffb796bedeef978979d076bf206e77dcc55a5"},
-    {file = "pydantic-1.10.10-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e088e3865a2270ecbc369924cd7d9fbc565667d9158e7f304e4097ebb9cf98dd"},
-    {file = "pydantic-1.10.10-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3403a090db45d4027d2344859d86eb797484dfda0706cf87af79ace6a35274ef"},
-    {file = "pydantic-1.10.10-cp38-cp38-win_amd64.whl", hash = "sha256:e0014e29637125f4997c174dd6167407162d7af0da73414a9340461ea8573252"},
-    {file = "pydantic-1.10.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9965e49c6905840e526e5429b09e4c154355b6ecc0a2f05492eda2928190311d"},
-    {file = "pydantic-1.10.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:748d10ab6089c5d196e1c8be9de48274f71457b01e59736f7a09c9dc34f51887"},
-    {file = "pydantic-1.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86936c383f7c38fd26d35107eb669c85d8f46dfceae873264d9bab46fe1c7dde"},
-    {file = "pydantic-1.10.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a26841be620309a9697f5b1ffc47dce74909e350c5315ccdac7a853484d468a"},
-    {file = "pydantic-1.10.10-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:409b810f387610cc7405ab2fa6f62bdf7ea485311845a242ebc0bd0496e7e5ac"},
-    {file = "pydantic-1.10.10-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ce937a2a2c020bcad1c9fde02892392a1123de6dda906ddba62bfe8f3e5989a2"},
-    {file = "pydantic-1.10.10-cp39-cp39-win_amd64.whl", hash = "sha256:37ebddef68370e6f26243acc94de56d291e01227a67b2ace26ea3543cf53dd5f"},
-    {file = "pydantic-1.10.10-py3-none-any.whl", hash = "sha256:a5939ec826f7faec434e2d406ff5e4eaf1716eb1f247d68cd3d0b3612f7b4c8a"},
-    {file = "pydantic-1.10.10.tar.gz", hash = "sha256:3b8d5bd97886f9eb59260594207c9f57dce14a6f869c6ceea90188715d29921a"},
+    {file = "pydantic-1.10.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ff44c5e89315b15ff1f7fdaf9853770b810936d6b01a7bcecaa227d2f8fe444f"},
+    {file = "pydantic-1.10.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a6c098d4ab5e2d5b3984d3cb2527e2d6099d3de85630c8934efcfdc348a9760e"},
+    {file = "pydantic-1.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16928fdc9cb273c6af00d9d5045434c39afba5f42325fb990add2c241402d151"},
+    {file = "pydantic-1.10.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0588788a9a85f3e5e9ebca14211a496409cb3deca5b6971ff37c556d581854e7"},
+    {file = "pydantic-1.10.11-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e9baf78b31da2dc3d3f346ef18e58ec5f12f5aaa17ac517e2ffd026a92a87588"},
+    {file = "pydantic-1.10.11-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:373c0840f5c2b5b1ccadd9286782852b901055998136287828731868027a724f"},
+    {file = "pydantic-1.10.11-cp310-cp310-win_amd64.whl", hash = "sha256:c3339a46bbe6013ef7bdd2844679bfe500347ac5742cd4019a88312aa58a9847"},
+    {file = "pydantic-1.10.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:08a6c32e1c3809fbc49debb96bf833164f3438b3696abf0fbeceb417d123e6eb"},
+    {file = "pydantic-1.10.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a451ccab49971af043ec4e0d207cbc8cbe53dbf148ef9f19599024076fe9c25b"},
+    {file = "pydantic-1.10.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b02d24f7b2b365fed586ed73582c20f353a4c50e4be9ba2c57ab96f8091ddae"},
+    {file = "pydantic-1.10.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f34739a89260dfa420aa3cbd069fbcc794b25bbe5c0a214f8fb29e363484b66"},
+    {file = "pydantic-1.10.11-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e297897eb4bebde985f72a46a7552a7556a3dd11e7f76acda0c1093e3dbcf216"},
+    {file = "pydantic-1.10.11-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d185819a7a059550ecb85d5134e7d40f2565f3dd94cfd870132c5f91a89cf58c"},
+    {file = "pydantic-1.10.11-cp311-cp311-win_amd64.whl", hash = "sha256:4400015f15c9b464c9db2d5d951b6a780102cfa5870f2c036d37c23b56f7fc1b"},
+    {file = "pydantic-1.10.11-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2417de68290434461a266271fc57274a138510dca19982336639484c73a07af6"},
+    {file = "pydantic-1.10.11-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:331c031ba1554b974c98679bd0780d89670d6fd6f53f5d70b10bdc9addee1713"},
+    {file = "pydantic-1.10.11-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8268a735a14c308923e8958363e3a3404f6834bb98c11f5ab43251a4e410170c"},
+    {file = "pydantic-1.10.11-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:44e51ba599c3ef227e168424e220cd3e544288c57829520dc90ea9cb190c3248"},
+    {file = "pydantic-1.10.11-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d7781f1d13b19700b7949c5a639c764a077cbbdd4322ed505b449d3ca8edcb36"},
+    {file = "pydantic-1.10.11-cp37-cp37m-win_amd64.whl", hash = "sha256:7522a7666157aa22b812ce14c827574ddccc94f361237ca6ea8bb0d5c38f1629"},
+    {file = "pydantic-1.10.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc64eab9b19cd794a380179ac0e6752335e9555d214cfcb755820333c0784cb3"},
+    {file = "pydantic-1.10.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8dc77064471780262b6a68fe67e013298d130414d5aaf9b562c33987dbd2cf4f"},
+    {file = "pydantic-1.10.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe429898f2c9dd209bd0632a606bddc06f8bce081bbd03d1c775a45886e2c1cb"},
+    {file = "pydantic-1.10.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:192c608ad002a748e4a0bed2ddbcd98f9b56df50a7c24d9a931a8c5dd053bd3d"},
+    {file = "pydantic-1.10.11-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ef55392ec4bb5721f4ded1096241e4b7151ba6d50a50a80a2526c854f42e6a2f"},
+    {file = "pydantic-1.10.11-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:41e0bb6efe86281623abbeeb0be64eab740c865388ee934cd3e6a358784aca6e"},
+    {file = "pydantic-1.10.11-cp38-cp38-win_amd64.whl", hash = "sha256:265a60da42f9f27e0b1014eab8acd3e53bd0bad5c5b4884e98a55f8f596b2c19"},
+    {file = "pydantic-1.10.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:469adf96c8e2c2bbfa655fc7735a2a82f4c543d9fee97bd113a7fb509bf5e622"},
+    {file = "pydantic-1.10.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e6cbfbd010b14c8a905a7b10f9fe090068d1744d46f9e0c021db28daeb8b6de1"},
+    {file = "pydantic-1.10.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abade85268cc92dff86d6effcd917893130f0ff516f3d637f50dadc22ae93999"},
+    {file = "pydantic-1.10.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9738b0f2e6c70f44ee0de53f2089d6002b10c33264abee07bdb5c7f03038303"},
+    {file = "pydantic-1.10.11-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:787cf23e5a0cde753f2eabac1b2e73ae3844eb873fd1f5bdbff3048d8dbb7604"},
+    {file = "pydantic-1.10.11-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:174899023337b9fc685ac8adaa7b047050616136ccd30e9070627c1aaab53a13"},
+    {file = "pydantic-1.10.11-cp39-cp39-win_amd64.whl", hash = "sha256:1954f8778489a04b245a1e7b8b22a9d3ea8ef49337285693cf6959e4b757535e"},
+    {file = "pydantic-1.10.11-py3-none-any.whl", hash = "sha256:008c5e266c8aada206d0627a011504e14268a62091450210eda7c07fabe6963e"},
+    {file = "pydantic-1.10.11.tar.gz", hash = "sha256:f66d479cf7eb331372c470614be6511eae96f1f120344c25f3f9bb59fb1b5528"},
 ]

 [package.dependencies]
@@ -2008,42 +2031,6 @@ cryptography = ">=38.0.0,<40.0.0 || >40.0.0,<40.0.1 || >40.0.1,<42"
 docs = ["sphinx (!=5.2.0,!=5.2.0.post0)", "sphinx-rtd-theme"]
 test = ["flaky", "pretend", "pytest (>=3.0.1)"]

-[[package]]
-name = "pyrsistent"
-version = "0.19.3"
-description = "Persistent/Functional/Immutable data structures"
-optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "pyrsistent-0.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:20460ac0ea439a3e79caa1dbd560344b64ed75e85d8703943e0b66c2a6150e4a"},
-    {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c18264cb84b5e68e7085a43723f9e4c1fd1d935ab240ce02c0324a8e01ccb64"},
-    {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b774f9288dda8d425adb6544e5903f1fb6c273ab3128a355c6b972b7df39dcf"},
-    {file = "pyrsistent-0.19.3-cp310-cp310-win32.whl", hash = "sha256:5a474fb80f5e0d6c9394d8db0fc19e90fa540b82ee52dba7d246a7791712f74a"},
-    {file = "pyrsistent-0.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:49c32f216c17148695ca0e02a5c521e28a4ee6c5089f97e34fe24163113722da"},
-    {file = "pyrsistent-0.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9"},
-    {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab2204234c0ecd8b9368dbd6a53e83c3d4f3cab10ecaf6d0e772f456c442393"},
-    {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e42296a09e83028b3476f7073fcb69ffebac0e66dbbfd1bd847d61f74db30f19"},
-    {file = "pyrsistent-0.19.3-cp311-cp311-win32.whl", hash = "sha256:64220c429e42a7150f4bfd280f6f4bb2850f95956bde93c6fda1b70507af6ef3"},
-    {file = "pyrsistent-0.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:016ad1afadf318eb7911baa24b049909f7f3bb2c5b1ed7b6a8f21db21ea3faa8"},
-    {file = "pyrsistent-0.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4db1bd596fefd66b296a3d5d943c94f4fac5bcd13e99bffe2ba6a759d959a28"},
-    {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeda827381f5e5d65cced3024126529ddc4289d944f75e090572c77ceb19adbf"},
-    {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42ac0b2f44607eb92ae88609eda931a4f0dfa03038c44c772e07f43e738bcac9"},
-    {file = "pyrsistent-0.19.3-cp37-cp37m-win32.whl", hash = "sha256:e8f2b814a3dc6225964fa03d8582c6e0b6650d68a232df41e3cc1b66a5d2f8d1"},
-    {file = "pyrsistent-0.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9bb60a40a0ab9aba40a59f68214eed5a29c6274c83b2cc206a359c4a89fa41b"},
-    {file = "pyrsistent-0.19.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a2471f3f8693101975b1ff85ffd19bb7ca7dd7c38f8a81701f67d6b4f97b87d8"},
-    {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc5d149f31706762c1f8bda2e8c4f8fead6e80312e3692619a75301d3dbb819a"},
-    {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3311cb4237a341aa52ab8448c27e3a9931e2ee09561ad150ba94e4cfd3fc888c"},
-    {file = "pyrsistent-0.19.3-cp38-cp38-win32.whl", hash = "sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c"},
-    {file = "pyrsistent-0.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:c147257a92374fde8498491f53ffa8f4822cd70c0d85037e09028e478cababb7"},
-    {file = "pyrsistent-0.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b735e538f74ec31378f5a1e3886a26d2ca6351106b4dfde376a26fc32a044edc"},
-    {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99abb85579e2165bd8522f0c0138864da97847875ecbd45f3e7e2af569bfc6f2"},
-    {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a8cb235fa6d3fd7aae6a4f1429bbb1fec1577d978098da1252f0489937786f3"},
-    {file = "pyrsistent-0.19.3-cp39-cp39-win32.whl", hash = "sha256:c74bed51f9b41c48366a286395c67f4e894374306b197e62810e0fdaf2364da2"},
-    {file = "pyrsistent-0.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:878433581fc23e906d947a6814336eee031a00e6defba224234169ae3d3d6a98"},
-    {file = "pyrsistent-0.19.3-py3-none-any.whl", hash = "sha256:ccf0d6bd208f8111179f0c26fdf84ed7c3891982f2edaeae7422575f47e66b64"},
-    {file = "pyrsistent-0.19.3.tar.gz", hash = "sha256:1a2994773706bbb4995c31a97bc94f1418314923bd1048c6d964837040376440"},
-]
-
 [[package]]
 name = "pysaml2"
 version = "7.3.1"
@@ -2172,6 +2159,21 @@ Pygments = ">=2.5.1"
[package.extras]
md = ["cmarkgfm (>=0.8.0)"]

[[package]]
name = "referencing"
version = "0.29.1"
description = "JSON Referencing + Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "referencing-0.29.1-py3-none-any.whl", hash = "sha256:d3c8f323ee1480095da44d55917cfb8278d73d6b4d5f677e3e40eb21314ac67f"},
{file = "referencing-0.29.1.tar.gz", hash = "sha256:90cb53782d550ba28d2166ef3f55731f38397def8832baac5d45235f1995e35e"},
]

[package.dependencies]
attrs = ">=22.2.0"
rpds-py = ">=0.7.0"

[[package]]
name = "requests"
version = "2.31.0"
@@ -2240,6 +2242,112 @@ typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9
[package.extras]
jupyter = ["ipywidgets (>=7.5.1,<9)"]

[[package]]
name = "rpds-py"
version = "0.8.10"
description = "Python bindings to Rust's persistent data structures (rpds)"
optional = false
python-versions = ">=3.8"
files = [
{file = "rpds_py-0.8.10-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:93d06cccae15b3836247319eee7b6f1fdcd6c10dabb4e6d350d27bd0bdca2711"},
{file = "rpds_py-0.8.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3816a890a6a9e9f1de250afa12ca71c9a7a62f2b715a29af6aaee3aea112c181"},
{file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7c6304b894546b5a6bdc0fe15761fa53fe87d28527a7142dae8de3c663853e1"},
{file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ad3bfb44c8840fb4be719dc58e229f435e227fbfbe133dc33f34981ff622a8f8"},
{file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:14f1c356712f66653b777ecd8819804781b23dbbac4eade4366b94944c9e78ad"},
{file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:82bb361cae4d0a627006dadd69dc2f36b7ad5dc1367af9d02e296ec565248b5b"},
{file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2e3c4f2a8e3da47f850d7ea0d7d56720f0f091d66add889056098c4b2fd576c"},
{file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15a90d0ac11b4499171067ae40a220d1ca3cb685ec0acc356d8f3800e07e4cb8"},
{file = "rpds_py-0.8.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:70bb9c8004b97b4ef7ae56a2aa56dfaa74734a0987c78e7e85f00004ab9bf2d0"},
{file = "rpds_py-0.8.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d64f9f88d5203274a002b54442cafc9c7a1abff2a238f3e767b70aadf919b451"},
{file = "rpds_py-0.8.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ccbbd276642788c4376fbe8d4e6c50f0fb4972ce09ecb051509062915891cbf0"},
{file = "rpds_py-0.8.10-cp310-none-win32.whl", hash = "sha256:fafc0049add8043ad07ab5382ee80d80ed7e3699847f26c9a5cf4d3714d96a84"},
{file = "rpds_py-0.8.10-cp310-none-win_amd64.whl", hash = "sha256:915031002c86a5add7c6fd4beb601b2415e8a1c956590a5f91d825858e92fe6e"},
{file = "rpds_py-0.8.10-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:84eb541a44f7a18f07a6bfc48b95240739e93defe1fdfb4f2a295f37837945d7"},
{file = "rpds_py-0.8.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f59996d0550894affaad8743e97b9b9c98f638b221fac12909210ec3d9294786"},
{file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9adb5664b78fcfcd830000416c8cc69853ef43cb084d645b3f1f0296edd9bae"},
{file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f96f3f98fbff7af29e9edf9a6584f3c1382e7788783d07ba3721790625caa43e"},
{file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:376b8de737401050bd12810003d207e824380be58810c031f10ec563ff6aef3d"},
{file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d1c2bc319428d50b3e0fa6b673ab8cc7fa2755a92898db3a594cbc4eeb6d1f7"},
{file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73a1e48430f418f0ac3dfd87860e4cc0d33ad6c0f589099a298cb53724db1169"},
{file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:134ec8f14ca7dbc6d9ae34dac632cdd60939fe3734b5d287a69683c037c51acb"},
{file = "rpds_py-0.8.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4b519bac7c09444dd85280fd60f28c6dde4389c88dddf4279ba9b630aca3bbbe"},
{file = "rpds_py-0.8.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9cd57981d9fab04fc74438d82460f057a2419974d69a96b06a440822d693b3c0"},
{file = "rpds_py-0.8.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:69d089c026f6a8b9d64a06ff67dc3be196707b699d7f6ca930c25f00cf5e30d8"},
{file = "rpds_py-0.8.10-cp311-none-win32.whl", hash = "sha256:220bdcad2d2936f674650d304e20ac480a3ce88a40fe56cd084b5780f1d104d9"},
{file = "rpds_py-0.8.10-cp311-none-win_amd64.whl", hash = "sha256:6c6a0225b8501d881b32ebf3f5807a08ad3685b5eb5f0a6bfffd3a6e039b2055"},
{file = "rpds_py-0.8.10-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:e3d0cd3dff0e7638a7b5390f3a53057c4e347f4ef122ee84ed93fc2fb7ea4aa2"},
{file = "rpds_py-0.8.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d77dff3a5aa5eedcc3da0ebd10ff8e4969bc9541aa3333a8d41715b429e99f47"},
{file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41c89a366eae49ad9e65ed443a8f94aee762931a1e3723749d72aeac80f5ef2f"},
{file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3793c21494bad1373da517001d0849eea322e9a049a0e4789e50d8d1329df8e7"},
{file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:805a5f3f05d186c5d50de2e26f765ba7896d0cc1ac5b14ffc36fae36df5d2f10"},
{file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b01b39ad5411563031ea3977bbbc7324d82b088e802339e6296f082f78f6115c"},
{file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3f1e860be21f3e83011116a65e7310486300e08d9a3028e73e8d13bb6c77292"},
{file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a13c8e56c46474cd5958d525ce6a9996727a83d9335684e41f5192c83deb6c58"},
{file = "rpds_py-0.8.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:93d99f957a300d7a4ced41615c45aeb0343bb8f067c42b770b505de67a132346"},
{file = "rpds_py-0.8.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:148b0b38d719c0760e31ce9285a9872972bdd7774969a4154f40c980e5beaca7"},
{file = "rpds_py-0.8.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3cc5e5b5514796f45f03a568981971b12a3570f3de2e76114f7dc18d4b60a3c4"},
{file = "rpds_py-0.8.10-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:e8e24b210a4deb5a7744971f8f77393005bae7f873568e37dfd9effe808be7f7"},
{file = "rpds_py-0.8.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b41941583adce4242af003d2a8337b066ba6148ca435f295f31ac6d9e4ea2722"},
{file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c490204e16bca4f835dba8467869fe7295cdeaa096e4c5a7af97f3454a97991"},
{file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ee45cd1d84beed6cbebc839fd85c2e70a3a1325c8cfd16b62c96e2ffb565eca"},
{file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a8ca409f1252e1220bf09c57290b76cae2f14723746215a1e0506472ebd7bdf"},
{file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96b293c0498c70162effb13100624c5863797d99df75f2f647438bd10cbf73e4"},
{file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4627520a02fccbd324b33c7a83e5d7906ec746e1083a9ac93c41ac7d15548c7"},
{file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e39d7ab0c18ac99955b36cd19f43926450baba21e3250f053e0704d6ffd76873"},
{file = "rpds_py-0.8.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ba9f1d1ebe4b63801977cec7401f2d41e888128ae40b5441270d43140efcad52"},
{file = "rpds_py-0.8.10-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:802f42200d8caf7f25bbb2a6464cbd83e69d600151b7e3b49f49a47fa56b0a38"},
{file = "rpds_py-0.8.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d19db6ba816e7f59fc806c690918da80a7d186f00247048cd833acdab9b4847b"},
{file = "rpds_py-0.8.10-cp38-none-win32.whl", hash = "sha256:7947e6e2c2ad68b1c12ee797d15e5f8d0db36331200b0346871492784083b0c6"},
{file = "rpds_py-0.8.10-cp38-none-win_amd64.whl", hash = "sha256:fa326b3505d5784436d9433b7980171ab2375535d93dd63fbcd20af2b5ca1bb6"},
{file = "rpds_py-0.8.10-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:7b38a9ac96eeb6613e7f312cd0014de64c3f07000e8bf0004ad6ec153bac46f8"},
{file = "rpds_py-0.8.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c4d42e83ddbf3445e6514f0aff96dca511421ed0392d9977d3990d9f1ba6753c"},
{file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b21575031478609db6dbd1f0465e739fe0e7f424a8e7e87610a6c7f68b4eb16"},
{file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:574868858a7ff6011192c023a5289158ed20e3f3b94b54f97210a773f2f22921"},
{file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae40f4a70a1f40939d66ecbaf8e7edc144fded190c4a45898a8cfe19d8fc85ea"},
{file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37f7ee4dc86db7af3bac6d2a2cedbecb8e57ce4ed081f6464510e537589f8b1e"},
{file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:695f642a3a5dbd4ad2ffbbacf784716ecd87f1b7a460843b9ddf965ccaeafff4"},
{file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f43ab4cb04bde6109eb2555528a64dfd8a265cc6a9920a67dcbde13ef53a46c8"},
{file = "rpds_py-0.8.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a11ab0d97be374efd04f640c04fe5c2d3dabc6dfb998954ea946ee3aec97056d"},
{file = "rpds_py-0.8.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:92cf5b3ee60eef41f41e1a2cabca466846fb22f37fc580ffbcb934d1bcab225a"},
{file = "rpds_py-0.8.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ceaac0c603bf5ac2f505a78b2dcab78d3e6b706be6596c8364b64cc613d208d2"},
{file = "rpds_py-0.8.10-cp39-none-win32.whl", hash = "sha256:dd4f16e57c12c0ae17606c53d1b57d8d1c8792efe3f065a37cb3341340599d49"},
{file = "rpds_py-0.8.10-cp39-none-win_amd64.whl", hash = "sha256:c03a435d26c3999c2a8642cecad5d1c4d10c961817536af52035f6f4ee2f5dd0"},
{file = "rpds_py-0.8.10-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:0da53292edafecba5e1d8c1218f99babf2ed0bf1c791d83c0ab5c29b57223068"},
{file = "rpds_py-0.8.10-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7d20a8ed227683401cc508e7be58cba90cc97f784ea8b039c8cd01111e6043e0"},
{file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97cab733d303252f7c2f7052bf021a3469d764fc2b65e6dbef5af3cbf89d4892"},
{file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8c398fda6df361a30935ab4c4bccb7f7a3daef2964ca237f607c90e9f3fdf66f"},
{file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2eb4b08c45f8f8d8254cdbfacd3fc5d6b415d64487fb30d7380b0d0569837bf1"},
{file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7dfb1cbb895810fa2b892b68153c17716c6abaa22c7dc2b2f6dcf3364932a1c"},
{file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89c92b74e8bf6f53a6f4995fd52f4bd510c12f103ee62c99e22bc9e05d45583c"},
{file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e9c0683cb35a9b5881b41bc01d5568ffc667910d9dbc632a1fba4e7d59e98773"},
{file = "rpds_py-0.8.10-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:0eeb2731708207d0fe2619afe6c4dc8cb9798f7de052da891de5f19c0006c315"},
{file = "rpds_py-0.8.10-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:7495010b658ec5b52835f21d8c8b1a7e52e194c50f095d4223c0b96c3da704b1"},
{file = "rpds_py-0.8.10-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:c72ebc22e70e04126158c46ba56b85372bc4d54d00d296be060b0db1671638a4"},
{file = "rpds_py-0.8.10-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2cd3045e7f6375dda64ed7db1c5136826facb0159ea982f77d9cf6125025bd34"},
{file = "rpds_py-0.8.10-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:2418cf17d653d24ffb8b75e81f9f60b7ba1b009a23298a433a4720b2a0a17017"},
{file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a2edf8173ac0c7a19da21bc68818be1321998528b5e3f748d6ee90c0ba2a1fd"},
{file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f29b8c55fd3a2bc48e485e37c4e2df3317f43b5cc6c4b6631c33726f52ffbb3"},
{file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a7d20c1cf8d7b3960c5072c265ec47b3f72a0c608a9a6ee0103189b4f28d531"},
{file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:521fc8861a86ae54359edf53a15a05fabc10593cea7b3357574132f8427a5e5a"},
{file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5c191713e98e7c28800233f039a32a42c1a4f9a001a8a0f2448b07391881036"},
{file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:083df0fafe199371206111583c686c985dddaf95ab3ee8e7b24f1fda54515d09"},
{file = "rpds_py-0.8.10-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:ed41f3f49507936a6fe7003985ea2574daccfef999775525d79eb67344e23767"},
{file = "rpds_py-0.8.10-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:2614c2732bf45de5c7f9e9e54e18bc78693fa2f635ae58d2895b7965e470378c"},
{file = "rpds_py-0.8.10-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:c60528671d9d467009a6ec284582179f6b88651e83367d0ab54cb739021cd7de"},
{file = "rpds_py-0.8.10-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ee744fca8d1ea822480a2a4e7c5f2e1950745477143668f0b523769426060f29"},
{file = "rpds_py-0.8.10-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a38b9f526d0d6cbdaa37808c400e3d9f9473ac4ff64d33d9163fd05d243dbd9b"},
{file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60e0e86e870350e03b3e25f9b1dd2c6cc72d2b5f24e070249418320a6f9097b7"},
{file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f53f55a8852f0e49b0fc76f2412045d6ad9d5772251dea8f55ea45021616e7d5"},
{file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c493365d3fad241d52f096e4995475a60a80f4eba4d3ff89b713bc65c2ca9615"},
{file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:300eb606e6b94a7a26f11c8cc8ee59e295c6649bd927f91e1dbd37a4c89430b6"},
{file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a665f6f1a87614d1c3039baf44109094926dedf785e346d8b0a728e9cabd27a"},
{file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:927d784648211447201d4c6f1babddb7971abad922b32257ab74de2f2750fad0"},
{file = "rpds_py-0.8.10-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:c200b30dd573afa83847bed7e3041aa36a8145221bf0cfdfaa62d974d720805c"},
{file = "rpds_py-0.8.10-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:08166467258fd0240a1256fce272f689f2360227ee41c72aeea103e9e4f63d2b"},
{file = "rpds_py-0.8.10-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:996cc95830de9bc22b183661d95559ec6b3cd900ad7bc9154c4cbf5be0c9b734"},
{file = "rpds_py-0.8.10.tar.gz", hash = "sha256:13e643ce8ad502a0263397362fb887594b49cf84bf518d6038c16f235f2bcea4"},
]

[[package]]
name = "ruff"
version = "0.0.277"
@@ -2904,13 +3012,13 @@ files = [

[[package]]
name = "types-pillow"
version = "9.5.0.4"
version = "10.0.0.1"
description = "Typing stubs for Pillow"
optional = false
python-versions = "*"
files = [
{file = "types-Pillow-9.5.0.4.tar.gz", hash = "sha256:f1b6af47abd151847ee25911ffeba784899bc7dc7f9eba8ca6a5aac522b012ef"},
{file = "types_Pillow-9.5.0.4-py3-none-any.whl", hash = "sha256:69427d9fa4320ff6e30f00fb9c0dd71185dc0a16de4757774220104759483466"},
{file = "types-Pillow-10.0.0.1.tar.gz", hash = "sha256:834a07a04504f8bf37936679bc6a5802945e7644d0727460c0c4d4307967e2a3"},
{file = "types_Pillow-10.0.0.1-py3-none-any.whl", hash = "sha256:be576b67418f1cb3b93794cf7946581be1009a33a10085b3c132eb0875a819b4"},
]

[[package]]
@@ -2987,13 +3095,13 @@ files = [

[[package]]
name = "typing-extensions"
version = "4.5.0"
version = "4.7.1"
description = "Backported and Experimental Type Hints for Python 3.7+"
optional = false
python-versions = ">=3.7"
files = [
{file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"},
{file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"},
{file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"},
{file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"},
]

[[package]]
@@ -22,14 +22,18 @@ from typing import Collection, Optional, Sequence, Set

# These are expanded inside the dockerfile to be a fully qualified image name.
# e.g. docker.io/library/debian:bullseye
#
# If an EOL is forced by a Python version and we're dropping support for it, make sure
# to remove references to the distribution across Synapse (search for "bullseye" for
# example)
DISTS = (
"debian:bullseye",
"debian:bookworm",
"debian:sid",
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
"ubuntu:kinetic", # 22.10 (EOL 2023-07-20)
"ubuntu:lunar", # 23.04 (EOL 2024-01)
"debian:bullseye", # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)
"debian:bookworm", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"debian:sid", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
"ubuntu:kinetic", # 22.10 (EOL 2023-07-20) (our EOL forced by Python 3.10 is 2026-10-04)
"ubuntu:lunar", # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
)

DESC = """\
@@ -214,7 +214,7 @@ fi
extra_test_args=()

test_tags="synapse_blacklist,msc3787,msc3874,msc3890,msc3391,msc3930,faster_joins"
test_tags="synapse_blacklist,msc3874,msc3890,msc3391,msc3930,faster_joins"

# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)

@@ -253,6 +253,10 @@ if [[ -n "$ASYNCIO_REACTOR" ]]; then
export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true
fi

if [[ -n "$UNIX_SOCKETS" ]]; then
# Enable full on Unix socket mode for Synapse, Redis and Postgresql
export PASS_SYNAPSE_USE_UNIX_SOCKET=1
fi

if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
# Set the log level to what is desired
@@ -217,6 +217,13 @@ class InvalidAPICallError(SynapseError):
super().__init__(HTTPStatus.BAD_REQUEST, msg, Codes.BAD_JSON)


class InvalidProxyCredentialsError(SynapseError):
"""Error raised when the proxy credentials are invalid."""

def __init__(self, msg: str, errcode: str = Codes.UNKNOWN):
super().__init__(401, msg, errcode)


class ProxiedRequestError(SynapseError):
"""An error from a general matrix endpoint, eg. from a proxied Matrix API call.
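For orientation, a minimal sketch of how the new error type might be raised at a hypothetical call site; the function and argument names below are illustrative, not part of this change:

def check_proxy_credentials(presented_secret: str, expected_secret: str) -> None:
    # Hypothetical guard for a proxied replication request.
    # InvalidProxyCredentialsError hardcodes the 401 status, so callers only
    # supply the message (and optionally the errcode).
    if presented_secret != expected_secret:
        raise InvalidProxyCredentialsError("Invalid Authorization header")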
@@ -78,36 +78,29 @@ class RoomVersion:
# MSC2209: Check 'notifications' key while verifying
# m.room.power_levels auth rules.
limit_notifications_power_levels: bool
# MSC2175: No longer include the creator in m.room.create events.
msc2175_implicit_room_creator: bool
# MSC2174/MSC2176: Apply updated redaction rules algorithm, move redacts to
# content property.
msc2176_redaction_rules: bool
# MSC3083: Support the 'restricted' join_rule.
msc3083_join_rules: bool
# MSC3375: Support for the proper redaction rules for MSC3083. This mustn't
# be enabled if MSC3083 is not.
msc3375_redaction_rules: bool
# MSC2403: Allows join_rules to be set to 'knock', changes auth rules to allow sending
# m.room.membership event with membership 'knock'.
msc2403_knocking: bool
# No longer include the creator in m.room.create events.
implicit_room_creator: bool
# Apply updated redaction rules algorithm from room version 11.
updated_redaction_rules: bool
# Support the 'restricted' join rule.
restricted_join_rule: bool
# Support for the proper redaction rules for the restricted join rule. This requires
# restricted_join_rule to be enabled.
restricted_join_rule_fix: bool
# Support the 'knock' join rule.
knock_join_rule: bool
# MSC3389: Protect relation information from redaction.
msc3389_relation_redactions: bool
# MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of
# knocks and restricted join rules into the same join condition.
msc3787_knock_restricted_join_rule: bool
# MSC3667: Enforce integer power levels
msc3667_int_only_power_levels: bool
# MSC3821: Do not redact the third_party_invite content field for membership events.
msc3821_redaction_rules: bool
# Support the 'knock_restricted' join rule.
knock_restricted_join_rule: bool
# Enforce integer power levels
enforce_int_power_levels: bool
# MSC3931: Adds a push rule condition for "room version feature flags", making
# some push rules room version dependent. Note that adding a flag to this list
# is not enough to mark it "supported": the push rule evaluator also needs to
# support the flag. Unknown flags are ignored by the evaluator, making conditions
# fail if used.
msc3931_push_features: Tuple[str, ...] # values from PushRuleRoomFlag
# MSC3989: Redact the origin field.
msc3989_redaction_rules: bool


class RoomVersions:
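The hunk above renames the per-MSC flags to capability-style names now that the behaviour is part of stable room versions. A small sketch (an illustrative helper, assuming the `RoomVersion` class above) of how calling code reads after the rename:

from typing import Set

def supported_join_rules(room_version: RoomVersion) -> Set[str]:
    # Illustrative only: map the renamed capability flags to join rules.
    rules = {"public", "invite", "private"}
    if room_version.knock_join_rule:  # was msc2403_knocking
        rules.add("knock")
    if room_version.restricted_join_rule:  # was msc3083_join_rules
        rules.add("restricted")
    if room_version.knock_restricted_join_rule:  # was msc3787_knock_restricted_join_rule
        rules.add("knock_restricted")
    return rules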
@@ -120,17 +113,15 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=False,
restricted_join_rule_fix=False,
knock_join_rule=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V2 = RoomVersion(
"2",
@@ -141,17 +132,15 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=False,
restricted_join_rule_fix=False,
knock_join_rule=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V3 = RoomVersion(
"3",
@@ -162,17 +151,15 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=False,
restricted_join_rule_fix=False,
knock_join_rule=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V4 = RoomVersion(
"4",
@@ -183,17 +170,15 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=False,
restricted_join_rule_fix=False,
knock_join_rule=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V5 = RoomVersion(
"5",
@@ -204,17 +189,15 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=False,
restricted_join_rule_fix=False,
knock_join_rule=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V6 = RoomVersion(
"6",
@@ -225,38 +208,15 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=False,
restricted_join_rule_fix=False,
knock_join_rule=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
MSC2176 = RoomVersion(
"org.matrix.msc2176",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=True,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=False,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V7 = RoomVersion(
"7",
@@ -267,17 +227,15 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=False,
msc3375_redaction_rules=False,
msc2403_knocking=True,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=False,
restricted_join_rule_fix=False,
knock_join_rule=True,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V8 = RoomVersion(
"8",
@@ -288,17 +246,15 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=False,
msc2403_knocking=True,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=True,
restricted_join_rule_fix=False,
knock_join_rule=True,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V9 = RoomVersion(
"9",
@@ -309,59 +265,15 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=True,
restricted_join_rule_fix=True,
knock_join_rule=True,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
knock_restricted_join_rule=False,
enforce_int_power_levels=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
MSC3787 = RoomVersion(
"org.matrix.msc3787",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=False,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
MSC3821 = RoomVersion(
"org.matrix.msc3821.opt1",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
msc3821_redaction_rules=True,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
V10 = RoomVersion(
"10",
@@ -372,17 +284,15 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=True,
restricted_join_rule_fix=True,
knock_join_rule=True,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
msc3821_redaction_rules=False,
knock_restricted_join_rule=True,
enforce_int_power_levels=True,
msc3931_push_features=(),
msc3989_redaction_rules=False,
)
MSC1767v10 = RoomVersion(
# MSC1767 (Extensible Events) based on room version "10"
@@ -394,60 +304,34 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
implicit_room_creator=False,
updated_redaction_rules=False,
restricted_join_rule=True,
restricted_join_rule_fix=True,
knock_join_rule=True,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
msc3821_redaction_rules=False,
knock_restricted_join_rule=True,
enforce_int_power_levels=True,
msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,),
msc3989_redaction_rules=False,
)
MSC3989 = RoomVersion(
"org.matrix.msc3989",
RoomDisposition.UNSTABLE,
V11 = RoomVersion(
"11",
RoomDisposition.STABLE,
EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=False,
msc2176_redaction_rules=False,
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
implicit_room_creator=True, # Used by MSC3820
updated_redaction_rules=True, # Used by MSC3820
restricted_join_rule=True,
restricted_join_rule_fix=True,
knock_join_rule=True,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
msc3821_redaction_rules=False,
knock_restricted_join_rule=True,
enforce_int_power_levels=True,
msc3931_push_features=(),
msc3989_redaction_rules=True,
)
MSC3820opt2 = RoomVersion(
# Based upon v10
"org.matrix.msc3820.opt2",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
msc2175_implicit_room_creator=True, # Used by MSC3820
msc2176_redaction_rules=True, # Used by MSC3820
msc3083_join_rules=True,
msc3375_redaction_rules=True,
msc2403_knocking=True,
msc3389_relation_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
msc3821_redaction_rules=True, # Used by MSC3820
msc3931_push_features=(),
msc3989_redaction_rules=True, # Used by MSC3820
)
@@ -460,14 +344,11 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
RoomVersions.V4,
RoomVersions.V5,
RoomVersions.V6,
RoomVersions.MSC2176,
RoomVersions.V7,
RoomVersions.V8,
RoomVersions.V9,
RoomVersions.MSC3787,
RoomVersions.V10,
RoomVersions.MSC3989,
RoomVersions.MSC3820opt2,
RoomVersions.V11,
)
}
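With the unstable org.matrix.* entries retired in favour of v11, lookups go through the same map as every stable version; a quick sketch:

# "11" now resolves like any other stable version; retired identifiers such
# as "org.matrix.msc3787" simply miss the map.
assert KNOWN_ROOM_VERSIONS.get("11") is RoomVersions.V11
assert KNOWN_ROOM_VERSIONS.get("org.matrix.msc3787") is None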
@@ -496,12 +377,12 @@ MSC3244_CAPABILITIES = {
RoomVersionCapability(
"knock",
RoomVersions.V7,
lambda room_version: room_version.msc2403_knocking,
lambda room_version: room_version.knock_join_rule,
),
RoomVersionCapability(
"restricted",
RoomVersions.V9,
lambda room_version: room_version.msc3083_join_rules,
lambda room_version: room_version.restricted_join_rule,
),
)
}
@@ -386,6 +386,7 @@ def listen_unix(


def listen_http(
hs: "HomeServer",
listener_config: ListenerConfig,
root_resource: Resource,
version_string: str,

@@ -406,6 +407,7 @@ def listen_http(
version_string,
max_request_body_size=max_request_body_size,
reactor=reactor,
hs=hs,
)

if isinstance(listener_config, TCPListenerConfig):

@@ -221,6 +221,7 @@ class GenericWorkerServer(HomeServer):
root_resource = create_resource_tree(resources, OptionsResource())

_base.listen_http(
self,
listener_config,
root_resource,
self.version_string,

@@ -139,6 +139,7 @@ class SynapseHomeServer(HomeServer):
root_resource = OptionsResource()

ports = listen_http(
self,
listener_config,
create_resource_tree(resources, root_resource),
self.version_string,
@@ -31,7 +31,7 @@ class AuthConfig(Config):

# The default value of password_config.enabled is True, unless msc3861 is enabled.
msc3861_enabled = (
config.get("experimental_features", {})
(config.get("experimental_features") or {})
.get("msc3861", {})
.get("enabled", False)
)
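The `or {}` coercion matters because an empty `experimental_features:` block in YAML parses to None, and the old form then called `.get` on None; a minimal sketch of the difference:

config = {"experimental_features": None}  # what an empty YAML section parses to

# Old form: config.get("experimental_features", {}) returns None because the
# key exists, so the chained .get() raised AttributeError on startup.
# New form: `or {}` swaps the None for an empty dict and the chain yields False.
enabled = (config.get("experimental_features") or {}).get("msc3861", {}).get("enabled", False)
assert enabled is False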
@@ -382,9 +382,6 @@ class ExperimentalConfig(Config):
# Check that none of the other config options conflict with MSC3861 when enabled
self.msc3861.check_config_conflicts(self.root)

# MSC4009: E.164 Matrix IDs
self.msc4009_e164_mxids = experimental.get("msc4009_e164_mxids", False)

# MSC4010: Do not allow setting m.push_rules account data.
self.msc4010_push_rules_account_data = experimental.get(
"msc4010_push_rules_account_data", False
@@ -15,7 +15,7 @@

import argparse
import logging
from typing import Any, Dict, List, Union
from typing import Any, Dict, List, Optional, Union

import attr
from pydantic import BaseModel, Extra, StrictBool, StrictInt, StrictStr
@@ -94,7 +94,7 @@ class ConfigModel(BaseModel):
allow_mutation = False


class InstanceLocationConfig(ConfigModel):
class InstanceTcpLocationConfig(ConfigModel):
"""The host and port to talk to an instance via HTTP replication."""

host: StrictStr
@@ -110,6 +110,23 @@ class InstanceLocationConfig(ConfigModel):
return f"{self.host}:{self.port}"


class InstanceUnixLocationConfig(ConfigModel):
"""The socket file to talk to an instance via HTTP replication."""

path: StrictStr

def scheme(self) -> str:
"""Hardcode a retrievable scheme"""
return "unix"

def netloc(self) -> str:
"""Nicely format the address location data"""
return f"{self.path}"


InstanceLocationConfig = Union[InstanceTcpLocationConfig, InstanceUnixLocationConfig]
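A sketch of the two location flavours side by side; the values are illustrative, and the TCP variant's `port` field is defined in the part of the class not shown in this hunk:

tcp = InstanceTcpLocationConfig(host="worker1", port=8034)
unix = InstanceUnixLocationConfig(path="/run/synapse/worker1.sock")

# Both flavours expose scheme()/netloc(), so code that builds replication
# URLs does not need to care which transport backs an instance.
assert tcp.netloc() == "worker1:8034"
assert unix.scheme() == "unix"
assert unix.netloc() == "/run/synapse/worker1.sock"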
@attr.s
class WriterLocations:
"""Specifies the instances that write various streams.
@@ -154,6 +171,27 @@ class WriterLocations:
)


@attr.s(auto_attribs=True)
class OutboundFederationRestrictedTo:
"""Whether we limit outbound federation to a certain set of instances.

Attributes:
instances: optional list of instances that can make outbound federation
requests. If None then all instances can make federation requests.
locations: list of instance locations to connect to proxy via.
"""

instances: Optional[List[str]]
locations: List[InstanceLocationConfig] = attr.Factory(list)

def __contains__(self, instance: str) -> bool:
# It feels a bit dirty to return `True` if `instances` is `None`, but it makes
# sense in downstream usage in the sense that if
# `outbound_federation_restricted_to` is not configured, then any instance can
# talk to federation (no restrictions so always return `True`).
return self.instances is None or instance in self.instances
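The containment semantics above in miniature (instance names are illustrative):

unrestricted = OutboundFederationRestrictedTo(None)
restricted = OutboundFederationRestrictedTo(["federation_sender1"])

assert "any_worker" in unrestricted  # no restriction configured: always True
assert "federation_sender1" in restricted
assert "other_worker" not in restricted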
class WorkerConfig(Config):
"""The workers are processes run separately to the main synapse process.
They have their own pid_file and listener configuration. They use the
@@ -270,9 +308,12 @@ class WorkerConfig(Config):
% MAIN_PROCESS_INSTANCE_MAP_NAME
)

# type-ignore: the expression `Union[A, B]` is not a Type[Union[A, B]] currently
self.instance_map: Dict[
str, InstanceLocationConfig
] = parse_and_validate_mapping(instance_map, InstanceLocationConfig)
] = parse_and_validate_mapping(
instance_map, InstanceLocationConfig  # type: ignore[arg-type]
)

# Map from type of streams to source, c.f. WriterLocations.
writers = config.get("stream_writers") or {}
@@ -365,6 +406,28 @@ class WorkerConfig(Config):
new_option_name="update_user_directory_from_worker",
)

outbound_federation_restricted_to = config.get(
"outbound_federation_restricted_to", None
)
self.outbound_federation_restricted_to = OutboundFederationRestrictedTo(
outbound_federation_restricted_to
)
if outbound_federation_restricted_to:
if not self.worker_replication_secret:
raise ConfigError(
"`worker_replication_secret` must be configured when using `outbound_federation_restricted_to`."
)

for instance in outbound_federation_restricted_to:
if instance not in self.instance_map:
raise ConfigError(
"Instance %r is configured in 'outbound_federation_restricted_to' but does not appear in `instance_map` config."
% (instance,)
)
self.outbound_federation_restricted_to.locations.append(
self.instance_map[instance]
)

def _should_this_worker_perform_duty(
self,
config: Dict[str, Any],
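Tying the two checks together, a hypothetical parsed-config fragment that would validate: the replication secret is present, and every restricted instance also appears in `instance_map`:

config = {
    "worker_replication_secret": "shared-secret",  # required by the first check
    "instance_map": {
        "federation_sender1": {"host": "localhost", "port": 8034},
    },
    # Every entry here must be a key of instance_map, or a ConfigError is raised.
    "outbound_federation_restricted_to": ["federation_sender1"],
}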
@@ -126,7 +126,7 @@ def validate_event_for_room_version(event: "EventBase") -> None:
raise AuthError(403, "Event not signed by sending server")

is_invite_via_allow_rule = (
event.room_version.msc3083_join_rules
event.room_version.restricted_join_rule
and event.type == EventTypes.Member
and event.membership == Membership.JOIN
and EventContentFields.AUTHORISING_USER in event.content
@@ -352,11 +352,9 @@ LENIENT_EVENT_BYTE_LIMITS_ROOM_VERSIONS = {
RoomVersions.V4,
RoomVersions.V5,
RoomVersions.V6,
RoomVersions.MSC2176,
RoomVersions.V7,
RoomVersions.V8,
RoomVersions.V9,
RoomVersions.MSC3787,
RoomVersions.V10,
RoomVersions.MSC1767v10,
}
@@ -449,7 +447,7 @@ def _check_create(event: "EventBase") -> None:

# 1.4 If content has no creator field, reject if the room version requires it.
if (
not event.room_version.msc2175_implicit_room_creator
not event.room_version.implicit_room_creator
and EventContentFields.ROOM_CREATOR not in event.content
):
raise AuthError(403, "Create event lacks a 'creator' property")
@@ -486,7 +484,7 @@ def _is_membership_change_allowed(
key = (EventTypes.Create, "")
create = auth_events.get(key)
if create and event.prev_event_ids()[0] == create.event_id:
if room_version.msc2175_implicit_room_creator:
if room_version.implicit_room_creator:
creator = create.sender
else:
creator = create.content[EventContentFields.ROOM_CREATOR]

@@ -509,7 +507,7 @@ def _is_membership_change_allowed(
caller_invited = caller and caller.membership == Membership.INVITE
caller_knocked = (
caller
and room_version.msc2403_knocking
and room_version.knock_join_rule
and caller.membership == Membership.KNOCK
)
@@ -609,9 +607,9 @@ def _is_membership_change_allowed(
elif join_rule == JoinRules.PUBLIC:
pass
elif (
room_version.msc3083_join_rules and join_rule == JoinRules.RESTRICTED
room_version.restricted_join_rule and join_rule == JoinRules.RESTRICTED
) or (
room_version.msc3787_knock_restricted_join_rule
room_version.knock_restricted_join_rule
and join_rule == JoinRules.KNOCK_RESTRICTED
):
# This is the same as public, but the event must contain a reference

@@ -641,9 +639,9 @@ def _is_membership_change_allowed(

elif (
join_rule == JoinRules.INVITE
or (room_version.msc2403_knocking and join_rule == JoinRules.KNOCK)
or (room_version.knock_join_rule and join_rule == JoinRules.KNOCK)
or (
room_version.msc3787_knock_restricted_join_rule
room_version.knock_restricted_join_rule
and join_rule == JoinRules.KNOCK_RESTRICTED
)
):
@@ -677,9 +675,9 @@ def _is_membership_change_allowed(
"You don't have permission to ban",
errcode=Codes.INSUFFICIENT_POWER,
)
elif room_version.msc2403_knocking and Membership.KNOCK == membership:
elif room_version.knock_join_rule and Membership.KNOCK == membership:
if join_rule != JoinRules.KNOCK and (
not room_version.msc3787_knock_restricted_join_rule
not room_version.knock_restricted_join_rule
or join_rule != JoinRules.KNOCK_RESTRICTED
):
raise AuthError(403, "You don't have permission to knock")
@@ -836,7 +834,7 @@ def _check_power_levels(
# Reject events with stringy power levels if required by room version
if (
event.type == EventTypes.PowerLevels
and room_version_obj.msc3667_int_only_power_levels
and room_version_obj.enforce_int_power_levels
):
for k, v in event.content.items():
if k in {
@@ -972,7 +970,7 @@ def get_user_power_level(user_id: str, auth_events: StateMap["EventBase"]) -> in
key = (EventTypes.Create, "")
create_event = auth_events.get(key)
if create_event is not None:
if create_event.room_version.msc2175_implicit_room_creator:
if create_event.room_version.implicit_room_creator:
creator = create_event.sender
else:
creator = create_event.content[EventContentFields.ROOM_CREATOR]
@@ -1110,7 +1108,7 @@ def auth_types_for_event(
)
auth_types.add(key)

if room_version.msc3083_join_rules and membership == Membership.JOIN:
if room_version.restricted_join_rule and membership == Membership.JOIN:
if EventContentFields.AUTHORISING_USER in event.content:
key = (
EventTypes.Member,
|
@ -346,7 +346,7 @@ class EventBase(metaclass=abc.ABCMeta):
|
|||
@property
|
||||
def redacts(self) -> Optional[str]:
|
||||
"""MSC2176 moved the redacts field into the content."""
|
||||
if self.room_version.msc2176_redaction_rules:
|
||||
if self.room_version.updated_redaction_rules:
|
||||
return self.content.get("redacts")
|
||||
return self.get("redacts")
|
||||
|
||||
|
|
|
@ -175,7 +175,7 @@ class EventBuilder:
|
|||
|
||||
# MSC2174 moves the redacts property to the content, it is invalid to
|
||||
# provide it as a top-level property.
|
||||
if self._redacts is not None and not self.room_version.msc2176_redaction_rules:
|
||||
if self._redacts is not None and not self.room_version.updated_redaction_rules:
|
||||
event_dict["redacts"] = self._redacts
|
||||
|
||||
if self._origin_server_ts is not None:
|
||||
|
|
|
@ -108,13 +108,9 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic
|
|||
"origin_server_ts",
|
||||
]
|
||||
|
||||
# Room versions from before MSC2176 had additional allowed keys.
|
||||
if not room_version.msc2176_redaction_rules:
|
||||
allowed_keys.extend(["prev_state", "membership"])
|
||||
|
||||
# Room versions before MSC3989 kept the origin field.
|
||||
if not room_version.msc3989_redaction_rules:
|
||||
allowed_keys.append("origin")
|
||||
# Earlier room versions from had additional allowed keys.
|
||||
if not room_version.updated_redaction_rules:
|
||||
allowed_keys.extend(["prev_state", "membership", "origin"])
|
||||
|
||||
event_type = event_dict["type"]
|
||||
|
||||
|
@@ -127,9 +123,9 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic

if event_type == EventTypes.Member:
add_fields("membership")
if room_version.msc3375_redaction_rules:
if room_version.restricted_join_rule_fix:
add_fields(EventContentFields.AUTHORISING_USER)
if room_version.msc3821_redaction_rules:
if room_version.updated_redaction_rules:
# Preserve the signed field under third_party_invite.
third_party_invite = event_dict["content"].get("third_party_invite")
if isinstance(third_party_invite, collections.abc.Mapping):
@@ -141,13 +137,13 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic

elif event_type == EventTypes.Create:
# MSC2176 rules state that create events cannot be redacted.
if room_version.msc2176_redaction_rules:
if room_version.updated_redaction_rules:
return event_dict

add_fields("creator")
elif event_type == EventTypes.JoinRules:
add_fields("join_rule")
if room_version.msc3083_join_rules:
if room_version.restricted_join_rule:
add_fields("allow")
elif event_type == EventTypes.PowerLevels:
add_fields(
@@ -161,14 +157,14 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic
"redact",
)

if room_version.msc2176_redaction_rules:
if room_version.updated_redaction_rules:
add_fields("invite")

elif event_type == EventTypes.Aliases and room_version.special_case_aliases_auth:
add_fields("aliases")
elif event_type == EventTypes.RoomHistoryVisibility:
add_fields("history_visibility")
elif event_type == EventTypes.Redaction and room_version.msc2176_redaction_rules:
elif event_type == EventTypes.Redaction and room_version.updated_redaction_rules:
add_fields("redacts")

# Protect the rel_type and event_id fields under the m.relates_to field.
@@ -477,6 +473,15 @@ def serialize_event(
if config.as_client_event:
d = config.event_format(d)

# If the event is a redaction, copy the redacts field from the content to
# top-level for backwards compatibility.
if (
e.type == EventTypes.Redaction
and e.room_version.updated_redaction_rules
and e.redacts is not None
):
d["redacts"] = e.redacts

only_event_fields = config.only_event_fields
if only_event_fields:
if not isinstance(only_event_fields, list) or not all(
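The effect of that copy, sketched on a hypothetical room v11 redaction event:

# Under the updated redaction rules `redacts` lives in the event content, but
# older clients read it from the top level, so serialization mirrors it there.
serialized = {"type": "m.room.redaction", "content": {"redacts": "$abc123"}}
serialized["redacts"] = serialized["content"]["redacts"]  # what the block above does
assert serialized["redacts"] == "$abc123"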
@ -231,7 +231,7 @@ async def _check_sigs_on_pdu(
    # If this is a join event for a restricted room it may have been authorised
    # via a different server from the sending server. Check those signatures.
    if (
        room_version.msc3083_join_rules
        room_version.restricted_join_rule
        and pdu.type == EventTypes.Member
        and pdu.membership == Membership.JOIN
        and EventContentFields.AUTHORISING_USER in pdu.content
@ -983,7 +983,7 @@ class FederationClient(FederationBase):
        if not room_version:
            raise UnsupportedRoomVersionError()

        if not room_version.msc2403_knocking and membership == Membership.KNOCK:
        if not room_version.knock_join_rule and membership == Membership.KNOCK:
            raise SynapseError(
                400,
                "This room version does not support knocking",
@ -1069,7 +1069,7 @@ class FederationClient(FederationBase):
        # * Ensure the signatures are good.
        #
        # Otherwise, fall back to the provided event.
        if room_version.msc3083_join_rules and response.event:
        if room_version.restricted_join_rule and response.event:
            event = response.event

            valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
@ -1195,7 +1195,7 @@ class FederationClient(FederationBase):

        # MSC3083 defines additional error codes for room joins.
        failover_errcodes = None
        if room_version.msc3083_join_rules:
        if room_version.restricted_join_rule:
            failover_errcodes = (
                Codes.UNABLE_AUTHORISE_JOIN,
                Codes.UNABLE_TO_GRANT_JOIN,
@ -806,7 +806,7 @@ class FederationServer(FederationBase):
            raise IncompatibleRoomVersionError(room_version=room_version.identifier)

        # Check that this room supports knocking as defined by its room version
        if not room_version.msc2403_knocking:
        if not room_version.knock_join_rule:
            raise SynapseError(
                403,
                "This room version does not support knocking",
@ -909,7 +909,7 @@ class FederationServer(FederationBase):
                errcode=Codes.NOT_FOUND,
            )

        if membership_type == Membership.KNOCK and not room_version.msc2403_knocking:
        if membership_type == Membership.KNOCK and not room_version.knock_join_rule:
            raise SynapseError(
                403,
                "This room version does not support knocking",
@ -933,7 +933,7 @@ class FederationServer(FederationBase):
        # the event is valid to be sent into the room. Currently this is only done
        # if the user is being joined via restricted join rules.
        if (
            room_version.msc3083_join_rules
            room_version.restricted_join_rule
            and event.membership == Membership.JOIN
            and EventContentFields.AUTHORISING_USER in event.content
        ):
@ -432,15 +432,6 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet):

    PREFIX = FEDERATION_V2_PREFIX

    def __init__(
        self,
        hs: "HomeServer",
        authenticator: Authenticator,
        ratelimiter: FederationRateLimiter,
        server_name: str,
    ):
        super().__init__(hs, authenticator, ratelimiter, server_name)

    async def on_PUT(
        self,
        origin: str,
@ -277,7 +277,9 @@ class DirectoryHandler:
            except RequestSendFailed:
                raise SynapseError(502, "Failed to fetch alias")
            except CodeMessageException as e:
                logging.warning("Error retrieving alias")
                logging.warning(
                    "Error retrieving alias %s -> %s %s", room_alias, e.code, e.msg
                )
                if e.code == 404:
                    fed_result = None
                else:
@ -277,7 +277,7 @@ class EventAuthHandler:
            True if the proper room version and join rules are set for restricted access.
        """
        # This only applies to room versions which support the new join rule.
        if not room_version.msc3083_join_rules:
        if not room_version.restricted_join_rule:
            return False

        # If there's no join rule, then it defaults to invite (so this doesn't apply).

@ -292,7 +292,7 @@ class EventAuthHandler:
            return True

        # also check for MSC3787 behaviour
        if room_version.msc3787_knock_restricted_join_rule:
        if room_version.knock_restricted_join_rule:
            return content_join_rule == JoinRules.KNOCK_RESTRICTED

        return False
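The rename running through these hunks swaps MSC-numbered `RoomVersion` flags for descriptive ones. Collecting the correspondences that appear in this diff (a summary of the hunks themselves, not an exhaustive list):

# Old MSC-numbered attribute          -> new descriptive attribute
# msc2175_implicit_room_creator       -> implicit_room_creator
# msc2403_knocking                    -> knock_join_rule
# msc3083_join_rules                  -> restricted_join_rule
# msc3375_redaction_rules             -> restricted_join_rule_fix
# msc3787_knock_restricted_join_rule  -> knock_restricted_join_rule
# msc2176/msc3821/msc3989 redaction flags -> updated_redaction_rules (merged)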
@ -957,7 +957,7 @@ class FederationHandler:
        # Note that this requires the /send_join request to come back to the
        # same server.
        prev_event_ids = None
        if room_version.msc3083_join_rules:
        if room_version.restricted_join_rule:
            # Note that the room's state can change out from under us and render our
            # nice join rules-conformant event non-conformant by the time we build the
            # event. When this happens, our validation at the end fails and we respond
@ -1581,9 +1581,7 @@ class FederationHandler:
            event.content["third_party_invite"]["signed"]["token"],
        )
        original_invite = None
        prev_state_ids = await context.get_prev_state_ids(
            StateFilter.from_types([(EventTypes.ThirdPartyInvite, None)])
        )
        prev_state_ids = await context.get_prev_state_ids(StateFilter.from_types([key]))
        original_invite_id = prev_state_ids.get(key)
        if original_invite_id:
            original_invite = await self.store.get_event(

@ -1636,7 +1634,7 @@ class FederationHandler:
        token = signed["token"]

        prev_state_ids = await context.get_prev_state_ids(
            StateFilter.from_types([(EventTypes.ThirdPartyInvite, None)])
            StateFilter.from_types([(EventTypes.ThirdPartyInvite, token)])
        )
        invite_event_id = prev_state_ids.get((EventTypes.ThirdPartyInvite, token))
@ -738,7 +738,7 @@ class EventCreationHandler:
            prev_event_id = state_map.get((EventTypes.Member, event.sender))
        else:
            prev_state_ids = await unpersisted_context.get_prev_state_ids(
                StateFilter.from_types([(EventTypes.Member, None)])
                StateFilter.from_types([(EventTypes.Member, event.sender)])
            )
            prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender))
        prev_event = (

@ -860,7 +860,7 @@ class EventCreationHandler:
            return None

        prev_state_ids = await context.get_prev_state_ids(
            StateFilter.from_types([(event.type, None)])
            StateFilter.from_types([(event.type, event.state_key)])
        )
        prev_event_id = prev_state_ids.get((event.type, event.state_key))
        if not prev_event_id:
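These `StateFilter` hunks all make the same narrowing: a `(type, None)` entry is a wildcard over `state_key`, so the old filters pulled every state row of that type even though the caller only read back a single key. A hedged sketch (import paths assumed from Synapse's layout at this time):

from synapse.api.constants import EventTypes
from synapse.types.state import StateFilter

# (type, None) matches every state_key of that type; (type, key) matches one row.
all_members = StateFilter.from_types([(EventTypes.Member, None)])
one_member = StateFilter.from_types([(EventTypes.Member, "@alice:example.com")])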
@ -95,13 +95,12 @@ bump_active_time_counter = Counter("synapse_handler_presence_bump_active_time", "")
get_updates_counter = Counter("synapse_handler_presence_get_updates", "", ["type"])

notify_reason_counter = Counter(
    "synapse_handler_presence_notify_reason", "", ["reason"]
    "synapse_handler_presence_notify_reason", "", ["locality", "reason"]
)
state_transition_counter = Counter(
    "synapse_handler_presence_state_transition", "", ["from", "to"]
    "synapse_handler_presence_state_transition", "", ["locality", "from", "to"]
)


# If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
# "currently_active"
LAST_ACTIVE_GRANULARITY = 60 * 1000
@ -567,8 +566,8 @@ class WorkerPresenceHandler(BasePresenceHandler):
            for new_state in states:
                old_state = self.user_to_current_state.get(new_state.user_id)
                self.user_to_current_state[new_state.user_id] = new_state

                if not old_state or should_notify(old_state, new_state):
                is_mine = self.is_mine_id(new_state.user_id)
                if not old_state or should_notify(old_state, new_state, is_mine):
                    state_to_notify.append(new_state)

            stream_id = token
@ -1499,23 +1498,31 @@ class PresenceHandler(BasePresenceHandler):
        )


def should_notify(old_state: UserPresenceState, new_state: UserPresenceState) -> bool:
def should_notify(
    old_state: UserPresenceState, new_state: UserPresenceState, is_mine: bool
) -> bool:
    """Decides if a presence state change should be sent to interested parties."""
    user_location = "remote"
    if is_mine:
        user_location = "local"

    if old_state == new_state:
        return False

    if old_state.status_msg != new_state.status_msg:
        notify_reason_counter.labels("status_msg_change").inc()
        notify_reason_counter.labels(user_location, "status_msg_change").inc()
        return True

    if old_state.state != new_state.state:
        notify_reason_counter.labels("state_change").inc()
        state_transition_counter.labels(old_state.state, new_state.state).inc()
        notify_reason_counter.labels(user_location, "state_change").inc()
        state_transition_counter.labels(
            user_location, old_state.state, new_state.state
        ).inc()
        return True

    if old_state.state == PresenceState.ONLINE:
        if new_state.currently_active != old_state.currently_active:
            notify_reason_counter.labels("current_active_change").inc()
            notify_reason_counter.labels(user_location, "current_active_change").inc()
            return True

        if (

@ -1524,12 +1531,16 @@ def should_notify(old_state: UserPresenceState, new_state: UserPresenceState) -> bool:
        ):
            # Only notify about last active bumps if we're not currently active
            if not new_state.currently_active:
                notify_reason_counter.labels("last_active_change_online").inc()
                notify_reason_counter.labels(
                    user_location, "last_active_change_online"
                ).inc()
                return True

    elif new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
        # Always notify for a transition where last active gets bumped.
        notify_reason_counter.labels("last_active_change_not_online").inc()
        notify_reason_counter.labels(
            user_location, "last_active_change_not_online"
        ).inc()
        return True

    return False
@ -1989,7 +2000,7 @@ def handle_update(
    )

    # Check whether the change was something worth notifying about
    if should_notify(prev_state, new_state):
    if should_notify(prev_state, new_state, is_mine):
        new_state = new_state.copy_and_replace(last_federation_update_ts=now)
        persist_and_notify = True
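With the new `locality` label, each presence counter splits into local and remote series, matching the changelog's "Update presence metrics to differentiate remote vs local users." A small sketch with `prometheus_client` showing what the hunks above produce (metric shapes copied from the diff; the standalone setup is illustrative):

from prometheus_client import Counter

notify_reason_counter = Counter(
    "synapse_handler_presence_notify_reason", "", ["locality", "reason"]
)

# Each label pair becomes its own exported series, e.g.
#   synapse_handler_presence_notify_reason_total{locality="local",reason="state_change"}
#   synapse_handler_presence_notify_reason_total{locality="remote",reason="state_change"}
notify_reason_counter.labels("local", "state_change").inc()
notify_reason_counter.labels("remote", "state_change").inc()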
@ -143,15 +143,10 @@ class RegistrationHandler:
        assigned_user_id: Optional[str] = None,
        inhibit_user_in_use_error: bool = False,
    ) -> None:
        if types.contains_invalid_mxid_characters(
            localpart, self.hs.config.experimental.msc4009_e164_mxids
        ):
            extra_chars = (
                "=_-./+" if self.hs.config.experimental.msc4009_e164_mxids else "=_-./"
            )
        if types.contains_invalid_mxid_characters(localpart):
            raise SynapseError(
                400,
                f"User ID can only contain characters a-z, 0-9, or '{extra_chars}'",
                "User ID can only contain characters a-z, 0-9, or '=_-./+'",
                Codes.INVALID_USERNAME,
            )
@ -1116,7 +1116,7 @@ class RoomCreationHandler:
        preset_config, config = self._room_preset_config(room_config)

        # MSC2175 removes the creator field from the create event.
        if not room_version.msc2175_implicit_room_creator:
        if not room_version.implicit_room_creator:
            creation_content["creator"] = creator_id
        creation_event, unpersisted_creation_context = await create_event(
            EventTypes.Create, creation_content, False
@ -473,7 +473,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
        )
        context = await unpersisted_context.persist(event)
        prev_state_ids = await context.get_prev_state_ids(
            StateFilter.from_types([(EventTypes.Member, None)])
            StateFilter.from_types([(EventTypes.Member, user_id)])
        )

        prev_member_event_id = prev_state_ids.get(

@ -1354,7 +1354,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
        requester = types.create_requester(target_user)

        prev_state_ids = await context.get_prev_state_ids(
            StateFilter.from_types([(EventTypes.GuestAccess, None)])
            StateFilter.from_types([(EventTypes.GuestAccess, "")])
        )
        if event.membership == Membership.JOIN:
            if requester.is_guest:

@ -1376,11 +1376,14 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                ratelimit=ratelimit,
            )

        prev_member_event_id = prev_state_ids.get(
            (EventTypes.Member, event.state_key), None
        )

        if event.membership == Membership.LEAVE:
            prev_state_ids = await context.get_prev_state_ids(
                StateFilter.from_types([(EventTypes.Member, event.state_key)])
            )
            prev_member_event_id = prev_state_ids.get(
                (EventTypes.Member, event.state_key), None
            )

            if prev_member_event_id:
                prev_member_event = await self.store.get_event(prev_member_event_id)
                if prev_member_event.membership == Membership.JOIN:
@ -564,9 +564,9 @@ class RoomSummaryHandler:
        join_rule = join_rules_event.content.get("join_rule")
        if (
            join_rule == JoinRules.PUBLIC
            or (room_version.msc2403_knocking and join_rule == JoinRules.KNOCK)
            or (room_version.knock_join_rule and join_rule == JoinRules.KNOCK)
            or (
                room_version.msc3787_knock_restricted_join_rule
                room_version.knock_restricted_join_rule
                and join_rule == JoinRules.KNOCK_RESTRICTED
            )
        ):
@ -27,9 +27,9 @@ from synapse.http.servlet import parse_string
from synapse.http.site import SynapseRequest
from synapse.module_api import ModuleApi
from synapse.types import (
    MXID_LOCALPART_ALLOWED_CHARACTERS,
    UserID,
    map_username_to_mxid_localpart,
    mxid_localpart_allowed_characters,
)
from synapse.util.iterutils import chunk_seq

@ -371,7 +371,7 @@ class SamlHandler:


DOT_REPLACE_PATTERN = re.compile(
    "[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),)
    "[^%s]" % (re.escape("".join(MXID_LOCALPART_ALLOWED_CHARACTERS)),)
)
@ -225,8 +225,6 @@ class SsoHandler:

        self._consent_at_registration = hs.config.consent.user_consent_at_registration

        self._e164_mxids = hs.config.experimental.msc4009_e164_mxids

    def register_identity_provider(self, p: SsoIdentityProvider) -> None:
        p_id = p.idp_id
        assert p_id not in self._identity_providers

@ -713,7 +711,7 @@ class SsoHandler:
        # Since the localpart is provided via a potentially untrusted module,
        # ensure the MXID is valid before registering.
        if not attributes.localpart or contains_invalid_mxid_characters(
            attributes.localpart, self._e164_mxids
            attributes.localpart
        ):
            raise MappingException("localpart is invalid: %s" % (attributes.localpart,))

@ -946,7 +944,7 @@ class SsoHandler:
            localpart,
        )

        if contains_invalid_mxid_characters(localpart, self._e164_mxids):
        if contains_invalid_mxid_characters(localpart):
            raise SynapseError(400, "localpart is invalid: %s" % (localpart,))
        user_id = UserID(localpart, self._server_name).to_string()
        user_infos = await self._store.get_users_by_id_case_insensitive(user_id)
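These hunks stabilize MSC4009 (the changelog's "Allow `+` in Matrix IDs"): the experimental flag and its plumbing disappear, and '+' is always permitted. A hedged, simplified re-implementation of the check for illustration, not Synapse's actual helper:

import string

# Stabilized localpart alphabet per the error message above: a-z, 0-9, "=_-./+".
ALLOWED = set(string.ascii_lowercase + string.digits + "=_-./+")

def contains_invalid_mxid_characters(localpart: str) -> bool:
    # Simplified stand-in for the helper in synapse.types.
    return any(c not in ALLOWED for c in localpart)

assert not contains_invalid_mxid_characters("+441234567890")  # E.164-style localpart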
@ -1037,7 +1037,12 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
        if reason.check(ResponseDone):
            self.deferred.callback(self.length)
        elif reason.check(PotentialDataLoss):
            # stolen from https://github.com/twisted/treq/pull/49/files
            # This applies to requests which don't set `Content-Length` or a
            # `Transfer-Encoding` in the response because in this case the end of the
            # response is indicated by the connection being closed, an event which may
            # also be due to a transient network problem or other error. But since this
            # behavior is expected of some servers (like YouTube), let's ignore it.
            # Stolen from https://github.com/twisted/treq/pull/49/files
            # http://twistedmatrix.com/trac/ticket/4840
            self.deferred.callback(self.length)
        else:
@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import abc
import base64
import logging
from typing import Optional, Union

@ -39,8 +40,14 @@ class ProxyConnectError(ConnectError):
    pass


@attr.s(auto_attribs=True)
class ProxyCredentials:
    @abc.abstractmethod
    def as_proxy_authorization_value(self) -> bytes:
        raise NotImplementedError()


@attr.s(auto_attribs=True)
class BasicProxyCredentials(ProxyCredentials):
    username_password: bytes

    def as_proxy_authorization_value(self) -> bytes:
@ -55,6 +62,17 @@ class ProxyCredentials:
        return b"Basic " + base64.encodebytes(self.username_password)


@attr.s(auto_attribs=True)
class BearerProxyCredentials(ProxyCredentials):
    access_token: bytes

    def as_proxy_authorization_value(self) -> bytes:
        """
        Return the value for a Proxy-Authorization header (i.e. 'Bearer xxx').
        """
        return b"Bearer " + self.access_token


@implementer(IStreamClientEndpoint)
class HTTPConnectProxyEndpoint:
    """An Endpoint implementation which will send a CONNECT request to an http proxy
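For reference, the two credential flavours defined above produce these header values (standalone illustration; the secrets are made up):

import base64

basic = b"Basic " + base64.encodebytes(b"user:password")  # BasicProxyCredentials
bearer = b"Bearer " + b"some-replication-secret"          # BearerProxyCredentials
# Either value is sent as:  Proxy-Authorization: <value>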
@ -50,7 +50,7 @@ from twisted.internet.interfaces import IReactorTime
from twisted.internet.task import Cooperator
from twisted.web.client import ResponseFailed
from twisted.web.http_headers import Headers
from twisted.web.iweb import IBodyProducer, IResponse
from twisted.web.iweb import IAgent, IBodyProducer, IResponse

import synapse.metrics
import synapse.util.retryutils

@ -71,7 +71,9 @@ from synapse.http.client import (
    encode_query_args,
    read_body_with_max_size,
)
from synapse.http.connectproxyclient import BearerProxyCredentials
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.http.proxyagent import ProxyAgent
from synapse.http.types import QueryParams
from synapse.logging import opentracing
from synapse.logging.context import make_deferred_yieldable, run_in_background
@ -393,17 +395,41 @@ class MatrixFederationHttpClient:
        if hs.config.server.user_agent_suffix:
            user_agent = "%s %s" % (user_agent, hs.config.server.user_agent_suffix)

        federation_agent = MatrixFederationAgent(
            self.reactor,
            tls_client_options_factory,
            user_agent.encode("ascii"),
            hs.config.server.federation_ip_range_allowlist,
            hs.config.server.federation_ip_range_blocklist,
        outbound_federation_restricted_to = (
            hs.config.worker.outbound_federation_restricted_to
        )
        if hs.get_instance_name() in outbound_federation_restricted_to:
            # Talk to federation directly
            federation_agent: IAgent = MatrixFederationAgent(
                self.reactor,
                tls_client_options_factory,
                user_agent.encode("ascii"),
                hs.config.server.federation_ip_range_allowlist,
                hs.config.server.federation_ip_range_blocklist,
            )
        else:
            proxy_authorization_secret = hs.config.worker.worker_replication_secret
            assert (
                proxy_authorization_secret is not None
            ), "`worker_replication_secret` must be set when using `outbound_federation_restricted_to` (used to authenticate requests across workers)"
            federation_proxy_credentials = BearerProxyCredentials(
                proxy_authorization_secret.encode("ascii")
            )

            # We need to talk to federation through the proxy, via one of the
            # configured locations
            federation_proxy_locations = outbound_federation_restricted_to.locations
            federation_agent = ProxyAgent(
                self.reactor,
                self.reactor,
                tls_client_options_factory,
                federation_proxy_locations=federation_proxy_locations,
                federation_proxy_credentials=federation_proxy_credentials,
            )

        # Use a BlocklistingAgentWrapper to prevent circumventing the IP
        # blocking via IP literals in server names
        self.agent = BlocklistingAgentWrapper(
        self.agent: IAgent = BlocklistingAgentWrapper(
            federation_agent,
            ip_blocklist=hs.config.server.federation_ip_range_blocklist,
        )
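Deployment-wise, this is the pivot for the changelog's `outbound_federation_restricted_to` option: workers named there (with matching `instance_map` entries) keep a direct `MatrixFederationAgent`, while every other worker gets a `ProxyAgent` that tunnels `matrix-federation://` requests to one of them, authenticated with `worker_replication_secret`. A hedged sketch of the selection, assuming (as the config appears to behave) that an empty set means no restriction; `choose_agent` is illustrative, not Synapse's API:

from typing import Set

def choose_agent(instance_name: str, restricted_to: Set[str]) -> str:
    # Illustrative only: mirrors the if/else in the hunk above.
    if not restricted_to or instance_name in restricted_to:
        return "MatrixFederationAgent (direct federation)"
    return "ProxyAgent (Proxy-Authorization: Bearer <worker_replication_secret>)"

assert choose_agent("federation_sender1", {"federation_sender1"}).startswith("Matrix")
assert choose_agent("event_persister1", {"federation_sender1"}).startswith("Proxy")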
@ -412,7 +438,6 @@ class MatrixFederationHttpClient:
        self._store = hs.get_datastores().main
        self.version_string_bytes = hs.version_string.encode("ascii")
        self.default_timeout_seconds = hs.config.federation.client_timeout_ms / 1000

        self.max_long_retry_delay_seconds = (
            hs.config.federation.max_long_retry_delay_ms / 1000
        )
@ -1131,6 +1156,101 @@ class MatrixFederationHttpClient:
            Succeeds when we get a 2xx HTTP response. The
            result will be the decoded JSON body.

        Raises:
            HttpResponseException: If we get an HTTP response code >= 300
                (except 429).
            NotRetryingDestination: If we are not yet ready to retry this
                server.
            FederationDeniedError: If this destination is not on our
                federation whitelist
            RequestSendFailed: If there were problems connecting to the
                remote, due to e.g. DNS failures, connection timeouts etc.
        """
        json_dict, _ = await self.get_json_with_headers(
            destination=destination,
            path=path,
            args=args,
            retry_on_dns_fail=retry_on_dns_fail,
            timeout=timeout,
            ignore_backoff=ignore_backoff,
            try_trailing_slash_on_400=try_trailing_slash_on_400,
            parser=parser,
        )
        return json_dict

    @overload
    async def get_json_with_headers(
        self,
        destination: str,
        path: str,
        args: Optional[QueryParams] = None,
        retry_on_dns_fail: bool = True,
        timeout: Optional[int] = None,
        ignore_backoff: bool = False,
        try_trailing_slash_on_400: bool = False,
        parser: Literal[None] = None,
    ) -> Tuple[JsonDict, Dict[bytes, List[bytes]]]:
        ...

    @overload
    async def get_json_with_headers(
        self,
        destination: str,
        path: str,
        args: Optional[QueryParams] = ...,
        retry_on_dns_fail: bool = ...,
        timeout: Optional[int] = ...,
        ignore_backoff: bool = ...,
        try_trailing_slash_on_400: bool = ...,
        parser: ByteParser[T] = ...,
    ) -> Tuple[T, Dict[bytes, List[bytes]]]:
        ...

    async def get_json_with_headers(
        self,
        destination: str,
        path: str,
        args: Optional[QueryParams] = None,
        retry_on_dns_fail: bool = True,
        timeout: Optional[int] = None,
        ignore_backoff: bool = False,
        try_trailing_slash_on_400: bool = False,
        parser: Optional[ByteParser[T]] = None,
    ) -> Tuple[Union[JsonDict, T], Dict[bytes, List[bytes]]]:
        """GETs some json from the given host homeserver and path

        Args:
            destination: The remote server to send the HTTP request to.

            path: The HTTP path.

            args: A dictionary used to create query strings, defaults to
                None.

            retry_on_dns_fail: true if the request should be retried on DNS failures

            timeout: number of milliseconds to wait for the response.
                self._default_timeout (60s) by default.

                Note that we may make several attempts to send the request; this
                timeout applies to the time spent waiting for response headers for
                *each* attempt (including connection time) as well as the time spent
                reading the response body after a 200 response.

            ignore_backoff: true to ignore the historical backoff data
                and try the request anyway.

            try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED
                response we should try appending a trailing slash to the end of
                the request. Workaround for #3622 in Synapse <= v0.99.3.

            parser: The parser to use to decode the response. Defaults to
                parsing as JSON.

        Returns:
            Succeeds when we get a 2xx HTTP response. The result will be a tuple of the
            decoded JSON body and a dict of the response headers.

        Raises:
            HttpResponseException: If we get an HTTP response code >= 300
                (except 429).
@ -1156,6 +1276,8 @@ class MatrixFederationHttpClient:
            timeout=timeout,
        )

        headers = dict(response.headers.getAllRawHeaders())

        if timeout is not None:
            _sec_timeout = timeout / 1000
        else:

@ -1173,7 +1295,7 @@ class MatrixFederationHttpClient:
            parser=parser,
        )

        return body
        return body, headers

    async def delete_json(
        self,
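A hedged usage sketch of the new method (the destination and path are made up; `client` stands for a `MatrixFederationHttpClient`):

async def fetch_with_headers(client) -> None:
    # Hypothetical caller: read a response header alongside the JSON body.
    body, headers = await client.get_json_with_headers(
        destination="remote.example.com",
        path="/_matrix/federation/v1/version",
    )
    # Header names and values arrive as raw bytes from Twisted.
    content_type = headers.get(b"Content-Type", [b""])[0]
    print(content_type, body)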
@ -0,0 +1,283 @@
# Copyright 2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import json
import logging
import urllib.parse
from typing import TYPE_CHECKING, Any, Optional, Set, Tuple, cast

from twisted.internet import protocol
from twisted.internet.interfaces import ITCPTransport
from twisted.internet.protocol import connectionDone
from twisted.python import failure
from twisted.python.failure import Failure
from twisted.web.client import ResponseDone
from twisted.web.http_headers import Headers
from twisted.web.iweb import IResponse
from twisted.web.resource import IResource
from twisted.web.server import Request, Site

from synapse.api.errors import Codes, InvalidProxyCredentialsError
from synapse.http import QuieterFileBodyProducer
from synapse.http.server import _AsyncResource
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.types import ISynapseReactor
from synapse.util.async_helpers import timeout_deferred

if TYPE_CHECKING:
    from synapse.http.site import SynapseRequest
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

# "Hop-by-hop" headers (as opposed to "end-to-end" headers) as defined by RFC2616
# section 13.5.1 and referenced in RFC9110 section 7.6.1. These are meant to only be
# consumed by the immediate recipient and not be forwarded on.
HOP_BY_HOP_HEADERS = {
    "Connection",
    "Keep-Alive",
    "Proxy-Authenticate",
    "Proxy-Authorization",
    "TE",
    "Trailers",
    "Transfer-Encoding",
    "Upgrade",
}


def parse_connection_header_value(
    connection_header_value: Optional[bytes],
) -> Set[str]:
    """
    Parse the `Connection` header to determine which headers should not be copied
    over from the remote response.

    As defined by RFC2616 section 14.10 and RFC9110 section 7.6.1

    Example: `Connection: close, X-Foo, X-Bar` will return `{"Close", "X-Foo", "X-Bar"}`

    Even though "close" is a special directive, let's just treat it as just another
    header for simplicity. If people want to check for this directive, they can simply
    check for `"Close" in headers`.

    Args:
        connection_header_value: The value of the `Connection` header.

    Returns:
        The set of header names that should not be copied over from the remote response.
        The keys are capitalized in canonical capitalization.
    """
    headers = Headers()
    extra_headers_to_remove: Set[str] = set()
    if connection_header_value:
        extra_headers_to_remove = {
            headers._canonicalNameCaps(connection_option.strip()).decode("ascii")
            for connection_option in connection_header_value.split(b",")
        }

    return extra_headers_to_remove
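A quick check of the function above, matching the docstring's own example (note it leans on Twisted's private `Headers._canonicalNameCaps` for the canonical capitalization):

assert parse_connection_header_value(b"close, X-Foo, X-Bar") == {
    "Close",
    "X-Foo",
    "X-Bar",
}
assert parse_connection_header_value(None) == set()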
class ProxyResource(_AsyncResource):
    """
    A stub resource that proxies any requests with a `matrix-federation://` scheme
    through the given `federation_agent` to the remote homeserver and ferries back the
    info.
    """

    isLeaf = True

    def __init__(self, reactor: ISynapseReactor, hs: "HomeServer"):
        super().__init__(True)

        self.reactor = reactor
        self.agent = hs.get_federation_http_client().agent

        self._proxy_authorization_secret = hs.config.worker.worker_replication_secret

    def _check_auth(self, request: Request) -> None:
        # The `matrix-federation://` proxy functionality can only be used with auth.
        # Protect homeserver admins forgetting to configure a secret.
        assert self._proxy_authorization_secret is not None

        # Get the authorization header.
        auth_headers = request.requestHeaders.getRawHeaders(b"Proxy-Authorization")

        if not auth_headers:
            raise InvalidProxyCredentialsError(
                "Missing Proxy-Authorization header.", Codes.MISSING_TOKEN
            )
        if len(auth_headers) > 1:
            raise InvalidProxyCredentialsError(
                "Too many Proxy-Authorization headers.", Codes.UNAUTHORIZED
            )
        parts = auth_headers[0].split(b" ")
        if parts[0] == b"Bearer" and len(parts) == 2:
            received_secret = parts[1].decode("ascii")
            if self._proxy_authorization_secret == received_secret:
                # Success!
                return

        raise InvalidProxyCredentialsError(
            "Invalid Proxy-Authorization header.", Codes.UNAUTHORIZED
        )

    async def _async_render(self, request: "SynapseRequest") -> Tuple[int, Any]:
        uri = urllib.parse.urlparse(request.uri)
        assert uri.scheme == b"matrix-federation"

        # Check the authorization headers before handling the request.
        self._check_auth(request)

        headers = Headers()
        for header_name in (b"User-Agent", b"Authorization", b"Content-Type"):
            header_value = request.getHeader(header_name)
            if header_value:
                headers.addRawHeader(header_name, header_value)

        request_deferred = run_in_background(
            self.agent.request,
            request.method,
            request.uri,
            headers=headers,
            bodyProducer=QuieterFileBodyProducer(request.content),
        )
        request_deferred = timeout_deferred(
            request_deferred,
            # This should be set longer than the timeout in `MatrixFederationHttpClient`
            # so that it has enough time to complete and pass us the data before we give
            # up.
            timeout=90,
            reactor=self.reactor,
        )

        response = await make_deferred_yieldable(request_deferred)

        return response.code, response
    def _send_response(
        self,
        request: "SynapseRequest",
        code: int,
        response_object: Any,
    ) -> None:
        response = cast(IResponse, response_object)
        response_headers = cast(Headers, response.headers)

        request.setResponseCode(code)

        # The `Connection` header also defines which headers should not be copied over.
        connection_header = response_headers.getRawHeaders(b"connection")
        extra_headers_to_remove = parse_connection_header_value(
            connection_header[0] if connection_header else None
        )

        # Copy headers.
        for k, v in response_headers.getAllRawHeaders():
            # Do not copy over any hop-by-hop headers. These are meant to only be
            # consumed by the immediate recipient and not be forwarded on.
            header_key = k.decode("ascii")
            if (
                header_key in HOP_BY_HOP_HEADERS
                or header_key in extra_headers_to_remove
            ):
                continue

            request.responseHeaders.setRawHeaders(k, v)

        response.deliverBody(_ProxyResponseBody(request))

    def _send_error_response(
        self,
        f: failure.Failure,
        request: "SynapseRequest",
    ) -> None:
        if isinstance(f.value, InvalidProxyCredentialsError):
            error_response_code = f.value.code
            error_response_json = {"errcode": f.value.errcode, "err": f.value.msg}
        else:
            error_response_code = 502
            error_response_json = {
                "errcode": Codes.UNKNOWN,
                "err": "ProxyResource: Error when proxying request: %s %s -> %s"
                % (
                    request.method.decode("ascii"),
                    request.uri.decode("ascii"),
                    f,
                ),
            }

        request.setResponseCode(error_response_code)
        request.setHeader(b"Content-Type", b"application/json")
        request.write((json.dumps(error_response_json)).encode())
        request.finish()


class _ProxyResponseBody(protocol.Protocol):
    """
    A protocol that proxies the given remote response data back out to the given local
    request.
    """

    transport: Optional[ITCPTransport] = None

    def __init__(self, request: "SynapseRequest") -> None:
        self._request = request

    def dataReceived(self, data: bytes) -> None:
        # Avoid sending response data to the local request that already disconnected
        if self._request._disconnected and self.transport is not None:
            # Close the connection (forcefully) since all the data will get
            # discarded anyway.
            self.transport.abortConnection()
            return

        self._request.write(data)

    def connectionLost(self, reason: Failure = connectionDone) -> None:
        # If the local request is already finished (successfully or failed), don't
        # worry about sending anything back.
        if self._request.finished:
            return

        if reason.check(ResponseDone):
            self._request.finish()
        else:
            # Abort the underlying request since our remote request also failed.
            self._request.transport.abortConnection()


class ProxySite(Site):
    """
    Proxies any requests with a `matrix-federation://` scheme through the given
    `federation_agent`. Otherwise, behaves like a normal `Site`.
    """

    def __init__(
        self,
        resource: IResource,
        reactor: ISynapseReactor,
        hs: "HomeServer",
    ):
        super().__init__(resource, reactor=reactor)

        self._proxy_resource = ProxyResource(reactor, hs=hs)

    def getResourceFor(self, request: "SynapseRequest") -> IResource:
        uri = urllib.parse.urlparse(request.uri)
        if uri.scheme == b"matrix-federation":
            return self._proxy_resource

        return super().getResourceFor(request)
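Putting `_send_response` together: the proxy copies the remote response's headers except the fixed hop-by-hop set and whatever the remote's `Connection` header names. A hedged dict-based illustration of that rule:

remote_headers = {
    "Content-Type": "application/json",
    "Transfer-Encoding": "chunked",  # hop-by-hop: always dropped
    "Connection": "X-Foo",           # hop-by-hop, and names X-Foo for removal
    "X-Foo": "bar",                  # dropped because Connection listed it
}
extra = {"X-Foo"}  # i.e. parse_connection_header_value(b"X-Foo")
copied = {
    k: v
    for k, v in remote_headers.items()
    if k not in HOP_BY_HOP_HEADERS and k not in extra
}
assert copied == {"Content-Type": "application/json"}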
@ -12,8 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
import re
from typing import Any, Dict, Optional, Tuple
from typing import Any, Collection, Dict, List, Optional, Sequence, Tuple
from urllib.parse import urlparse
from urllib.request import (  # type: ignore[attr-defined]
    getproxies_environment,

@ -23,8 +24,17 @@ from urllib.request import (  # type: ignore[attr-defined]
from zope.interface import implementer

from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.internet.interfaces import IReactorCore, IStreamClientEndpoint
from twisted.internet.endpoints import (
    HostnameEndpoint,
    UNIXClientEndpoint,
    wrapClientTLS,
)
from twisted.internet.interfaces import (
    IProtocol,
    IProtocolFactory,
    IReactorCore,
    IStreamClientEndpoint,
)
from twisted.python.failure import Failure
from twisted.web.client import (
    URI,
@ -36,8 +46,18 @@ from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS, IResponse

from synapse.config.workers import (
    InstanceLocationConfig,
    InstanceTcpLocationConfig,
    InstanceUnixLocationConfig,
)
from synapse.http import redact_uri
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials
from synapse.http.connectproxyclient import (
    BasicProxyCredentials,
    HTTPConnectProxyEndpoint,
    ProxyCredentials,
)
from synapse.logging.context import run_in_background

logger = logging.getLogger(__name__)
@ -74,6 +94,14 @@ class ProxyAgent(_AgentBase):
        use_proxy: Whether proxy settings should be discovered and used
            from conventional environment variables.

        federation_proxy_locations: An optional list of locations to proxy outbound federation
            traffic through (only requests that use the `matrix-federation://` scheme
            will be proxied).

        federation_proxy_credentials: Required if `federation_proxy_locations` is set. The
            credentials to use when proxying outbound federation traffic through another
            worker.

    Raises:
        ValueError if use_proxy is set and the environment variables
            contain an invalid proxy specification.

@ -89,6 +117,8 @@ class ProxyAgent(_AgentBase):
        bindAddress: Optional[bytes] = None,
        pool: Optional[HTTPConnectionPool] = None,
        use_proxy: bool = False,
        federation_proxy_locations: Collection[InstanceLocationConfig] = (),
        federation_proxy_credentials: Optional[ProxyCredentials] = None,
    ):
        contextFactory = contextFactory or BrowserLikePolicyForHTTPS()
@ -127,6 +157,47 @@ class ProxyAgent(_AgentBase):
        self._policy_for_https = contextFactory
        self._reactor = reactor

        self._federation_proxy_endpoint: Optional[IStreamClientEndpoint] = None
        self._federation_proxy_credentials: Optional[ProxyCredentials] = None
        if federation_proxy_locations:
            assert (
                federation_proxy_credentials is not None
            ), "`federation_proxy_credentials` are required when using `federation_proxy_locations`"

            endpoints: List[IStreamClientEndpoint] = []
            for federation_proxy_location in federation_proxy_locations:
                endpoint: IStreamClientEndpoint
                if isinstance(federation_proxy_location, InstanceTcpLocationConfig):
                    endpoint = HostnameEndpoint(
                        self.proxy_reactor,
                        federation_proxy_location.host,
                        federation_proxy_location.port,
                    )
                    if federation_proxy_location.tls:
                        tls_connection_creator = (
                            self._policy_for_https.creatorForNetloc(
                                federation_proxy_location.host.encode("utf-8"),
                                federation_proxy_location.port,
                            )
                        )
                        endpoint = wrapClientTLS(tls_connection_creator, endpoint)

                elif isinstance(federation_proxy_location, InstanceUnixLocationConfig):
                    endpoint = UNIXClientEndpoint(
                        self.proxy_reactor, federation_proxy_location.path
                    )

                else:
                    # It is supremely unlikely we ever hit this
                    raise SchemeNotSupported(
                        f"Unknown type of Endpoint requested, check {federation_proxy_location}"
                    )

                endpoints.append(endpoint)

            self._federation_proxy_endpoint = _RandomSampleEndpoints(endpoints)
            self._federation_proxy_credentials = federation_proxy_credentials

    def request(
        self,
        method: bytes,
@ -214,6 +285,25 @@ class ProxyAgent(_AgentBase):
                parsed_uri.port,
                self.https_proxy_creds,
            )
        elif (
            parsed_uri.scheme == b"matrix-federation"
            and self._federation_proxy_endpoint
        ):
            assert (
                self._federation_proxy_credentials is not None
            ), "`federation_proxy_credentials` are required when using `federation_proxy_locations`"

            # Set a Proxy-Authorization header
            if headers is None:
                headers = Headers()
            # We always need authentication for the outbound federation proxy
            headers.addRawHeader(
                b"Proxy-Authorization",
                self._federation_proxy_credentials.as_proxy_authorization_value(),
            )

            endpoint = self._federation_proxy_endpoint
            request_path = uri
        else:
            # not using a proxy
            endpoint = HostnameEndpoint(

@ -233,6 +323,11 @@ class ProxyAgent(_AgentBase):
            endpoint = wrapClientTLS(tls_connection_creator, endpoint)
        elif parsed_uri.scheme == b"http":
            pass
        elif (
            parsed_uri.scheme == b"matrix-federation"
            and self._federation_proxy_endpoint
        ):
            pass
        else:
            return defer.fail(
                Failure(
@ -334,6 +429,42 @@ def parse_proxy(

    credentials = None
    if url.username and url.password:
        credentials = ProxyCredentials(b"".join([url.username, b":", url.password]))
        credentials = BasicProxyCredentials(
            b"".join([url.username, b":", url.password])
        )

    return url.scheme, url.hostname, url.port or default_port, credentials


@implementer(IStreamClientEndpoint)
class _RandomSampleEndpoints:
    """An endpoint that randomly iterates through a given list of endpoints at
    each connection attempt.
    """

    def __init__(
        self,
        endpoints: Sequence[IStreamClientEndpoint],
    ) -> None:
        assert endpoints
        self._endpoints = endpoints

    def __repr__(self) -> str:
        return f"<_RandomSampleEndpoints endpoints={self._endpoints}>"

    def connect(
        self, protocol_factory: IProtocolFactory
    ) -> "defer.Deferred[IProtocol]":
        """Implements IStreamClientEndpoint interface"""
        return run_in_background(self._do_connect, protocol_factory)

    async def _do_connect(self, protocol_factory: IProtocolFactory) -> IProtocol:
        failures: List[Failure] = []
        for endpoint in random.sample(self._endpoints, k=len(self._endpoints)):
            try:
                return await endpoint.connect(protocol_factory)
            except Exception:
                failures.append(Failure())

        failures.pop().raiseException()
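The `_RandomSampleEndpoints` pattern above (shuffle the candidates, try each, surface the last failure) is generic; a hedged standalone rendering without the Twisted machinery:

import random
from typing import Callable, Optional, Sequence, TypeVar

T = TypeVar("T")

def connect_any(connectors: Sequence[Callable[[], T]]) -> T:
    # Try each candidate in random order; re-raise the last failure if all fail.
    last_error: Optional[Exception] = None
    for connector in random.sample(connectors, k=len(connectors)):
        try:
            return connector()
        except Exception as e:
            last_error = e
    assert last_error is not None
    raise last_error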
@ -18,7 +18,11 @@ from typing import Dict, Optional
from zope.interface import implementer

from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.internet.endpoints import (
    HostnameEndpoint,
    UNIXClientEndpoint,
    wrapClientTLS,
)
from twisted.internet.interfaces import IStreamClientEndpoint
from twisted.python.failure import Failure
from twisted.web.client import URI, HTTPConnectionPool, _AgentBase

@ -32,7 +36,11 @@ from twisted.web.iweb import (
    IResponse,
)

from synapse.config.workers import InstanceLocationConfig
from synapse.config.workers import (
    InstanceLocationConfig,
    InstanceTcpLocationConfig,
    InstanceUnixLocationConfig,
)
from synapse.types import ISynapseReactor

logger = logging.getLogger(__name__)
@ -40,7 +48,7 @@ logger = logging.getLogger(__name__)

@implementer(IAgentEndpointFactory)
class ReplicationEndpointFactory:
    """Connect to a given TCP socket"""
    """Connect to a given TCP or UNIX socket"""

    def __init__(
        self,
@ -64,24 +72,27 @@ class ReplicationEndpointFactory:
        # The given URI has a special scheme and includes the worker name. The
        # actual connection details are pulled from the instance map.
        worker_name = uri.netloc.decode("utf-8")
        scheme = self.instance_map[worker_name].scheme()
        location_config = self.instance_map[worker_name]
        scheme = location_config.scheme()

        if scheme in ("http", "https"):
        if isinstance(location_config, InstanceTcpLocationConfig):
            endpoint = HostnameEndpoint(
                self.reactor,
                self.instance_map[worker_name].host,
                self.instance_map[worker_name].port,
                location_config.host,
                location_config.port,
            )
            if scheme == "https":
                endpoint = wrapClientTLS(
                    # The 'port' argument below isn't actually used by the function
                    self.context_factory.creatorForNetloc(
                        self.instance_map[worker_name].host.encode("utf-8"),
                        self.instance_map[worker_name].port,
                        location_config.host.encode("utf-8"),
                        location_config.port,
                    ),
                    endpoint,
                )
            return endpoint
        elif isinstance(location_config, InstanceUnixLocationConfig):
            return UNIXClientEndpoint(self.reactor, location_config.path)
        else:
            raise SchemeNotSupported(f"Unsupported scheme: {scheme}")
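Per the changelog's Unix socket support, replication targets in `instance_map` may now be either host/port pairs or socket paths, and the dispatch above keys off the config object's type rather than the URI scheme. A hedged sketch with stand-in config classes (only the fields this code reads):

from dataclasses import dataclass
from typing import Union

@dataclass
class TcpLocation:      # stand-in for InstanceTcpLocationConfig
    host: str
    port: int
    tls: bool = False

@dataclass
class UnixLocation:     # stand-in for InstanceUnixLocationConfig
    path: str

def describe_endpoint(location: Union[TcpLocation, UnixLocation]) -> str:
    if isinstance(location, TcpLocation):
        return f"HostnameEndpoint({location.host}:{location.port}, tls={location.tls})"
    return f"UNIXClientEndpoint({location.path})"

assert describe_endpoint(UnixLocation("/run/synapse/worker1.sock")).startswith("UNIX")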
@ -138,13 +149,16 @@ class ReplicationAgent(_AgentBase):
        An existing connection from the connection pool may be used or a new
        one may be created.

        Currently, HTTP and HTTPS schemes are supported in uri.
        Currently, HTTP, HTTPS and UNIX schemes are supported in uri.

        This is copied from twisted.web.client.Agent, except:

        * It uses a different pool key (combining the host & port).
        * It does not call _ensureValidURI(...) since it breaks on some
            UNIX paths.
        * It uses a different pool key (combining the scheme with either host & port or
            socket path).
        * It does not call _ensureValidURI(...) as the strictness of IDNA2008 is not
            required when using a worker's name as a 'hostname' for Synapse HTTP
            Replication machinery. Specifically, this allows a range of ascii characters
            such as '+' and '_' in hostnames/worker's names.

        See: twisted.web.iweb.IAgent.request
        """
@ -154,9 +168,12 @@ class ReplicationAgent(_AgentBase):
        except SchemeNotSupported:
            return defer.fail(Failure())

        worker_name = parsedURI.netloc.decode("utf-8")
        key_scheme = self._endpointFactory.instance_map[worker_name].scheme()
        key_netloc = self._endpointFactory.instance_map[worker_name].netloc()
        # This sets the Pool key to be:
        # (http(s), <host:ip>)
        key = (parsedURI.scheme, parsedURI.netloc)
        # (http(s), <host:port>) or (unix, <socket_path>)
        key = (key_scheme, key_netloc)

        # _requestWithEndpoint comes from _AgentBase class
        return self._requestWithEndpoint(
@ -18,6 +18,7 @@ import html
import logging
import types
import urllib
import urllib.parse
from http import HTTPStatus
from http.client import FOUND
from inspect import isawaitable

@ -65,7 +66,6 @@ from synapse.api.errors import (
    UnrecognizedRequestError,
)
from synapse.config.homeserver import HomeServerConfig
from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background
from synapse.logging.opentracing import active_span, start_active_span, trace_servlet
from synapse.util import json_encoder

@ -76,6 +76,7 @@ from synapse.util.iterutils import chunk_seq

if TYPE_CHECKING:
    import opentracing

    from synapse.http.site import SynapseRequest
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)
@ -102,7 +103,7 @@ HTTP_STATUS_REQUEST_CANCELLED = 499


def return_json_error(
    f: failure.Failure, request: SynapseRequest, config: Optional[HomeServerConfig]
    f: failure.Failure, request: "SynapseRequest", config: Optional[HomeServerConfig]
) -> None:
    """Sends a JSON error response to clients."""

@ -220,8 +221,8 @@ def return_html_error(


def wrap_async_request_handler(
    h: Callable[["_AsyncResource", SynapseRequest], Awaitable[None]]
) -> Callable[["_AsyncResource", SynapseRequest], "defer.Deferred[None]"]:
    h: Callable[["_AsyncResource", "SynapseRequest"], Awaitable[None]]
) -> Callable[["_AsyncResource", "SynapseRequest"], "defer.Deferred[None]"]:
    """Wraps an async request handler so that it calls request.processing.

    This helps ensure that work done by the request handler after the request is completed

@ -235,7 +236,7 @@ def wrap_async_request_handler(
    """

    async def wrapped_async_request_handler(
        self: "_AsyncResource", request: SynapseRequest
        self: "_AsyncResource", request: "SynapseRequest"
    ) -> None:
        with request.processing():
            await h(self, request)
@ -300,7 +301,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):

        self._extract_context = extract_context

    def render(self, request: SynapseRequest) -> int:
    def render(self, request: "SynapseRequest") -> int:
        """This gets called by twisted every time someone sends us a request."""
        request.render_deferred = defer.ensureDeferred(
            self._async_render_wrapper(request)

@ -308,7 +309,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
        return NOT_DONE_YET

    @wrap_async_request_handler
    async def _async_render_wrapper(self, request: SynapseRequest) -> None:
    async def _async_render_wrapper(self, request: "SynapseRequest") -> None:
        """This is a wrapper that delegates to `_async_render` and handles
        exceptions, return values, metrics, etc.
        """

@ -326,9 +327,15 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
            # of our stack, and thus gives us a sensible stack
            # trace.
            f = failure.Failure()
            logger.exception(
                "Error handling request",
                exc_info=(f.type, f.value, f.getTracebackObject()),
            )
            self._send_error_response(f, request)

    async def _async_render(self, request: SynapseRequest) -> Optional[Tuple[int, Any]]:
    async def _async_render(
        self, request: "SynapseRequest"
    ) -> Optional[Tuple[int, Any]]:
        """Delegates to `_async_render_<METHOD>` methods, or returns a 400 if
        no appropriate method exists. Can be overridden in sub classes for
        different routing.
@ -358,7 +365,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def _send_response(
        self,
        request: SynapseRequest,
        request: "SynapseRequest",
        code: int,
        response_object: Any,
    ) -> None:

@ -368,7 +375,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
    def _send_error_response(
        self,
        f: failure.Failure,
        request: SynapseRequest,
        request: "SynapseRequest",
    ) -> None:
        raise NotImplementedError()

@ -384,7 +391,7 @@ class DirectServeJsonResource(_AsyncResource):

    def _send_response(
        self,
        request: SynapseRequest,
        request: "SynapseRequest",
        code: int,
        response_object: Any,
    ) -> None:

@ -401,7 +408,7 @@ class DirectServeJsonResource(_AsyncResource):
    def _send_error_response(
        self,
        f: failure.Failure,
        request: SynapseRequest,
        request: "SynapseRequest",
    ) -> None:
        """Implements _AsyncResource._send_error_response"""
        return_json_error(f, request, None)

@ -473,7 +480,7 @@ class JsonResource(DirectServeJsonResource):
        )

    def _get_handler_for_request(
        self, request: SynapseRequest
        self, request: "SynapseRequest"
    ) -> Tuple[ServletCallback, str, Dict[str, str]]:
        """Finds a callback method to handle the given request.

@ -503,7 +510,7 @@ class JsonResource(DirectServeJsonResource):
        # Huh. No one wanted to handle that? Fiiiiiine.
        raise UnrecognizedRequestError(code=404)

    async def _async_render(self, request: SynapseRequest) -> Tuple[int, Any]:
    async def _async_render(self, request: "SynapseRequest") -> Tuple[int, Any]:
        callback, servlet_classname, group_dict = self._get_handler_for_request(request)

        request.is_render_cancellable = is_function_cancellable(callback)
@ -535,7 +542,7 @@ class JsonResource(DirectServeJsonResource):
    def _send_error_response(
        self,
        f: failure.Failure,
        request: SynapseRequest,
        request: "SynapseRequest",
    ) -> None:
        """Implements _AsyncResource._send_error_response"""
        return_json_error(f, request, self.hs.config)

@ -551,7 +558,7 @@ class DirectServeHtmlResource(_AsyncResource):

    def _send_response(
        self,
        request: SynapseRequest,
        request: "SynapseRequest",
        code: int,
        response_object: Any,
    ) -> None:

@ -565,7 +572,7 @@ class DirectServeHtmlResource(_AsyncResource):
    def _send_error_response(
        self,
        f: failure.Failure,
        request: SynapseRequest,
        request: "SynapseRequest",
    ) -> None:
        """Implements _AsyncResource._send_error_response"""
        return_html_error(f, request, self.ERROR_TEMPLATE)

@ -592,7 +599,7 @@ class UnrecognizedRequestResource(resource.Resource):
    errcode of M_UNRECOGNIZED.
    """

    def render(self, request: SynapseRequest) -> int:
    def render(self, request: "SynapseRequest") -> int:
        f = failure.Failure(UnrecognizedRequestError(code=404))
        return_json_error(f, request, None)
        # A response has already been sent but Twisted requires either NOT_DONE_YET

@ -622,7 +629,7 @@ class RootRedirect(resource.Resource):
class OptionsResource(resource.Resource):
    """Responds to OPTION requests for itself and all children."""

    def render_OPTIONS(self, request: SynapseRequest) -> bytes:
    def render_OPTIONS(self, request: "SynapseRequest") -> bytes:
        request.setResponseCode(204)
        request.setHeader(b"Content-Length", b"0")
@ -737,7 +744,7 @@ def _encode_json_bytes(json_object: object) -> bytes:
|
|||
|
||||
|
||||
def respond_with_json(
|
||||
request: SynapseRequest,
|
||||
request: "SynapseRequest",
|
||||
code: int,
|
||||
json_object: Any,
|
||||
send_cors: bool = False,
|
||||
|
@ -787,7 +794,7 @@ def respond_with_json(
|
|||
|
||||
|
||||
def respond_with_json_bytes(
|
||||
request: SynapseRequest,
|
||||
request: "SynapseRequest",
|
||||
code: int,
|
||||
json_bytes: bytes,
|
||||
send_cors: bool = False,
|
||||
|
@ -825,7 +832,7 @@ def respond_with_json_bytes(
|
|||
|
||||
|
||||
async def _async_write_json_to_request_in_thread(
|
||||
request: SynapseRequest,
|
||||
request: "SynapseRequest",
|
||||
json_encoder: Callable[[Any], bytes],
|
||||
json_object: Any,
|
||||
) -> None:
|
||||
|
@ -883,7 +890,7 @@ def _write_bytes_to_request(request: Request, bytes_to_write: bytes) -> None:
|
|||
_ByteProducer(request, bytes_generator)
|
||||
|
||||
|
||||
def set_cors_headers(request: SynapseRequest) -> None:
|
||||
def set_cors_headers(request: "SynapseRequest") -> None:
|
||||
"""Set the CORS headers so that javascript running in a web browsers can
|
||||
use this API
|
||||
|
||||
|
@ -981,7 +988,7 @@ def set_clickjacking_protection_headers(request: Request) -> None:
|
|||
|
||||
|
||||
def respond_with_redirect(
|
||||
request: SynapseRequest, url: bytes, statusCode: int = FOUND, cors: bool = False
|
||||
request: "SynapseRequest", url: bytes, statusCode: int = FOUND, cors: bool = False
|
||||
) -> None:
|
||||
"""
|
||||
Write a 302 (or other specified status code) response to the request, if it is still alive.
|
||||
|
|
|
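Editor's note: every annotation in this file becomes a string forward reference. A plausible reading (the diff itself does not state it) is that `SynapseRequest` now sits behind a `TYPE_CHECKING` guard to break the import cycle introduced by the new proxying code. The standard shape of that pattern, as a minimal sketch:

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Only imported by type checkers, never at runtime, so no cycle.
        from synapse.http.site import SynapseRequest

    def handle(request: "SynapseRequest") -> None:  # quoted = forward reference
        ...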
@@ -21,25 +21,29 @@ from zope.interface import implementer
 
 from twisted.internet.address import UNIXAddress
 from twisted.internet.defer import Deferred
-from twisted.internet.interfaces import IAddress, IReactorTime
+from twisted.internet.interfaces import IAddress
 from twisted.python.failure import Failure
 from twisted.web.http import HTTPChannel
 from twisted.web.resource import IResource, Resource
-from twisted.web.server import Request, Site
+from twisted.web.server import Request
 
 from synapse.config.server import ListenerConfig
 from synapse.http import get_request_user_agent, redact_uri
+from synapse.http.proxy import ProxySite
 from synapse.http.request_metrics import RequestMetrics, requests_counter
 from synapse.logging.context import (
     ContextRequest,
     LoggingContext,
     PreserveLoggingContext,
 )
-from synapse.types import Requester
+from synapse.types import ISynapseReactor, Requester
 
 if TYPE_CHECKING:
     import opentracing
 
+    from synapse.server import HomeServer
+
 
 logger = logging.getLogger(__name__)
 
 _next_request_seq = 0

@@ -102,7 +106,7 @@ class SynapseRequest(Request):
         # A boolean indicating whether `render_deferred` should be cancelled if the
         # client disconnects early. Expected to be set by the coroutine started by
         # `Resource.render`, if rendering is asynchronous.
-        self.is_render_cancellable = False
+        self.is_render_cancellable: bool = False
 
         global _next_request_seq
         self.request_seq = _next_request_seq

@@ -601,7 +605,7 @@ class _XForwardedForAddress:
     host: str
 
 
-class SynapseSite(Site):
+class SynapseSite(ProxySite):
     """
     Synapse-specific twisted http Site
 

@@ -623,7 +627,8 @@ class SynapseSite(Site):
         resource: IResource,
         server_version_string: str,
         max_request_body_size: int,
-        reactor: IReactorTime,
+        reactor: ISynapseReactor,
+        hs: "HomeServer",
     ):
         """
 

@@ -638,7 +643,11 @@ class SynapseSite(Site):
             dropping the connection
             reactor: reactor to be used to manage connection timeouts
         """
-        Site.__init__(self, resource, reactor=reactor)
+        super().__init__(
+            resource=resource,
+            reactor=reactor,
+            hs=hs,
+        )
 
         self.site_tag = site_tag
         self.reactor = reactor

@@ -649,7 +658,9 @@ class SynapseSite(Site):
 
         request_id_header = config.http_options.request_id_header
 
-        self.experimental_cors_msc3886 = config.http_options.experimental_cors_msc3886
+        self.experimental_cors_msc3886: bool = (
+            config.http_options.experimental_cors_msc3886
+        )
 
         def request_factory(channel: HTTPChannel, queued: bool) -> Request:
             return request_class(
@@ -1070,7 +1070,7 @@ def trace_servlet(
             tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
             tags.HTTP_METHOD: request.get_method(),
             tags.HTTP_URL: request.get_redacted_uri(),
-            tags.PEER_HOST_IPV6: request.getClientAddress().host,
+            tags.PEER_HOST_IPV6: request.get_client_ip_if_available(),
         }
 
         request_name = request.request_metrics.name

@@ -1091,9 +1091,11 @@ def trace_servlet(
             # with JsonResource).
             scope.span.set_operation_name(request.request_metrics.name)
 
+            # Mypy seems to think that start_context.tag below can be Optional[str], but
+            # that doesn't appear to be correct and works in practice.
             request_tags[
                 SynapseTags.REQUEST_TAG
-            ] = request.request_metrics.start_context.tag
+            ] = request.request_metrics.start_context.tag  # type: ignore[assignment]
 
             # set the tags *after* the servlet completes, in case it decided to
             # prioritise the span (tags will get dropped on unprioritised spans)
@@ -375,7 +375,7 @@ class BulkPushRuleEvaluator:
         # _get_power_levels_and_sender_level in its call to get_user_power_level
         # (even for room V10.)
         notification_levels = power_levels.get("notifications", {})
-        if not event.room_version.msc3667_int_only_power_levels:
+        if not event.room_version.enforce_int_power_levels:
             keys = list(notification_levels.keys())
             for key in keys:
                 level = notification_levels.get(key, SENTINEL)
@@ -13,6 +13,7 @@
 # limitations under the License.
+from typing import Dict
 
 from synapse.api.constants import EventTypes, Membership
 from synapse.events import EventBase
 from synapse.push.presentable_names import calculate_room_name, name_from_member_event
 from synapse.storage.controllers import StorageControllers

@@ -49,7 +50,41 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -
 async def get_context_for_event(
     storage: StorageControllers, ev: EventBase, user_id: str
 ) -> Dict[str, str]:
-    ctx = {}
+    ctx: Dict[str, str] = {}
+
+    if ev.internal_metadata.outlier:
+        # We don't have state for outliers, so we can't compute the context
+        # except for invites and knocks. (Such events are known as 'out-of-band
+        # memberships' for the user).
+        if ev.type != EventTypes.Member:
+            return ctx
+
+        # We might be able to pull out the display name for the sender straight
+        # from the membership event
+        event_display_name = ev.content.get("displayname")
+        if event_display_name and ev.state_key == ev.sender:
+            ctx["sender_display_name"] = event_display_name
+
+        room_state = []
+        if ev.content.get("membership") == Membership.INVITE:
+            room_state = ev.unsigned.get("invite_room_state", [])
+        elif ev.content.get("membership") == Membership.KNOCK:
+            room_state = ev.unsigned.get("knock_room_state", [])
+
+        # Ideally we'd reuse the logic in `calculate_room_name`, but that gets
+        # complicated to handle partial events vs pulling events from the DB.
+        for state_dict in room_state:
+            type_tuple = (state_dict["type"], state_dict.get("state_key"))
+            if type_tuple == (EventTypes.Member, ev.sender):
+                display_name = state_dict["content"].get("displayname")
+                if display_name:
+                    ctx["sender_display_name"] = display_name
+            elif type_tuple == (EventTypes.Name, ""):
+                room_name = state_dict["content"].get("name")
+                if room_name:
+                    ctx["name"] = room_name
+
+        return ctx
 
     room_state_ids = await storage.state.get_state_ids_for_event(ev.event_id)
 
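Editor's note: for remote invites and knocks the event is an outlier, so the new branch above reads the "stripped state" bundled on the event's `unsigned` section instead of querying room state. A worked example of the payload shape it walks (the field layout follows the Matrix spec's stripped-state format; the concrete values are made up for illustration):

    invite_room_state = [
        {
            "type": "m.room.member",
            "state_key": "@alice:remote",  # matches ev.sender below
            "sender": "@alice:remote",
            "content": {"displayname": "Alice", "membership": "join"},
        },
        {
            "type": "m.room.name",
            "state_key": "",
            "sender": "@alice:remote",
            "content": {"name": "Project room"},
        },
    ]

    # With ev.sender == "@alice:remote", the loop above would produce:
    #   ctx == {"sender_display_name": "Alice", "name": "Project room"}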
@@ -462,9 +462,9 @@ class RegisterRestServlet(RestServlet):
         # the auth layer will store these in sessions.
         desired_username = None
         if "username" in body:
-            if not isinstance(body["username"], str) or len(body["username"]) > 512:
-                raise SynapseError(400, "Invalid username")
             desired_username = body["username"]
+            if not isinstance(desired_username, str) or len(desired_username) > 512:
+                raise SynapseError(400, "Invalid username")
 
         # fork off as soon as possible for ASes which have completely
         # different registration flows to normal users

@@ -477,11 +477,6 @@ class RegisterRestServlet(RestServlet):
                 "Appservice token must be provided when using a type of m.login.application_service",
             )
 
-            # Set the desired user according to the AS API (which uses the
-            # 'user' key not 'username'). Since this is a new addition, we'll
-            # fallback to 'username' if they gave one.
-            desired_username = body.get("user", desired_username)
-
             # XXX we should check that desired_username is valid. Currently
             # we give appservices carte blanche for any insanity in mxids,
             # because the IRC bridges rely on being able to register stupid

@@ -489,7 +484,8 @@ class RegisterRestServlet(RestServlet):
 
             access_token = self.auth.get_access_token_from_request(request)
 
-            if not isinstance(desired_username, str):
+            # Desired username is either a string or None.
+            if desired_username is None:
                 raise SynapseError(400, "Desired Username is missing or not a string")
 
             result = await self._do_appservice_registration(
@@ -1117,7 +1117,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
         # Ensure the redacts property in the content matches the one provided in
         # the URL.
         room_version = await self._store.get_room_version(room_id)
-        if room_version.msc2176_redaction_rules:
+        if room_version.updated_redaction_rules:
             if "redacts" in content and content["redacts"] != event_id:
                 raise SynapseError(
                     400,

@@ -1151,7 +1151,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
                 "sender": requester.user.to_string(),
             }
             # Earlier room versions had a top-level redacts property.
-            if not room_version.msc2176_redaction_rules:
+            if not room_version.updated_redaction_rules:
                 event_dict["redacts"] = event_id
 
             (
@@ -667,7 +667,7 @@ async def _mainline_sort(
     order_map = {}
     for idx, ev_id in enumerate(event_ids, start=1):
         depth = await _get_mainline_depth_for_event(
-            event_map[ev_id], mainline_map, event_map, state_res_store
+            clock, event_map[ev_id], mainline_map, event_map, state_res_store
         )
         order_map[ev_id] = (depth, event_map[ev_id].origin_server_ts, ev_id)
 

@@ -682,6 +682,7 @@ async def _mainline_sort(
 
 
 async def _get_mainline_depth_for_event(
+    clock: Clock,
     event: EventBase,
     mainline_map: Dict[str, int],
     event_map: Dict[str, EventBase],

@@ -704,6 +705,7 @@ async def _get_mainline_depth_for_event(
 
     # We do an iterative search, replacing `event with the power level in its
     # auth events (if any)
+    idx = 0
     while tmp_event:
         depth = mainline_map.get(tmp_event.event_id)
         if depth is not None:

@@ -720,6 +722,11 @@ async def _get_mainline_depth_for_event(
                 tmp_event = aev
                 break
 
+        idx += 1
+
+        if idx % _AWAIT_AFTER_ITERATIONS == 0:
+            await clock.sleep(0)
+
     # Didn't find a power level auth event, so we just return 0
     return 0
 
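Editor's note: the `clock.sleep(0)` added above is the usual trick for keeping a long, CPU-only loop from starving the Twisted reactor: every `_AWAIT_AFTER_ITERATIONS` passes (the constant is defined elsewhere in the same module) the coroutine yields control and immediately resumes. A rough standalone sketch of the same pattern using plain asyncio (all names here are illustrative, not Synapse's):

    import asyncio

    _AWAIT_AFTER_ITERATIONS = 100

    async def walk_long_chain(items: list) -> int:
        processed = 0
        for _item in items:
            # ... per-item CPU-bound work would go here ...
            processed += 1
            if processed % _AWAIT_AFTER_ITERATIONS == 0:
                # Yield to the event loop so other tasks can run.
                await asyncio.sleep(0)
        return processed

    asyncio.run(walk_long_chain(list(range(1_000))))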
@@ -2136,7 +2136,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
             raise StoreError(400, "No create event in state")
 
         # Before MSC2175, the room creator was a separate field.
-        if not room_version.msc2175_implicit_room_creator:
+        if not room_version.implicit_room_creator:
             room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
 
             if not isinstance(room_creator, str):
@@ -62,7 +62,6 @@ from synapse.types import (
     get_domain_from_id,
     get_localpart_from_id,
 )
-from synapse.util.caches.descriptors import cached
 
 logger = logging.getLogger(__name__)
 

@@ -771,9 +770,6 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             # This should be unreachable.
             raise Exception("Unrecognized database engine")
 
-        for p in profiles:
-            txn.call_after(self.get_user_in_directory.invalidate, (p.user_id,))
-
     async def add_users_who_share_private_room(
         self, room_id: str, user_id_tuples: Iterable[Tuple[str, str]]
     ) -> None:

@@ -831,14 +827,12 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             txn.execute(f"{truncate} user_directory_search")
             txn.execute(f"{truncate} users_in_public_rooms")
             txn.execute(f"{truncate} users_who_share_private_rooms")
-            txn.call_after(self.get_user_in_directory.invalidate_all)
 
         await self.db_pool.runInteraction(
             "delete_all_from_user_dir", _delete_all_from_user_dir_txn
        )
 
-    @cached()
-    async def get_user_in_directory(self, user_id: str) -> Optional[Mapping[str, str]]:
+    async def _get_user_in_directory(self, user_id: str) -> Optional[Mapping[str, str]]:
         return await self.db_pool.simple_select_one(
             table="user_directory",
             keyvalues={"user_id": user_id},

@@ -900,7 +894,6 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
             table="users_who_share_private_rooms",
             keyvalues={"other_user_id": user_id},
         )
-        txn.call_after(self.get_user_in_directory.invalidate, (user_id,))
 
         await self.db_pool.runInteraction(
             "remove_from_user_dir", _remove_from_user_dir_txn
@@ -99,54 +99,3 @@ CREATE UNIQUE INDEX worker_read_write_locks_write ON worker_read_write_locks (lo
 -- constraints.
 ALTER TABLE worker_read_write_locks_mode ADD CONSTRAINT worker_read_write_locks_mode_foreign
     FOREIGN KEY (lock_name, lock_key, token) REFERENCES worker_read_write_locks(lock_name, lock_key, token) DEFERRABLE INITIALLY DEFERRED;
-
-
--- Add a trigger to UPSERT into `worker_read_write_locks_mode` whenever we try
--- and acquire a lock, i.e. insert into `worker_read_write_locks`,
-CREATE OR REPLACE FUNCTION upsert_read_write_lock_parent() RETURNS trigger AS $$
-BEGIN
-    INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token)
-        VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token)
-        ON CONFLICT (lock_name, lock_key)
-        DO NOTHING;
-    RETURN NEW;
-END
-$$
-LANGUAGE plpgsql;
-
-CREATE TRIGGER upsert_read_write_lock_parent_trigger BEFORE INSERT ON worker_read_write_locks
-    FOR EACH ROW
-    EXECUTE PROCEDURE upsert_read_write_lock_parent();
-
-
--- Ensure that we keep `worker_read_write_locks_mode` up to date whenever a lock
--- is released (i.e. a row deleted from `worker_read_write_locks`). Either we
--- update the `worker_read_write_locks_mode.token` to match another instance
--- that has currently acquired the lock, or we delete the row if nobody has
--- currently acquired a lock.
-CREATE OR REPLACE FUNCTION delete_read_write_lock_parent() RETURNS trigger AS $$
-DECLARE
-    new_token TEXT;
-BEGIN
-    SELECT token INTO new_token FROM worker_read_write_locks
-        WHERE
-            lock_name = OLD.lock_name
-            AND lock_key = OLD.lock_key;
-
-    IF NOT FOUND THEN
-        DELETE FROM worker_read_write_locks_mode
-            WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key;
-    ELSE
-        UPDATE worker_read_write_locks_mode
-            SET token = new_token
-            WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key;
-    END IF;
-
-    RETURN NEW;
-END
-$$
-LANGUAGE plpgsql;
-
-CREATE TRIGGER delete_read_write_lock_parent_trigger AFTER DELETE ON worker_read_write_locks
-    FOR EACH ROW
-    EXECUTE PROCEDURE delete_read_write_lock_parent();
@@ -70,50 +70,3 @@ CREATE TABLE worker_read_write_locks (
 CREATE UNIQUE INDEX worker_read_write_locks_key ON worker_read_write_locks (lock_name, lock_key, token);
 -- Ensures that only one instance can acquire a lock in write mode at a time.
 CREATE UNIQUE INDEX worker_read_write_locks_write ON worker_read_write_locks (lock_name, lock_key) WHERE write_lock;
-
-
--- Add a trigger to UPSERT into `worker_read_write_locks_mode` whenever we try
--- and acquire a lock, i.e. insert into `worker_read_write_locks`,
-CREATE TRIGGER IF NOT EXISTS upsert_read_write_lock_parent_trigger
-BEFORE INSERT ON worker_read_write_locks
-FOR EACH ROW
-BEGIN
-    -- First ensure that `worker_read_write_locks_mode` doesn't have stale
-    -- entries in it, as on SQLite we don't have the foreign key constraint to
-    -- enforce this.
-    DELETE FROM worker_read_write_locks_mode
-        WHERE lock_name = NEW.lock_name AND lock_key = NEW.lock_key
-        AND NOT EXISTS (
-            SELECT 1 FROM worker_read_write_locks
-            WHERE lock_name = NEW.lock_name AND lock_key = NEW.lock_key
-        );
-
-    INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token)
-        VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token)
-        ON CONFLICT (lock_name, lock_key)
-        DO NOTHING;
-END;
-
--- Ensure that we keep `worker_read_write_locks_mode` up to date whenever a lock
--- is released (i.e. a row deleted from `worker_read_write_locks`). Either we
--- update the `worker_read_write_locks_mode.token` to match another instance
--- that has currently acquired the lock, or we delete the row if nobody has
--- currently acquired a lock.
-CREATE TRIGGER IF NOT EXISTS delete_read_write_lock_parent_trigger
-AFTER DELETE ON worker_read_write_locks
-FOR EACH ROW
-BEGIN
-    DELETE FROM worker_read_write_locks_mode
-        WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
-        AND NOT EXISTS (
-            SELECT 1 FROM worker_read_write_locks
-            WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
-        );
-
-    UPDATE worker_read_write_locks_mode
-        SET token = (
-            SELECT token FROM worker_read_write_locks
-            WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
-        )
-        WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key;
-END;
@@ -0,0 +1,70 @@
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
+
+
+def run_create(
+    cur: LoggingTransaction,
+    database_engine: BaseDatabaseEngine,
+) -> None:
+    """
+    An attempt to mitigate a painful race between foreground and background updates
+    touching the `stream_ordering` column of the events table. More info can be found
+    at https://github.com/matrix-org/synapse/issues/15677.
+    """
+
+    # technically the bg update we're concerned with below should only have been added in
+    # postgres but it doesn't hurt to be extra careful
+    if isinstance(database_engine, PostgresEngine):
+        select_sql = """
+            SELECT 1 FROM background_updates
+                WHERE update_name = 'replace_stream_ordering_column'
+        """
+        cur.execute(select_sql)
+        res = cur.fetchone()
+
+        # if the background update `replace_stream_ordering_column` is still pending, we need
+        # to drop the indexes added in 7403, and re-add them to the column `stream_ordering2`
+        # with the idea that they will be preserved when the column is renamed `stream_ordering`
+        # after the background update has finished
+        if res:
+            drop_cse_sql = """
+            ALTER TABLE current_state_events DROP CONSTRAINT event_stream_ordering_fkey
+            """
+            cur.execute(drop_cse_sql)
+
+            drop_lcm_sql = """
+            ALTER TABLE local_current_membership DROP CONSTRAINT event_stream_ordering_fkey
+            """
+            cur.execute(drop_lcm_sql)
+
+            drop_rm_sql = """
+            ALTER TABLE room_memberships DROP CONSTRAINT event_stream_ordering_fkey
+            """
+            cur.execute(drop_rm_sql)
+
+            add_cse_sql = """
+            ALTER TABLE current_state_events ADD CONSTRAINT event_stream_ordering_fkey
+            FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering2) NOT VALID;
+            """
+            cur.execute(add_cse_sql)
+
+            add_lcm_sql = """
+            ALTER TABLE local_current_membership ADD CONSTRAINT event_stream_ordering_fkey
+            FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering2) NOT VALID;
+            """
+            cur.execute(add_lcm_sql)
+
+            add_rm_sql = """
+            ALTER TABLE room_memberships ADD CONSTRAINT event_stream_ordering_fkey
+            FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering2) NOT VALID;
+            """
+            cur.execute(add_rm_sql)
@@ -0,0 +1,69 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Fix up the triggers that were in `78/04_read_write_locks_triggers.sql`
+
+-- Add a trigger to UPSERT into `worker_read_write_locks_mode` whenever we try
+-- and acquire a lock, i.e. insert into `worker_read_write_locks`,
+CREATE OR REPLACE FUNCTION upsert_read_write_lock_parent() RETURNS trigger AS $$
+BEGIN
+    INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token)
+        VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token)
+        ON CONFLICT (lock_name, lock_key)
+        DO UPDATE SET write_lock = NEW.write_lock, token = NEW.token;
+    RETURN NEW;
+END
+$$
+LANGUAGE plpgsql;
+
+DROP TRIGGER IF EXISTS upsert_read_write_lock_parent_trigger ON worker_read_write_locks;
+CREATE TRIGGER upsert_read_write_lock_parent_trigger BEFORE INSERT ON worker_read_write_locks
+    FOR EACH ROW
+    EXECUTE PROCEDURE upsert_read_write_lock_parent();
+
+
+-- Ensure that we keep `worker_read_write_locks_mode` up to date whenever a lock
+-- is released (i.e. a row deleted from `worker_read_write_locks`). Either we
+-- update the `worker_read_write_locks_mode.token` to match another instance
+-- that has currently acquired the lock, or we delete the row if nobody has
+-- currently acquired a lock.
+CREATE OR REPLACE FUNCTION delete_read_write_lock_parent() RETURNS trigger AS $$
+DECLARE
+    new_token TEXT;
+BEGIN
+    SELECT token INTO new_token FROM worker_read_write_locks
+        WHERE
+            lock_name = OLD.lock_name
+            AND lock_key = OLD.lock_key
+        LIMIT 1 FOR UPDATE;
+
+    IF NOT FOUND THEN
+        DELETE FROM worker_read_write_locks_mode
+            WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key AND token = OLD.token;
+    ELSE
+        UPDATE worker_read_write_locks_mode
+            SET token = new_token
+            WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key;
+    END IF;
+
+    RETURN NEW;
+END
+$$
+LANGUAGE plpgsql;
+
+DROP TRIGGER IF EXISTS delete_read_write_lock_parent_trigger ON worker_read_write_locks;
+CREATE TRIGGER delete_read_write_lock_parent_trigger AFTER DELETE ON worker_read_write_locks
+    FOR EACH ROW
+    EXECUTE PROCEDURE delete_read_write_lock_parent();
@@ -0,0 +1,65 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Fix up the triggers that were in `78/04_read_write_locks_triggers.sql`
+
+-- Add a trigger to UPSERT into `worker_read_write_locks_mode` whenever we try
+-- and acquire a lock, i.e. insert into `worker_read_write_locks`,
+DROP TRIGGER IF EXISTS upsert_read_write_lock_parent_trigger;
+CREATE TRIGGER IF NOT EXISTS upsert_read_write_lock_parent_trigger
+BEFORE INSERT ON worker_read_write_locks
+FOR EACH ROW
+BEGIN
+    -- First ensure that `worker_read_write_locks_mode` doesn't have stale
+    -- entries in it, as on SQLite we don't have the foreign key constraint to
+    -- enforce this.
+    DELETE FROM worker_read_write_locks_mode
+        WHERE lock_name = NEW.lock_name AND lock_key = NEW.lock_key
+        AND NOT EXISTS (
+            SELECT 1 FROM worker_read_write_locks
+            WHERE lock_name = NEW.lock_name AND lock_key = NEW.lock_key
+        );
+
+    INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token)
+        VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token)
+        ON CONFLICT (lock_name, lock_key)
+        DO UPDATE SET write_lock = NEW.write_lock, token = NEW.token;
+END;
+
+-- Ensure that we keep `worker_read_write_locks_mode` up to date whenever a lock
+-- is released (i.e. a row deleted from `worker_read_write_locks`). Either we
+-- update the `worker_read_write_locks_mode.token` to match another instance
+-- that has currently acquired the lock, or we delete the row if nobody has
+-- currently acquired a lock.
+DROP TRIGGER IF EXISTS delete_read_write_lock_parent_trigger;
+CREATE TRIGGER IF NOT EXISTS delete_read_write_lock_parent_trigger
+AFTER DELETE ON worker_read_write_locks
+FOR EACH ROW
+BEGIN
+    DELETE FROM worker_read_write_locks_mode
+        WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
+        AND token = OLD.token
+        AND NOT EXISTS (
+            SELECT 1 FROM worker_read_write_locks
+            WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
+        );
+
+    UPDATE worker_read_write_locks_mode
+        SET token = (
+            SELECT token FROM worker_read_write_locks
+            WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
+        )
+        WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key;
+END;
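Editor's note: the net effect of the fixed triggers is easiest to see in isolation. Below is a small, self-contained Python demonstration. Assumptions to be clear about: the table definitions are cut down to just the columns the triggers touch (not the real Synapse schema); the stale-entry cleanup step of the real INSERT trigger is omitted because this toy setup never creates stale rows; and a reasonably recent SQLite is assumed, since the trigger body uses UPSERT syntax.

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.executescript(
        """
        CREATE TABLE worker_read_write_locks_mode (
            lock_name TEXT NOT NULL,
            lock_key TEXT NOT NULL,
            write_lock BOOLEAN NOT NULL,
            token TEXT NOT NULL,
            UNIQUE (lock_name, lock_key)
        );
        CREATE TABLE worker_read_write_locks (
            lock_name TEXT NOT NULL,
            lock_key TEXT NOT NULL,
            write_lock BOOLEAN NOT NULL,
            token TEXT NOT NULL
        );

        CREATE TRIGGER upsert_read_write_lock_parent_trigger
        BEFORE INSERT ON worker_read_write_locks
        FOR EACH ROW
        BEGIN
            INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token)
                VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token)
                ON CONFLICT (lock_name, lock_key)
                DO UPDATE SET write_lock = NEW.write_lock, token = NEW.token;
        END;

        CREATE TRIGGER delete_read_write_lock_parent_trigger
        AFTER DELETE ON worker_read_write_locks
        FOR EACH ROW
        BEGIN
            DELETE FROM worker_read_write_locks_mode
                WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
                AND token = OLD.token
                AND NOT EXISTS (
                    SELECT 1 FROM worker_read_write_locks
                    WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
                );
            UPDATE worker_read_write_locks_mode
                SET token = (
                    SELECT token FROM worker_read_write_locks
                    WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
                )
                WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key;
        END;
        """
    )

    # Two readers acquire the same lock under different tokens.
    conn.execute("INSERT INTO worker_read_write_locks VALUES ('name', 'key', 0, 'tok1')")
    conn.execute("INSERT INTO worker_read_write_locks VALUES ('name', 'key', 0, 'tok2')")
    # The second insert hit the ON CONFLICT path, so the mode row carries 'tok2'.
    # Releasing 'tok1' leaves the mode row pointing at the surviving holder:
    conn.execute("DELETE FROM worker_read_write_locks WHERE token = 'tok1'")
    print(conn.execute("SELECT * FROM worker_read_write_locks_mode").fetchall())
    # -> [('name', 'key', 0, 'tok2')]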
@@ -348,22 +348,15 @@ class EventID(DomainSpecificString):
     SIGIL = "$"
 
 
-mxid_localpart_allowed_characters = set(
-    "_-./=" + string.ascii_lowercase + string.digits
+MXID_LOCALPART_ALLOWED_CHARACTERS = set(
+    "_-./=+" + string.ascii_lowercase + string.digits
 )
-# MSC4007 adds the + to the allowed characters.
-#
-# TODO If this was accepted, update the SSO code to support this, see the callers
-# of map_username_to_mxid_localpart.
-extended_mxid_localpart_allowed_characters = mxid_localpart_allowed_characters | {"+"}
 
 # Guest user IDs are purely numeric.
 GUEST_USER_ID_PATTERN = re.compile(r"^\d+$")
 
 
-def contains_invalid_mxid_characters(
-    localpart: str, use_extended_character_set: bool
-) -> bool:
+def contains_invalid_mxid_characters(localpart: str) -> bool:
     """Check for characters not allowed in an mxid or groupid localpart
 
     Args:

@@ -374,12 +367,7 @@ def contains_invalid_mxid_characters(
     Returns:
         True if there are any naughty characters
     """
-    allowed_characters = (
-        extended_mxid_localpart_allowed_characters
-        if use_extended_character_set
-        else mxid_localpart_allowed_characters
-    )
-    return any(c not in allowed_characters for c in localpart)
+    return any(c not in MXID_LOCALPART_ALLOWED_CHARACTERS for c in localpart)
 
 
 UPPER_CASE_PATTERN = re.compile(b"[A-Z_]")

@@ -396,7 +384,7 @@ UPPER_CASE_PATTERN = re.compile(b"[A-Z_]")
 # bytes rather than strings
 #
 NON_MXID_CHARACTER_PATTERN = re.compile(
-    ("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters - {"="})),)).encode(
+    ("[^%s]" % (re.escape("".join(MXID_LOCALPART_ALLOWED_CHARACTERS - {"="})),)).encode(
         "ascii"
     )
 )
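Editor's note: the user-visible effect of the constant rename and the dropped `use_extended_character_set` flag is that `+` is now unconditionally legal in localparts, per MSC4009. A quick illustration (the localparts are made up):

    from synapse.types import contains_invalid_mxid_characters

    # E.164-style localparts pass validation after this change:
    assert not contains_invalid_mxid_characters("+447700900123")
    # Upper case is still rejected, as before:
    assert contains_invalid_mxid_characters("Alice")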
@@ -31,9 +31,7 @@ from tests.unittest import HomeserverTestCase
 
 class FederationReaderOpenIDListenerTests(HomeserverTestCase):
     def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-        hs = self.setup_test_homeserver(
-            federation_http_client=None, homeserver_to_use=GenericWorkerServer
-        )
+        hs = self.setup_test_homeserver(homeserver_to_use=GenericWorkerServer)
         return hs
 
     def default_config(self) -> JsonDict:

@@ -91,9 +89,7 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase):
 @patch("synapse.app.homeserver.KeyResource", new=Mock())
 class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase):
     def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-        hs = self.setup_test_homeserver(
-            federation_http_client=None, homeserver_to_use=SynapseHomeServer
-        )
+        hs = self.setup_test_homeserver(homeserver_to_use=SynapseHomeServer)
         return hs
 
     @parameterized.expand(
@@ -140,18 +140,16 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
             },
         )
 
-        # As of MSC2176 we now redact the membership and prev_states keys.
+        # As of room versions we now redact the membership, prev_states, and origin keys.
         self.run_test(
-            {"type": "A", "prev_state": "prev_state", "membership": "join"},
+            {
+                "type": "A",
+                "prev_state": "prev_state",
+                "membership": "join",
+                "origin": "example.com",
+            },
             {"type": "A", "content": {}, "signatures": {}, "unsigned": {}},
-            room_version=RoomVersions.MSC2176,
-        )
-
-        # As of MSC3989 we now redact the origin key.
-        self.run_test(
-            {"type": "A", "origin": "example.com"},
-            {"type": "A", "content": {}, "signatures": {}, "unsigned": {}},
-            room_version=RoomVersions.MSC3989,
+            room_version=RoomVersions.V11,
         )
 
     def test_unsigned(self) -> None:

@@ -236,7 +234,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
                 "signatures": {},
                 "unsigned": {},
             },
-            room_version=RoomVersions.MSC2176,
+            room_version=RoomVersions.V11,
         )
 
     def test_power_levels(self) -> None:

@@ -286,7 +284,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
                 "signatures": {},
                 "unsigned": {},
             },
-            room_version=RoomVersions.MSC2176,
+            room_version=RoomVersions.V11,
         )
 
     def test_alias_event(self) -> None:

@@ -349,7 +347,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
                 "signatures": {},
                 "unsigned": {},
             },
-            room_version=RoomVersions.MSC2176,
+            room_version=RoomVersions.V11,
         )
 
     def test_join_rules(self) -> None:

@@ -472,7 +470,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
                 "signatures": {},
                 "unsigned": {},
             },
-            room_version=RoomVersions.MSC3821,
+            room_version=RoomVersions.V11,
         )
 
         # Ensure this doesn't break if an invalid field is sent.

@@ -491,7 +489,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
                 "signatures": {},
                 "unsigned": {},
             },
-            room_version=RoomVersions.MSC3821,
+            room_version=RoomVersions.V11,
        )
 
         self.run_test(

@@ -509,7 +507,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
                 "signatures": {},
                 "unsigned": {},
             },
-            room_version=RoomVersions.MSC3821,
+            room_version=RoomVersions.V11,
         )
 
     def test_relations(self) -> None:
@@ -41,7 +41,6 @@ class DeviceTestCase(unittest.HomeserverTestCase):
         self.appservice_api = mock.Mock()
         hs = self.setup_test_homeserver(
             "server",
-            federation_http_client=None,
             application_service_api=self.appservice_api,
         )
         handler = hs.get_device_handler()

@@ -401,7 +400,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
 
 class DehydrationTestCase(unittest.HomeserverTestCase):
     def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-        hs = self.setup_test_homeserver("server", federation_http_client=None)
+        hs = self.setup_test_homeserver("server")
         handler = hs.get_device_handler()
         assert isinstance(handler, DeviceHandler)
         self.handler = handler

@@ -57,7 +57,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
     ]
 
     def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-        hs = self.setup_test_homeserver(federation_http_client=None)
+        hs = self.setup_test_homeserver()
         self.handler = hs.get_federation_handler()
         self.store = hs.get_datastores().main
         return hs
@@ -993,7 +993,6 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase):
     def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         hs = self.setup_test_homeserver(
             "server",
-            federation_http_client=None,
             federation_sender=Mock(spec=FederationSender),
         )
         return hs
@@ -587,17 +587,16 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
         self.assertFalse(self.get_success(d))
 
     def test_invalid_user_id(self) -> None:
-        invalid_user_id = "+abcd"
+        invalid_user_id = "^abcd"
         self.get_failure(
             self.handler.register_user(localpart=invalid_user_id), SynapseError
         )
 
-    @override_config({"experimental_features": {"msc4009_e164_mxids": True}})
-    def text_extended_user_ids(self) -> None:
-        """+ should be allowed according to MSC4009."""
-        valid_user_id = "+1234"
+    def test_special_chars(self) -> None:
+        """Ensure that characters which are allowed in Matrix IDs work."""
+        valid_user_id = "a1234_-./=+"
         user_id = self.get_success(self.handler.register_user(localpart=valid_user_id))
-        self.assertEqual(user_id, valid_user_id)
+        self.assertEqual(user_id, f"@{valid_user_id}:test")
 
     def test_invalid_user_id_length(self) -> None:
         invalid_user_id = "x" * 256
@@ -17,6 +17,8 @@ import json
 from typing import Dict, List, Set
 from unittest.mock import ANY, Mock, call
 
+from netaddr import IPSet
+
 from twisted.test.proto_helpers import MemoryReactor
 from twisted.web.resource import Resource
 

@@ -24,6 +26,7 @@ from synapse.api.constants import EduTypes
 from synapse.api.errors import AuthError
 from synapse.federation.transport.server import TransportLayerServer
 from synapse.handlers.typing import TypingWriterHandler
+from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
 from synapse.server import HomeServer
 from synapse.types import JsonDict, Requester, UserID, create_requester
 from synapse.util import Clock

@@ -76,6 +79,13 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
         # we mock out the federation client too
         self.mock_federation_client = Mock(spec=["put_json"])
         self.mock_federation_client.put_json.return_value = make_awaitable((200, "OK"))
+        self.mock_federation_client.agent = MatrixFederationAgent(
+            reactor,
+            tls_client_options_factory=None,
+            user_agent=b"SynapseInTrialTest/0.0.0",
+            ip_allowlist=None,
+            ip_blocklist=IPSet(),
+        )
 
         # the tests assume that we are starting at unix time 1000
         reactor.pump((1000,))
@@ -356,7 +356,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
                 support_user_id, ProfileInfo("I love support me", None)
             )
         )
-        profile = self.get_success(self.store.get_user_in_directory(support_user_id))
+        profile = self.get_success(self.store._get_user_in_directory(support_user_id))
         self.assertIsNone(profile)
         display_name = "display_name"
 

@@ -364,7 +364,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
         self.get_success(
             self.handler.handle_local_profile_change(regular_user_id, profile_info)
         )
-        profile = self.get_success(self.store.get_user_in_directory(regular_user_id))
+        profile = self.get_success(self.store._get_user_in_directory(regular_user_id))
         assert profile is not None
         self.assertTrue(profile["display_name"] == display_name)
 

@@ -383,7 +383,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
         )
 
         # profile is in directory
-        profile = self.get_success(self.store.get_user_in_directory(r_user_id))
+        profile = self.get_success(self.store._get_user_in_directory(r_user_id))
         assert profile is not None
         self.assertTrue(profile["display_name"] == display_name)
 

@@ -392,7 +392,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
         self.get_success(self.handler.handle_local_user_deactivated(r_user_id))
 
         # profile is not in directory
-        profile = self.get_success(self.store.get_user_in_directory(r_user_id))
+        profile = self.get_success(self.store._get_user_in_directory(r_user_id))
         self.assertIsNone(profile)
 
         # update profile after deactivation

@@ -401,7 +401,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
         )
 
         # profile is furthermore not in directory
-        profile = self.get_success(self.store.get_user_in_directory(r_user_id))
+        profile = self.get_success(self.store._get_user_in_directory(r_user_id))
         self.assertIsNone(profile)
 
     def test_handle_local_profile_change_with_appservice_user(self) -> None:

@@ -411,7 +411,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
         )
 
         # profile is not in directory
-        profile = self.get_success(self.store.get_user_in_directory(as_user_id))
+        profile = self.get_success(self.store._get_user_in_directory(as_user_id))
         self.assertIsNone(profile)
 
         # update profile

@@ -421,13 +421,13 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
         )
 
         # profile is still not in directory
-        profile = self.get_success(self.store.get_user_in_directory(as_user_id))
+        profile = self.get_success(self.store._get_user_in_directory(as_user_id))
         self.assertIsNone(profile)
 
     def test_handle_local_profile_change_with_appservice_sender(self) -> None:
         # profile is not in directory
         profile = self.get_success(
-            self.store.get_user_in_directory(self.appservice.sender)
+            self.store._get_user_in_directory(self.appservice.sender)
         )
         self.assertIsNone(profile)
 

@@ -441,7 +441,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
 
         # profile is still not in directory
         profile = self.get_success(
-            self.store.get_user_in_directory(self.appservice.sender)
+            self.store._get_user_in_directory(self.appservice.sender)
         )
         self.assertIsNone(profile)
 
@@ -11,8 +11,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Generator
-from unittest.mock import Mock
+from typing import Any, Dict, Generator
+from unittest.mock import ANY, Mock, create_autospec
 
 from netaddr import IPSet
 from parameterized import parameterized

@@ -21,10 +21,12 @@ from twisted.internet import defer
 from twisted.internet.defer import Deferred, TimeoutError
 from twisted.internet.error import ConnectingCancelledError, DNSLookupError
 from twisted.test.proto_helpers import MemoryReactor, StringTransport
-from twisted.web.client import ResponseNeverReceived
+from twisted.web.client import Agent, ResponseNeverReceived
 from twisted.web.http import HTTPChannel
+from twisted.web.http_headers import Headers
 
-from synapse.api.errors import RequestSendFailed
+from synapse.api.errors import HttpResponseException, RequestSendFailed
+from synapse.config._base import ConfigError
 from synapse.http.matrixfederationclient import (
     ByteParser,
     MatrixFederationHttpClient,

@@ -39,7 +41,9 @@ from synapse.logging.context import (
 from synapse.server import HomeServer
 from synapse.util import Clock
 
+from tests.replication._base import BaseMultiWorkerStreamTestCase
 from tests.server import FakeTransport
+from tests.test_utils import FakeResponse
 from tests.unittest import HomeserverTestCase, override_config
 
 

@@ -658,3 +662,275 @@ class FederationClientTests(HomeserverTestCase):
         self.assertEqual(self.cl.max_short_retry_delay_seconds, 7)
         self.assertEqual(self.cl.max_long_retries, 20)
         self.assertEqual(self.cl.max_short_retries, 5)
+
+
+class FederationClientProxyTests(BaseMultiWorkerStreamTestCase):
+    def default_config(self) -> Dict[str, Any]:
+        conf = super().default_config()
+        conf["instance_map"] = {
+            "main": {"host": "testserv", "port": 8765},
+            "federation_sender": {"host": "testserv", "port": 1001},
+        }
+        return conf
+
+    @override_config(
+        {
+            "outbound_federation_restricted_to": ["federation_sender"],
+            "worker_replication_secret": "secret",
+        }
+    )
+    def test_proxy_requests_through_federation_sender_worker(self) -> None:
+        """
+        Test that all outbound federation requests go through the `federation_sender`
+        worker
+        """
+        # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance
+        # so we can act like some remote server responding to requests
+        mock_client_on_federation_sender = Mock()
+        mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True)
+        mock_client_on_federation_sender.agent = mock_agent_on_federation_sender
+
+        # Create the `federation_sender` worker
+        self.make_worker_hs(
+            "synapse.app.generic_worker",
+            {"worker_name": "federation_sender"},
+            federation_http_client=mock_client_on_federation_sender,
+        )
+
+        # Fake `remoteserv:8008` responding to requests
+        mock_agent_on_federation_sender.request.side_effect = (
+            lambda *args, **kwargs: defer.succeed(
+                FakeResponse.json(
+                    payload={
+                        "foo": "bar",
+                    }
+                )
+            )
+        )
+
+        # This federation request from the main process should be proxied through the
+        # `federation_sender` worker off to the remote server
+        test_request_from_main_process_d = defer.ensureDeferred(
+            self.hs.get_federation_http_client().get_json("remoteserv:8008", "foo/bar")
+        )
+
+        # Pump the reactor so our deferred goes through the motions
+        self.pump()
+
+        # Make sure that the request was proxied through the `federation_sender` worker
+        mock_agent_on_federation_sender.request.assert_called_once_with(
+            b"GET",
+            b"matrix-federation://remoteserv:8008/foo/bar",
+            headers=ANY,
+            bodyProducer=ANY,
+        )
+
+        # Make sure the response is as expected back on the main worker
+        res = self.successResultOf(test_request_from_main_process_d)
+        self.assertEqual(res, {"foo": "bar"})
+
+    @override_config(
+        {
+            "outbound_federation_restricted_to": ["federation_sender"],
+            "worker_replication_secret": "secret",
+        }
+    )
+    def test_proxy_request_with_network_error_through_federation_sender_worker(
+        self,
+    ) -> None:
+        """
+        Test that when the outbound federation request fails with a network related
+        error, a sensible error makes its way back to the main process.
+        """
+        # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance
+        # so we can act like some remote server responding to requests
+        mock_client_on_federation_sender = Mock()
+        mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True)
+        mock_client_on_federation_sender.agent = mock_agent_on_federation_sender
+
+        # Create the `federation_sender` worker
+        self.make_worker_hs(
+            "synapse.app.generic_worker",
+            {"worker_name": "federation_sender"},
+            federation_http_client=mock_client_on_federation_sender,
+        )
+
+        # Fake `remoteserv:8008` responding to requests
+        mock_agent_on_federation_sender.request.side_effect = (
+            lambda *args, **kwargs: defer.fail(ResponseNeverReceived("fake error"))
+        )
+
+        # This federation request from the main process should be proxied through the
+        # `federation_sender` worker off to the remote server
+        test_request_from_main_process_d = defer.ensureDeferred(
+            self.hs.get_federation_http_client().get_json("remoteserv:8008", "foo/bar")
+        )
+
+        # Pump the reactor so our deferred goes through the motions. We pump with 10
+        # seconds (0.1 * 100) so the `MatrixFederationHttpClient` runs out of retries
+        # and finally passes along the error response.
+        self.pump(0.1)
+
+        # Make sure that the request was proxied through the `federation_sender` worker
+        mock_agent_on_federation_sender.request.assert_called_with(
+            b"GET",
+            b"matrix-federation://remoteserv:8008/foo/bar",
+            headers=ANY,
+            bodyProducer=ANY,
+        )
+
+        # Make sure we get some sort of error back on the main worker
+        failure_res = self.failureResultOf(test_request_from_main_process_d)
+        self.assertIsInstance(failure_res.value, RequestSendFailed)
+        self.assertIsInstance(failure_res.value.inner_exception, HttpResponseException)
+        self.assertEqual(failure_res.value.inner_exception.code, 502)
+
+    @override_config(
+        {
+            "outbound_federation_restricted_to": ["federation_sender"],
+            "worker_replication_secret": "secret",
+        }
+    )
+    def test_proxy_requests_and_discards_hop_by_hop_headers(self) -> None:
+        """
+        Test to make sure hop-by-hop headers and addional headers defined in the
+        `Connection` header are discarded when proxying requests
+        """
+        # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance
+        # so we can act like some remote server responding to requests
+        mock_client_on_federation_sender = Mock()
+        mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True)
+        mock_client_on_federation_sender.agent = mock_agent_on_federation_sender
+
+        # Create the `federation_sender` worker
+        self.make_worker_hs(
+            "synapse.app.generic_worker",
+            {"worker_name": "federation_sender"},
+            federation_http_client=mock_client_on_federation_sender,
+        )
+
+        # Fake `remoteserv:8008` responding to requests
+        mock_agent_on_federation_sender.request.side_effect = lambda *args, **kwargs: defer.succeed(
+            FakeResponse(
+                code=200,
+                body=b'{"foo": "bar"}',
+                headers=Headers(
+                    {
+                        "Content-Type": ["application/json"],
+                        "Connection": ["close, X-Foo, X-Bar"],
+                        # Should be removed because it's defined in the `Connection` header
+                        "X-Foo": ["foo"],
+                        "X-Bar": ["bar"],
+                        # Should be removed because it's a hop-by-hop header
+                        "Proxy-Authorization": "abcdef",
+                    }
+                ),
+            )
+        )
+
+        # This federation request from the main process should be proxied through the
+        # `federation_sender` worker off to the remote server
+        test_request_from_main_process_d = defer.ensureDeferred(
+            self.hs.get_federation_http_client().get_json_with_headers(
+                "remoteserv:8008", "foo/bar"
+            )
+        )
+
+        # Pump the reactor so our deferred goes through the motions
+        self.pump()
+
+        # Make sure that the request was proxied through the `federation_sender` worker
+        mock_agent_on_federation_sender.request.assert_called_once_with(
+            b"GET",
+            b"matrix-federation://remoteserv:8008/foo/bar",
+            headers=ANY,
+            bodyProducer=ANY,
+        )
+
+        res, headers = self.successResultOf(test_request_from_main_process_d)
+        header_names = set(headers.keys())
+
+        # Make sure the response does not include the hop-by-hop headers
+        self.assertNotIn(b"X-Foo", header_names)
+        self.assertNotIn(b"X-Bar", header_names)
+        self.assertNotIn(b"Proxy-Authorization", header_names)
+        # Make sure the response is as expected back on the main worker
+        self.assertEqual(res, {"foo": "bar"})
+
+    @override_config(
+        {
+            "outbound_federation_restricted_to": ["federation_sender"],
+            # `worker_replication_secret` is set here so that the test setup is able to pass
+            # but the actual homserver creation test is in the test body below
+            "worker_replication_secret": "secret",
+        }
+    )
+    def test_not_able_to_proxy_requests_through_federation_sender_worker_when_no_secret_configured(
+        self,
+    ) -> None:
+        """
+        Test that we aren't able to proxy any outbound federation requests when
+        `worker_replication_secret` is not configured.
+        """
+        with self.assertRaises(ConfigError):
+            # Create the `federation_sender` worker
+            self.make_worker_hs(
+                "synapse.app.generic_worker",
+                {
+                    "worker_name": "federation_sender",
+                    # Test that we aren't able to proxy any outbound federation requests
+                    # when `worker_replication_secret` is not configured.
+                    "worker_replication_secret": None,
+                },
+            )
+
+    @override_config(
+        {
+            "outbound_federation_restricted_to": ["federation_sender"],
+            "worker_replication_secret": "secret",
+        }
+    )
+    def test_not_able_to_proxy_requests_through_federation_sender_worker_when_wrong_auth_given(
+        self,
+    ) -> None:
+        """
+        Test that we aren't able to proxy any outbound federation requests when the
+        wrong authorization is given.
+        """
+        # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance
+        # so we can act like some remote server responding to requests
+        mock_client_on_federation_sender = Mock()
+        mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True)
+        mock_client_on_federation_sender.agent = mock_agent_on_federation_sender
+
+        # Create the `federation_sender` worker
+        self.make_worker_hs(
+            "synapse.app.generic_worker",
+            {
+                "worker_name": "federation_sender",
+                # Test that we aren't able to proxy any outbound federation requests
+                # when `worker_replication_secret` is wrong.
+                "worker_replication_secret": "wrong",
+            },
+            federation_http_client=mock_client_on_federation_sender,
+        )
+
+        # This federation request from the main process should be proxied through the
+        # `federation_sender` worker off but will fail here because it's using the wrong
+        # authorization.
+        test_request_from_main_process_d = defer.ensureDeferred(
+            self.hs.get_federation_http_client().get_json("remoteserv:8008", "foo/bar")
+        )
+
+        # Pump the reactor so our deferred goes through the motions. We pump with 10
+        # seconds (0.1 * 100) so the `MatrixFederationHttpClient` runs out of retries
+        # and finally passes along the error response.
+        self.pump(0.1)
+
+        # Make sure that the request was *NOT* proxied through the `federation_sender`
+        # worker
+        mock_agent_on_federation_sender.request.assert_not_called()
+
+        failure_res = self.failureResultOf(test_request_from_main_process_d)
+        self.assertIsInstance(failure_res.value, HttpResponseException)
+        self.assertEqual(failure_res.value.code, 401)
@@ -0,0 +1,53 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Set
+
+from parameterized import parameterized
+
+from synapse.http.proxy import parse_connection_header_value
+
+from tests.unittest import TestCase
+
+
+class ProxyTests(TestCase):
+    @parameterized.expand(
+        [
+            [b"close, X-Foo, X-Bar", {"Close", "X-Foo", "X-Bar"}],
+            # No whitespace
+            [b"close,X-Foo,X-Bar", {"Close", "X-Foo", "X-Bar"}],
+            # More whitespace
+            [b"close,    X-Foo,      X-Bar", {"Close", "X-Foo", "X-Bar"}],
+            # "close" directive in not the first position
+            [b"X-Foo, X-Bar, close", {"X-Foo", "X-Bar", "Close"}],
+            # Normalizes header capitalization
+            [b"keep-alive, x-fOo, x-bAr", {"Keep-Alive", "X-Foo", "X-Bar"}],
+            # Handles header names with whitespace
+            [
+                b"keep-alive, x foo, x bar",
+                {"Keep-Alive", "X foo", "X bar"},
+            ],
+        ]
+    )
+    def test_parse_connection_header_value(
+        self,
+        connection_header_value: bytes,
+        expected_extra_headers_to_remove: Set[str],
+    ) -> None:
+        """
+        Tests that the connection header value is parsed correctly
+        """
+        self.assertEqual(
+            expected_extra_headers_to_remove,
+            parse_connection_header_value(connection_header_value),
+        )
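Editor's note: the implementation under test lives elsewhere in this commit (`synapse/http/proxy.py`), but the parameterized cases above pin its behaviour down well enough to sketch. A minimal stand-in that satisfies exactly these cases (not the actual Synapse code; the function name here is made up):

    from typing import Set

    def parse_connection_header_value_sketch(connection_header_value: bytes) -> Set[str]:
        extra_headers_to_remove: Set[str] = set()
        for directive in connection_header_value.decode("ascii").split(","):
            # Normalise the way Twisted's Headers does: capitalise each
            # dash-separated part, so "keep-alive" -> "Keep-Alive",
            # "x-fOo" -> "X-Foo", and "x foo" -> "X foo".
            name = directive.strip()
            extra_headers_to_remove.add(
                "-".join(part.capitalize() for part in name.split("-"))
            )
        return extra_headers_to_remove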
@@ -33,7 +33,7 @@ from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol
 from twisted.web.http import HTTPChannel
 
 from synapse.http.client import BlocklistingReactorWrapper
-from synapse.http.connectproxyclient import ProxyCredentials
+from synapse.http.connectproxyclient import BasicProxyCredentials
 from synapse.http.proxyagent import ProxyAgent, parse_proxy
 
 from tests.http import (

@@ -205,7 +205,7 @@ class ProxyParserTests(TestCase):
         """
         proxy_cred = None
         if expected_credentials:
-            proxy_cred = ProxyCredentials(expected_credentials)
+            proxy_cred = BasicProxyCredentials(expected_credentials)
         self.assertEqual(
             (
                 expected_scheme,
@ -22,6 +22,7 @@ from twisted.test.proto_helpers import MemoryReactor
|
|||
from twisted.web.resource import Resource
|
||||
|
||||
from synapse.app.generic_worker import GenericWorkerServer
|
||||
from synapse.config.workers import InstanceTcpLocationConfig, InstanceUnixLocationConfig
|
||||
from synapse.http.site import SynapseRequest, SynapseSite
|
||||
from synapse.replication.http import ReplicationRestResource
|
||||
from synapse.replication.tcp.client import ReplicationDataHandler
|
||||
|
@ -69,10 +70,10 @@ class BaseStreamTestCase(unittest.HomeserverTestCase):
        # Make a new HomeServer object for the worker
        self.reactor.lookups["testserv"] = "1.2.3.4"
        self.worker_hs = self.setup_test_homeserver(
            federation_http_client=None,
            homeserver_to_use=GenericWorkerServer,
            config=self._get_worker_hs_config(),
            reactor=self.reactor,
            federation_http_client=None,
        )

        # Since we use sqlite in memory databases we need to make sure the
@ -339,7 +340,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
        # `_handle_http_replication_attempt` like we do with the master HS.
        instance_name = worker_hs.get_instance_name()
        instance_loc = worker_hs.config.worker.instance_map.get(instance_name)
        if instance_loc:
        if instance_loc and isinstance(instance_loc, InstanceTcpLocationConfig):
            # Ensure the host is one that has a fake DNS entry.
            if instance_loc.host not in self.reactor.lookups:
                raise Exception(
@ -360,6 +361,10 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
                instance_loc.port,
                lambda: self._handle_http_replication_attempt(worker_hs, port),
            )
        elif instance_loc and isinstance(instance_loc, InstanceUnixLocationConfig):
            raise Exception(
                "Unix sockets are not supported for unit tests at this time."
            )

        store = worker_hs.get_datastores().main
        store.db_pool._db_pool = self.database_pool._db_pool
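The test harness now dispatches on the concrete location type: TCP locations get the fake-DNS wiring, while Unix-socket locations are rejected outright. The fields of the two config classes are not visible in this diff, so the dataclass shapes below are assumptions, chosen only to make the isinstance dispatch concrete:

from dataclasses import dataclass
from typing import Union


@dataclass
class InstanceTcpLocationConfig:
    host: str  # assumed field: hostname the worker listens on
    port: int  # assumed field: TCP port of the replication listener


@dataclass
class InstanceUnixLocationConfig:
    path: str  # assumed field: filesystem path of the Unix socket


InstanceLocationConfig = Union[InstanceTcpLocationConfig, InstanceUnixLocationConfig]


def replication_target(loc: InstanceLocationConfig) -> str:
    # Mirror the isinstance-based dispatch used in the test harness above.
    if isinstance(loc, InstanceTcpLocationConfig):
        return f"tcp://{loc.host}:{loc.port}"
    return f"unix://{loc.path}"


assert replication_target(InstanceTcpLocationConfig("testserv", 8765)) == "tcp://testserv:8765"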
@ -380,6 +385,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
            server_version_string="1",
            max_request_body_size=8192,
            reactor=self.reactor,
            hs=worker_hs,
        )

        worker_hs.get_replication_command_handler().start_replication(worker_hs)
@ -14,14 +14,18 @@
import logging
from unittest.mock import Mock

from netaddr import IPSet

from synapse.api.constants import EventTypes, Membership
from synapse.events.builder import EventBuilderFactory
from synapse.handlers.typing import TypingWriterHandler
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.rest.admin import register_servlets_for_client_rest_resource
from synapse.rest.client import login, room
from synapse.types import UserID, create_requester

from tests.replication._base import BaseMultiWorkerStreamTestCase
from tests.server import get_clock
from tests.test_utils import make_awaitable

logger = logging.getLogger(__name__)
@ -41,13 +45,25 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
        room.register_servlets,
    ]

    def setUp(self) -> None:
        super().setUp()

        reactor, _ = get_clock()
        self.matrix_federation_agent = MatrixFederationAgent(
            reactor,
            tls_client_options_factory=None,
            user_agent=b"SynapseInTrialTest/0.0.0",
            ip_allowlist=None,
            ip_blocklist=IPSet(),
        )

    def test_send_event_single_sender(self) -> None:
        """Test that using a single federation sender worker correctly sends a
        new event.
        """
        mock_client = Mock(spec=["put_json"])
        mock_client.put_json.return_value = make_awaitable({})

        mock_client.agent = self.matrix_federation_agent
        self.make_worker_hs(
            "synapse.app.generic_worker",
            {
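`make_awaitable` comes from `tests.test_utils` and wraps a plain value so a `Mock`'s `return_value` can be awaited. A self-contained sketch of such a helper; this is an illustrative version, not necessarily the exact Synapse implementation:

import asyncio
from typing import Awaitable, TypeVar
from unittest.mock import Mock

T = TypeVar("T")


def make_awaitable_sketch(result: T) -> Awaitable[T]:
    # Use an already-resolved Future rather than a coroutine: a Future can be
    # awaited repeatedly, and Mock hands back the same object on every call.
    future: "asyncio.Future[T]" = asyncio.get_event_loop().create_future()
    future.set_result(result)
    return future


async def main() -> None:
    mock_client = Mock(spec=["put_json"])
    mock_client.put_json.return_value = make_awaitable_sketch({"ok": True})
    # Awaiting twice works because the Future is already resolved.
    assert await mock_client.put_json("path", {}) == {"ok": True}
    assert await mock_client.put_json("path", {}) == {"ok": True}


asyncio.run(main())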
@ -78,6 +94,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
        """
        mock_client1 = Mock(spec=["put_json"])
        mock_client1.put_json.return_value = make_awaitable({})
        mock_client1.agent = self.matrix_federation_agent
        self.make_worker_hs(
            "synapse.app.generic_worker",
            {
@ -92,6 +109,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):

        mock_client2 = Mock(spec=["put_json"])
        mock_client2.put_json.return_value = make_awaitable({})
        mock_client2.agent = self.matrix_federation_agent
        self.make_worker_hs(
            "synapse.app.generic_worker",
            {
@ -145,6 +163,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
        """
        mock_client1 = Mock(spec=["put_json"])
        mock_client1.put_json.return_value = make_awaitable({})
        mock_client1.agent = self.matrix_federation_agent
        self.make_worker_hs(
            "synapse.app.generic_worker",
            {
@ -159,6 +178,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):

        mock_client2 = Mock(spec=["put_json"])
        mock_client2.put_json.return_value = make_awaitable({})
        mock_client2.agent = self.matrix_federation_agent
        self.make_worker_hs(
            "synapse.app.generic_worker",
            {
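Each of these hunks attaches a real MatrixFederationAgent to a Mock built with spec=["put_json"]. That works because `spec` only restricts attribute reads, not writes; `spec_set` is the stricter variant that would reject the assignment. A short demonstration of the distinction, using only standard `unittest.mock` behaviour:

from unittest.mock import Mock

client = Mock(spec=["put_json"])

# Reading an attribute outside the spec fails fast...
try:
    client.get_json
    raise AssertionError("expected AttributeError")
except AttributeError:
    pass

# ...but plain `spec` (unlike `spec_set`) still allows assigning new
# attributes, which is how these tests attach a real federation agent.
client.agent = "a real agent would go here"
assert client.agent == "a real agent would go here"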