2021-04-14 06:54:49 -06:00
|
|
|
#!/usr/bin/env python
|
|
|
|
# Copyright 2021 The Matrix.org Foundation C.I.C.
|
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
|
|
|
# This script reads environment variables and generates a shared Synapse worker,
|
|
|
|
# nginx and supervisord configs depending on the workers requested.
|
|
|
|
#
|
|
|
|
# The environment variables it reads are:
|
|
|
|
# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
|
|
|
|
# * SYNAPSE_REPORT_STATS: Whether to report stats.
|
2023-03-14 10:29:33 -06:00
|
|
|
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKERS_CONFIG
|
|
|
|
# below. Leave empty for no workers. Add a ':' and a number at the end to
|
|
|
|
# multiply that worker. Append multiple worker types with '+' to merge the
|
|
|
|
# worker types into a single worker. Add a name and a '=' to the front of a
|
|
|
|
# worker type to give this instance a name in logs and nginx.
|
|
|
|
# Examples:
|
|
|
|
# SYNAPSE_WORKER_TYPES='event_persister, federation_sender, client_reader'
|
|
|
|
# SYNAPSE_WORKER_TYPES='event_persister:2, federation_sender:2, client_reader'
|
|
|
|
# SYNAPSE_WORKER_TYPES='stream_writers=account_data+presence+typing'
|
2022-05-23 07:11:06 -06:00
|
|
|
# * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
|
|
|
|
# will be treated as Application Service registration files.
|
2022-05-23 03:29:24 -06:00
|
|
|
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
|
|
|
|
# * SYNAPSE_TLS_KEY: Path to a TLS key. If this and SYNAPSE_TLS_CERT are specified,
|
|
|
|
# Nginx will be configured to serve TLS on port 8448.
|
2022-06-30 05:58:12 -06:00
|
|
|
# * SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER: Whether to use the forking launcher,
|
|
|
|
# only intended for usage in Complement at the moment.
|
|
|
|
# No stability guarantees are provided.
|
2022-07-05 03:46:20 -06:00
|
|
|
# * SYNAPSE_LOG_LEVEL: Set this to DEBUG, INFO, WARNING or ERROR to change the
|
|
|
|
# log level. INFO is the default.
|
|
|
|
# * SYNAPSE_LOG_SENSITIVE: If unset, SQL and SQL values won't be logged,
|
|
|
|
# regardless of the SYNAPSE_LOG_LEVEL setting.
|
2023-06-01 20:27:18 -06:00
|
|
|
# * SYNAPSE_LOG_TESTING: if set, Synapse will log additional information useful
|
|
|
|
# for testing.
|
2021-04-14 06:54:49 -06:00
|
|
|
#
|
|
|
|
# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
|
|
|
|
# in the project's README), this script may be run multiple times, and functionality should
|
|
|
|
# continue to work if so.
|
|
|
|
|
2023-11-16 08:00:48 -07:00
|
|
|
import dataclasses
|
2021-04-14 06:54:49 -06:00
|
|
|
import os
|
2022-10-14 06:29:49 -06:00
|
|
|
import platform
|
2023-03-14 10:29:33 -06:00
|
|
|
import re
|
2021-04-14 06:54:49 -06:00
|
|
|
import subprocess
|
|
|
|
import sys
|
2023-11-16 09:07:29 -07:00
|
|
|
from argparse import ArgumentParser
|
2023-03-14 10:29:33 -06:00
|
|
|
from collections import defaultdict
|
2023-11-16 08:00:48 -07:00
|
|
|
from dataclasses import dataclass, field
|
2023-03-14 10:29:33 -06:00
|
|
|
from itertools import chain
|
2022-05-23 07:11:06 -06:00
|
|
|
from pathlib import Path
|
2023-03-14 10:29:33 -06:00
|
|
|
from typing import (
|
|
|
|
Any,
|
2023-11-16 08:05:39 -07:00
|
|
|
Callable,
|
2023-03-14 10:29:33 -06:00
|
|
|
Dict,
|
|
|
|
List,
|
|
|
|
Mapping,
|
|
|
|
MutableMapping,
|
|
|
|
NoReturn,
|
|
|
|
Optional,
|
|
|
|
Set,
|
|
|
|
SupportsIndex,
|
|
|
|
)
|
2021-04-14 06:54:49 -06:00
|
|
|
|
|
|
|
import yaml
|
2022-06-08 03:57:05 -06:00
|
|
|
from jinja2 import Environment, FileSystemLoader
|
2021-04-14 06:54:49 -06:00
|
|
|
|
|
|
|
# Port the main Synapse process listens on for HTTP (passed to start.py as
# SYNAPSE_HTTP_PORT when generating the base homeserver config).
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
MAIN_PROCESS_INSTANCE_NAME = "main"
MAIN_PROCESS_LOCALHOST_ADDRESS = "127.0.0.1"
# Replication listener added to the main process so workers can attach to it.
MAIN_PROCESS_REPLICATION_PORT = 9093
# Obviously, these would only be used with the UNIX socket option
MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH = "/run/main_public.sock"
MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH = "/run/main_private.sock"


# We place a file at this path to indicate that the script has already been
# run and should not be run again.
MARKER_FILE_PATH = "/conf/workers_have_been_configured"
|
|
|
|
|
2023-11-16 08:00:48 -07:00
|
|
|
|
|
|
|
@dataclass
class WorkerTemplate:
    """
    A definition of individual settings for a specific worker type.

    A worker name can be fed into the template in order to generate a config.

    These worker templates can be merged with `merge_worker_template_configs`
    in order for a single worker to be made from multiple templates.
    """

    # Listener resource names this worker serves, e.g. "client", "federation",
    # "media" or "replication" (see the comment above WORKERS_CONFIG).
    listener_resources: Set[str] = field(default_factory=set)
    # Regexes for the HTTP endpoints handled by this worker; presumably used
    # to generate nginx location blocks — confirm in generate_worker_files.
    endpoint_patterns: Set[str] = field(default_factory=set)
    # (worker_name) -> {config}
    # Factory producing the extra configuration that this worker contributes
    # to the *shared* (homeserver-wide) config. Defaults to none.
    shared_extra_conf: Callable[[str], Dict[str, Any]] = lambda _worker_name: {}
    # Extra text appended to this worker's own config file (YAML; only used
    # by the media_repository template at present).
    worker_extra_conf: str = ""

    # True if and only if multiple of this worker type are allowed.
    sharding_allowed: bool = True
|
|
|
|
|
2023-11-16 08:00:48 -07:00
|
|
|
|
2023-11-16 08:26:11 -07:00
|
|
|
# Workers with exposed endpoints needs either "client", "federation", or "media" listener_resources
# Watching /_matrix/client needs a "client" listener
# Watching /_matrix/federation needs a "federation" listener
# Watching /_matrix/media and related needs a "media" listener
# Stream Writers require "client" and "replication" listeners because they
# have to attach by instance_map to the master process and have client endpoints.
WORKERS_CONFIG: Dict[str, WorkerTemplate] = {
    "pusher": WorkerTemplate(
        shared_extra_conf=lambda worker_name: {
            "pusher_instances": [worker_name],
        }
    ),
    "user_dir": WorkerTemplate(
        listener_resources={"client"},
        endpoint_patterns={
            "^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
        },
        shared_extra_conf=lambda worker_name: {
            "update_user_directory_from_worker": worker_name
        },
    ),
    "media_repository": WorkerTemplate(
        listener_resources={"media"},
        endpoint_patterns={
            "^/_matrix/media/",
            "^/_synapse/admin/v1/purge_media_cache$",
            "^/_synapse/admin/v1/room/.*/media.*$",
            "^/_synapse/admin/v1/user/.*/media.*$",
            "^/_synapse/admin/v1/media/.*$",
            "^/_synapse/admin/v1/quarantine_media/.*$",
        },
        # The first configured media worker will run the media background jobs
        shared_extra_conf=lambda worker_name: {
            "enable_media_repo": False,
            "media_instance_running_background_jobs": worker_name,
        },
        worker_extra_conf="enable_media_repo: true",
    ),
    "appservice": WorkerTemplate(
        shared_extra_conf=lambda worker_name: {
            "notify_appservices_from_worker": worker_name
        },
    ),
    "federation_sender": WorkerTemplate(
        shared_extra_conf=lambda worker_name: {
            "federation_sender_instances": [worker_name],
        }
    ),
    "synchrotron": WorkerTemplate(
        listener_resources={"client"},
        endpoint_patterns={
            "^/_matrix/client/(v2_alpha|r0|v3)/sync$",
            "^/_matrix/client/(api/v1|v2_alpha|r0|v3)/events$",
            "^/_matrix/client/(api/v1|r0|v3)/initialSync$",
            "^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$",
        },
    ),
    "client_reader": WorkerTemplate(
        listener_resources={"client"},
        endpoint_patterns={
            "^/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$",
            "^/_matrix/client/v1/rooms/.*/hierarchy$",
            "^/_matrix/client/(v1|unstable)/rooms/.*/relations/",
            "^/_matrix/client/v1/rooms/.*/threads$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/login$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/account/3pid$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/account/whoami$",
            "^/_matrix/client/versions$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$",
            "^/_matrix/client/(r0|v3|unstable)/register$",
            "^/_matrix/client/(r0|v3|unstable)/register/available$",
            "^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
            "^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
            "^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/search",
            "^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)",
            "^/_matrix/client/(r0|v3|unstable)/password_policy$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$",
            "^/_matrix/client/(r0|v3|unstable)/capabilities$",
            "^/_matrix/client/(r0|v3|unstable)/notifications$",
        },
    ),
    "federation_reader": WorkerTemplate(
        listener_resources={"federation"},
        endpoint_patterns={
            "^/_matrix/federation/(v1|v2)/event/",
            "^/_matrix/federation/(v1|v2)/state/",
            "^/_matrix/federation/(v1|v2)/state_ids/",
            "^/_matrix/federation/(v1|v2)/backfill/",
            "^/_matrix/federation/(v1|v2)/get_missing_events/",
            "^/_matrix/federation/(v1|v2)/publicRooms",
            "^/_matrix/federation/(v1|v2)/query/",
            "^/_matrix/federation/(v1|v2)/make_join/",
            "^/_matrix/federation/(v1|v2)/make_leave/",
            "^/_matrix/federation/(v1|v2)/send_join/",
            "^/_matrix/federation/(v1|v2)/send_leave/",
            "^/_matrix/federation/(v1|v2)/invite/",
            "^/_matrix/federation/(v1|v2)/query_auth/",
            "^/_matrix/federation/(v1|v2)/event_auth/",
            "^/_matrix/federation/v1/timestamp_to_event/",
            "^/_matrix/federation/(v1|v2)/exchange_third_party_invite/",
            "^/_matrix/federation/(v1|v2)/user/devices/",
            "^/_matrix/federation/(v1|v2)/get_groups_publicised$",
            "^/_matrix/key/v2/query",
        },
    ),
    "federation_inbound": WorkerTemplate(
        listener_resources={"federation"},
        # Fixed: anchored with '^' for consistency with every other endpoint
        # pattern in this table (the path previously matched anywhere in the
        # URI rather than only at its start).
        endpoint_patterns={"^/_matrix/federation/(v1|v2)/send/"},
    ),
    "event_persister": WorkerTemplate(
        listener_resources={"replication"},
        shared_extra_conf=lambda worker_name: {
            "stream_writers": {"events": [worker_name]}
        },
    ),
    "background_worker": WorkerTemplate(
        # This worker cannot be sharded. Therefore, there should only ever be one
        # background worker. This is enforced for the safety of your database.
        shared_extra_conf=lambda worker_name: {"run_background_tasks_on": worker_name},
        sharding_allowed=False,
    ),
    "event_creator": WorkerTemplate(
        listener_resources={"client"},
        endpoint_patterns={
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/join/",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/knock/",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/profile/",
        },
    ),
    "frontend_proxy": WorkerTemplate(
        listener_resources={"client", "replication"},
        endpoint_patterns={"^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"},
    ),
    "account_data": WorkerTemplate(
        listener_resources={"client", "replication"},
        endpoint_patterns={
            "^/_matrix/client/(r0|v3|unstable)/.*/tags",
            "^/_matrix/client/(r0|v3|unstable)/.*/account_data",
        },
        shared_extra_conf=lambda worker_name: {
            "stream_writers": {"account_data": [worker_name]}
        },
        sharding_allowed=False,
    ),
    "presence": WorkerTemplate(
        listener_resources={"client", "replication"},
        endpoint_patterns={"^/_matrix/client/(api/v1|r0|v3|unstable)/presence/"},
        shared_extra_conf=lambda worker_name: {
            "stream_writers": {"presence": [worker_name]}
        },
        sharding_allowed=False,
    ),
    "receipts": WorkerTemplate(
        listener_resources={"client", "replication"},
        endpoint_patterns={
            "^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt",
            "^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers",
        },
        shared_extra_conf=lambda worker_name: {
            "stream_writers": {"receipts": [worker_name]}
        },
        sharding_allowed=False,
    ),
    "to_device": WorkerTemplate(
        listener_resources={"client", "replication"},
        endpoint_patterns={"^/_matrix/client/(r0|v3|unstable)/sendToDevice/"},
        shared_extra_conf=lambda worker_name: {
            "stream_writers": {"to_device": [worker_name]}
        },
        sharding_allowed=False,
    ),
    "typing": WorkerTemplate(
        listener_resources={"client", "replication"},
        endpoint_patterns={"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing"},
        shared_extra_conf=lambda worker_name: {
            "stream_writers": {"typing": [worker_name]}
        },
        sharding_allowed=False,
    ),
}
|
|
|
|
|
|
|
|
# Templates for sections that may be inserted multiple times in config files

# One nginx `location` block proxying a single endpoint regex to a worker
# upstream. Filled via str.format with {endpoint} and {upstream}; literal
# nginx braces are escaped as {{ and }}.
NGINX_LOCATION_CONFIG_BLOCK = """
    location ~* {endpoint} {{
        proxy_pass {upstream};
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $host;
    }}
"""

# One nginx `upstream` block naming the backend server(s) for a worker group.
# Filled via str.format with {upstream_worker_base_name} and {body}.
NGINX_UPSTREAM_CONFIG_BLOCK = """
upstream {upstream_worker_base_name} {{
{body}
}}
"""
|
|
|
|
|
|
|
|
|
|
|
|
# Utility functions
|
2022-04-25 06:32:35 -06:00
|
|
|
def log(txt: str) -> None:
    """Write a message to stdout (single point to change if logging evolves)."""
    sys.stdout.write(txt + "\n")
|
|
|
|
|
|
|
|
|
2022-04-25 06:32:35 -06:00
|
|
|
def error(txt: str) -> NoReturn:
    """Report a fatal error on stderr, then terminate with exit status 2."""
    sys.stderr.write(txt + "\n")
    sys.exit(2)
|
|
|
|
|
|
|
|
|
2022-10-18 05:56:20 -06:00
|
|
|
def flush_buffers() -> None:
    """Flush stdout and stderr so any buffered output is written immediately."""
    for stream in (sys.stdout, sys.stderr):
        stream.flush()
|
|
|
|
|
|
|
|
|
2023-11-16 08:26:17 -07:00
|
|
|
def merge_into(dest: Any, new: Any) -> None:
    """
    Merges `new` into `dest`, mutating `dest` in place, with the following rules:

    - dicts: values with the same key will be merged recursively
    - lists: `new` will be appended to `dest`
    - primitives: they will be checked for equality and inequality will result
      in a ValueError

    Raises:
        TypeError: if `dest` and `new` are of different types.
        ValueError: if `dest` and `new` are unequal primitives.
    """
    if isinstance(dest, dict) and isinstance(new, dict):
        for k, v in new.items():
            if k in dest:
                merge_into(dest[k], v)
            else:
                dest[k] = v
    elif isinstance(dest, list) and isinstance(new, list):
        dest.extend(new)
    elif type(dest) is not type(new):
        # Identity comparison of types (rather than `!=`) is the idiomatic
        # form (PEP 8 / flake8 E721); the exact-type check is deliberate so
        # that e.g. a bool never silently merges with an int.
        raise TypeError(f"Cannot merge {type(dest).__name__} and {type(new).__name__}")
    elif dest != new:
        raise ValueError(f"Cannot merge primitive values: {dest!r} != {new!r}")
|
|
|
|
|
|
|
|
|
2023-11-16 08:47:28 -07:00
|
|
|
def merged(a: Dict[str, Any], b: Dict[str, Any]) -> Dict[str, Any]:
    """
    Merges `b` into `a` and returns `a`. Here because we can't use `merge_into`
    in a lambda conveniently.

    Raises whatever `merge_into` raises (TypeError/ValueError) on conflict.
    """
    merge_into(a, b)
    return a
|
|
|
|
|
|
|
|
|
2022-04-25 06:32:35 -06:00
|
|
|
def convert(src: str, dst: str, **template_vars: object) -> None:
    """Generate a file from a template

    Args:
        src: Path to the input file.
        dst: Path to write to.
        template_vars: The arguments to replace placeholder variables in the template with.
    """
    # Load the template. Autoescape is disabled on purpose: the outputs are
    # not HTML, so escaping template values would corrupt them.
    template_dir, template_name = os.path.split(src)
    env = Environment(loader=FileSystemLoader(template_dir), autoescape=False)
    rendered = env.get_template(template_name).render(**template_vars)

    # Append the rendered text to the destination.
    #
    # Append mode is used in case the file has already been written to by
    # something else (for instance, as part of the instructions in a
    # dockerfile). The leading newline guards against an existing file that
    # doesn't end with one.
    with open(dst, "a") as outfile:
        outfile.write("\n")
        outfile.write(rendered)
|
|
|
|
|
|
|
|
|
2023-11-16 08:49:20 -07:00
|
|
|
def add_worker_to_instance_map(
    shared_config: dict,
    worker_name: str,
    worker_port: int,
) -> None:
    """
    Update the shared config map to add the worker in the instance_map.

    Args:
        shared_config: The config dict that all worker instances share (after being
            converted to YAML)
        worker_name: The name of the worker instance.
        worker_port: The HTTP replication port that the worker instance is listening on.
    """
    instance_map = shared_config.setdefault("instance_map", {})

    # With UNIX sockets, workers are addressed by socket path instead of
    # host/port; the socket path embeds the (unused) port to stay unique.
    if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
        entry: Dict[str, Any] = {"path": f"/run/worker.{worker_port}"}
    else:
        entry = {"host": "localhost", "port": worker_port}
    instance_map[worker_name] = entry
|
2023-03-14 10:29:33 -06:00
|
|
|
|
|
|
|
|
|
|
|
def merge_worker_template_configs(
    left: WorkerTemplate,
    right: WorkerTemplate,
) -> WorkerTemplate:
    """Merges two templates together, returning a new template that includes
    the listeners, endpoint patterns and configuration from both.

    Does not mutate the input templates.
    """

    def combined_shared_conf(worker_name: str) -> Dict[str, Any]:
        # Merge the shared-config dicts produced by each side; the worker
        # name is substituted later, when the template is instantiated.
        return merged(
            left.shared_extra_conf(worker_name),
            right.shared_extra_conf(worker_name),
        )

    return WorkerTemplate(
        # Union of the listener resources and endpoint patterns of both sides.
        listener_resources=left.listener_resources.union(right.listener_resources),
        endpoint_patterns=left.endpoint_patterns.union(right.endpoint_patterns),
        shared_extra_conf=combined_shared_conf,
        # Only the media_repo worker type has a non-empty 'worker_extra_conf',
        # and duplicate worker types on the same worker don't work, so plain
        # concatenation is safe here.
        worker_extra_conf=(left.worker_extra_conf + right.worker_extra_conf),
        # (This is unused, but in principle sharding this hybrid worker type
        # would be allowed if both constituent types are shardable.)
        sharding_allowed=left.sharding_allowed and right.sharding_allowed,
    )
|
2023-11-16 08:00:48 -07:00
|
|
|
|
2023-03-14 10:29:33 -06:00
|
|
|
|
2023-12-06 08:18:12 -07:00
|
|
|
def instantiate_worker_template(
    template: WorkerTemplate, worker_name: str
) -> Dict[str, Any]:
    """Given a worker template, instantiate it into a worker configuration
    (which is currently represented as a dictionary).

    Args:
        template: The WorkerTemplate to template
        worker_name: The name of the worker to use.
    Returns: worker configuration dictionary
    """
    config = dataclasses.asdict(template)
    # Replace the template-only fields with concrete values: invoke the
    # shared-config factory with this worker's name, and turn the sets of
    # resources/patterns into stably ordered lists.
    config["shared_extra_conf"] = template.shared_extra_conf(worker_name)
    config["listener_resources"] = sorted(template.listener_resources)
    config["endpoint_patterns"] = sorted(template.endpoint_patterns)
    return config
|
2023-03-14 10:29:33 -06:00
|
|
|
|
|
|
|
|
|
|
|
def apply_requested_multiplier_for_worker(worker_types: List[str]) -> List[str]:
    """
    Apply multiplier(if found) by returning a new expanded list with some basic error
    checking.

    Args:
        worker_types: The unprocessed List of requested workers
    Returns:
        A new list with all requested workers expanded.
    """
    # Behaviour of a ':N' suffix:
    # 1. 'worker:2' (or more) adds that many copies of the worker type.
    # 2. 'worker:1' acts as if no number was given.
    # 3. 'worker:0' drops the worker entirely. This is to allow for scripting
    #    and automated expansion and is intended behaviour.
    # 4. A non-integer count is reported as an error (a negative count simply
    #    adds no copies).
    expanded_worker_types: List[str] = []
    for requested in worker_types:
        if ":" not in requested:
            # No multiplier. If it's not a real worker_type, it will error out
            # later.
            expanded_worker_types.append(requested)
            continue

        # Two components expected: a worker type and a count as a string.
        # Cast the count to an int so it can be used as a repeat factor.
        base_and_count = split_and_strip_string(requested, ":", 1)
        try:
            count = int(base_and_count[1])
        except ValueError:
            error(
                f"Bad number in worker count for '{requested}': "
                f"'{base_and_count[1]}' is not an integer"
            )

        # Add one copy of the worker type per requested instance.
        expanded_worker_types.extend([base_and_count[0]] * count)

    return expanded_worker_types
|
|
|
|
|
|
|
|
|
|
|
|
def split_and_strip_string(
    given_string: str, split_char: str, max_split: SupportsIndex = -1
) -> List[str]:
    """
    Helper to split a string on split_char and strip whitespace from each end of each
    element.

    Args:
        given_string: The string to split
        split_char: The character to split the string on
        max_split: kwarg for split() to limit how many times the split() happens;
            -1 (the default) means no limit
    Returns:
        A List of strings
    """
    pieces = given_string.split(split_char, maxsplit=max_split)
    # Strip whitespace from both ends of every resulting element.
    return list(map(str.strip, pieces))
|
2022-11-08 06:14:00 -07:00
|
|
|
|
2021-04-14 06:54:49 -06:00
|
|
|
|
2022-04-25 06:32:35 -06:00
|
|
|
def generate_base_homeserver_config() -> None:
    """Starts Synapse and generates a basic homeserver config, which will later be
    modified for worker support.

    Raises: CalledProcessError if calling start.py returned a non-zero exit code.
    """
    # Delegate to start.py, which already does this for us. (Note that that
    # script is copied in in the official, monolith dockerfile.)
    os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
    command = ["/usr/local/bin/python", "/start.py", "migrate_config"]
    subprocess.run(command, check=True)
|
2021-04-14 06:54:49 -06:00
|
|
|
|
|
|
|
|
2023-03-14 10:29:33 -06:00
|
|
|
def parse_worker_types(
    requested_worker_types: List[str],
) -> Dict[str, Set[str]]:
    """Read the desired list of requested workers and prepare the data for use in
    generating worker config files while also checking for potential gotchas.

    Args:
        requested_worker_types: The list formed from the split environment variable
            containing the unprocessed requests for workers.

    Returns: A dict of worker names to set of worker types. Format:
        {'worker_name':
            {'worker_type', 'worker_type2'}
        }
    """
    # A counter of worker_base_name -> int. Used for determining the name for a given
    # worker when generating its config file, as each worker's name is just
    # worker_base_name followed by instance number
    worker_base_name_counter: Dict[str, int] = defaultdict(int)

    # Similar to above, but more finely grained. This is used to determine we don't have
    # more than a single worker for cases where multiples would be bad(e.g. presence).
    worker_type_shard_counter: Dict[str, int] = defaultdict(int)

    # The final result of all this processing
    dict_to_return: Dict[str, Set[str]] = {}

    # Handle any multipliers requested for given workers.
    multiple_processed_worker_types = apply_requested_multiplier_for_worker(
        requested_worker_types
    )

    # Process each worker_type_string
    # Examples of expected formats:
    #  - requested_name=type1+type2+type3
    #  - synchrotron
    #  - event_creator+event_persister
    for worker_type_string in multiple_processed_worker_types:
        # First, if a name is requested, use that — otherwise generate one.
        worker_base_name: str = ""
        if "=" in worker_type_string:
            # Split on "=", remove extra whitespace from ends then make list
            worker_type_split = split_and_strip_string(worker_type_string, "=")
            if len(worker_type_split) > 2:
                error(
                    "There should only be one '=' in the worker type string. "
                    f"Please fix: {worker_type_string}"
                )

            # Assign the name
            worker_base_name = worker_type_split[0]

            if not re.match(r"^[a-zA-Z0-9_+-]*[a-zA-Z_+-]$", worker_base_name):
                # Apply a fairly narrow regex to the worker names. Some characters
                # aren't safe for use in file paths or nginx configurations.
                # Don't allow to end with a number because we'll add a number
                # ourselves in a moment.
                error(
                    "Invalid worker name; please choose a name consisting of "
                    "alphanumeric letters, _ + -, but not ending with a digit: "
                    f"{worker_base_name!r}"
                )

            # Continue processing the remainder of the worker_type string
            # with the name override removed.
            worker_type_string = worker_type_split[1]

        # Split the worker_type_string on "+", remove whitespace from ends then make
        # the list a set so it's deduplicated.
        worker_types_set: Set[str] = set(
            split_and_strip_string(worker_type_string, "+")
        )

        if not worker_base_name:
            # No base name specified: generate one deterministically from set of
            # types
            worker_base_name = "+".join(sorted(worker_types_set))

        # At this point, we have:
        #   worker_base_name which is the name for the worker, without counter.
        #   worker_types_set which is the set of worker types for this worker.

        # Validate worker_type and make sure we don't allow sharding for a worker type
        # that doesn't support it. Will error and stop if it is a problem,
        # e.g. 'background_worker'.
        for worker_type in worker_types_set:
            # Verify this is a real defined worker type. If it's not, stop everything so
            # it can be fixed.
            if worker_type not in WORKERS_CONFIG:
                error(
                    f"{worker_type} is an unknown worker type! Was found in "
                    f"'{worker_type_string}'. Please fix!"
                )

            # A second (or later) occurrence of an unshardable type is an error;
            # the first occurrence is always allowed.
            if worker_type in worker_type_shard_counter:
                if not WORKERS_CONFIG[worker_type].sharding_allowed:
                    error(
                        f"There can be only a single worker with {worker_type} "
                        "type. Please recount and remove."
                    )
            # Record that we've now seen this worker type once (more).
            worker_type_shard_counter[worker_type] += 1

        # Generate the number for the worker using incrementing counter
        worker_base_name_counter[worker_base_name] += 1
        worker_number = worker_base_name_counter[worker_base_name]
        worker_name = f"{worker_base_name}{worker_number}"

        if worker_number > 1:
            # If this isn't the first worker, check that we don't have a confusing
            # mixture of worker types with the same base name.
            first_worker_with_base_name = dict_to_return[f"{worker_base_name}1"]
            if first_worker_with_base_name != worker_types_set:
                error(
                    f"Can not use worker_name: '{worker_name}' for worker_type(s): "
                    f"{worker_types_set!r}. It is already in use by "
                    f"worker_type(s): {first_worker_with_base_name!r}"
                )

        dict_to_return[worker_name] = worker_types_set

    return dict_to_return
|
|
|
|
|
|
|
|
|
2022-04-25 06:32:35 -06:00
|
|
|
def generate_worker_files(
    environ: Mapping[str, str],
    config_path: str,
    data_dir: str,
    requested_worker_types: Dict[str, Set[str]],
) -> None:
    """Read the desired workers(if any) that is passed in and generate shared
    homeserver, nginx and supervisord configs.

    Args:
        environ: os.environ instance.
        config_path: The location of the generated Synapse main worker config file.
        data_dir: The location of the synapse data directory. Where log and
            user-facing config files live.
        requested_worker_types: A Dict containing requested workers in the format of
            {'worker_name1': {'worker_type', ...}}
    """
    # Note that yaml cares about indentation, so care should be taken to insert lines
    # into files at the correct indentation below.

    # Convenience helper for if using unix sockets instead of host:port.
    # NOTE(review): this is truthy on ANY non-empty string value of the env var
    # (including "0" or "false"); only an unset/empty var disables sockets.
    using_unix_sockets = environ.get("SYNAPSE_USE_UNIX_SOCKET", False)
    # First read the original config file and extract the listeners block. Then we'll
    # add another listener for replication. Later we'll write out the result to the
    # shared config file.
    listeners: List[Any]
    if using_unix_sockets:
        listeners = [
            {
                "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
                "type": "http",
                "resources": [{"names": ["replication"]}],
            }
        ]
    else:
        listeners = [
            {
                "port": MAIN_PROCESS_REPLICATION_PORT,
                "bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
                "type": "http",
                "resources": [{"names": ["replication"]}],
            }
        ]
    # Merge in any listeners the admin already configured in the main config file,
    # keeping the replication listener first.
    with open(config_path) as file_stream:
        original_config = yaml.safe_load(file_stream)
        original_listeners = original_config.get("listeners")
        if original_listeners:
            listeners += original_listeners

    # The shared homeserver config. The contents of which will be inserted into the
    # base shared worker jinja2 template. This config file will be passed to all
    # workers, included Synapse's main process. It is intended mainly for disabling
    # functionality when certain workers are spun up, and adding a replication listener.
    shared_config: Dict[str, Any] = {"listeners": listeners}

    # List of dicts that describe workers.
    # We pass this to the Supervisor template later to generate the appropriate
    # program blocks.
    worker_descriptors: List[Dict[str, Any]] = []

    # Upstreams for load-balancing purposes. This dict takes the form of the worker
    # type to the ports of each worker. For example:
    # {
    #     worker_type: {1234, 1235, ...}}
    # }
    # and will be used to construct 'upstream' nginx directives.
    nginx_upstreams: Dict[str, Set[int]] = {}

    # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what
    # will be placed after the proxy_pass directive. The main benefit to representing
    # this data as a dict over a str is that we can easily deduplicate endpoints
    # across multiple instances of the same worker. The final rendering will be combined
    # with nginx_upstreams and placed in /etc/nginx/conf.d.
    nginx_locations: Dict[str, str] = {}

    # Create the worker configuration directory if it doesn't already exist
    os.makedirs("/conf/workers", exist_ok=True)

    # Start worker ports from this arbitrary port
    worker_port = 18009

    # A list of internal endpoints to healthcheck, starting with the main process
    # which exists even if no workers do.
    # This list ends up being part of the command line to curl, (curl added support for
    # Unix sockets in version 7.40).
    if using_unix_sockets:
        healthcheck_urls = [
            f"--unix-socket {MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH} "
            # The scheme and hostname from the following URL are ignored.
            # The only thing that matters is the path `/health`
            "http://localhost/health"
        ]
    else:
        healthcheck_urls = ["http://localhost:8080/health"]

    # Get the set of all worker types that we have configured
    all_worker_types_in_use = set(chain(*requested_worker_types.values()))
    # Map locations to upstreams (corresponding to worker types) in Nginx
    # but only if we use the appropriate worker type
    for worker_type in all_worker_types_in_use:
        for endpoint_pattern in sorted(WORKERS_CONFIG[worker_type].endpoint_patterns):
            nginx_locations[endpoint_pattern] = f"http://{worker_type}"

    # For each worker type specified by the user, create config values and write it's
    # yaml config file
    for worker_name, worker_types_set in requested_worker_types.items():
        # The collected and processed data will live here.
        worker_template: WorkerTemplate = WorkerTemplate()

        # Merge all worker config templates for this worker into a single config
        for worker_type in worker_types_set:
            # Merge worker type template configuration data. It's a combination of lists
            # and dicts, so use this helper.
            worker_template = merge_worker_template_configs(
                worker_template, WORKERS_CONFIG[worker_type]
            )

        # Replace placeholder names in the config template with the actual worker name.
        worker_config: Dict[str, Any] = instantiate_worker_template(
            worker_template, worker_name
        )

        # Note the port is stringified here; the jinja template presumably expects
        # a string value — TODO confirm against /conf/worker.yaml.j2.
        worker_config.update(
            {"name": worker_name, "port": str(worker_port), "config_path": config_path}
        )

        # Update the shared config with any options needed to enable this worker.
        merge_into(shared_config, worker_config["shared_extra_conf"])

        if using_unix_sockets:
            healthcheck_urls.append(
                f"--unix-socket /run/worker.{worker_port} http://localhost/health"
            )
        else:
            healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))

        # Add all workers to the `instance_map`
        # Technically only certain types of workers, such as stream writers, are needed
        # here but it is simpler just to be consistent.
        add_worker_to_instance_map(shared_config, worker_name, worker_port)

        # Enable the worker in supervisord
        worker_descriptors.append(worker_config)

        # Write out the worker's logging config file
        log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)

        # Then a worker config file
        convert(
            "/conf/worker.yaml.j2",
            f"/conf/workers/{worker_name}.yaml",
            **worker_config,
            worker_log_config_filepath=log_config_filepath,
            using_unix_sockets=using_unix_sockets,
        )

        # Save this worker's port number to the correct nginx upstreams
        for worker_type in worker_types_set:
            nginx_upstreams.setdefault(worker_type, set()).add(worker_port)

        # Each worker instance gets its own consecutive port.
        worker_port += 1

    # Build the nginx location config blocks
    nginx_location_config = ""
    for endpoint, upstream in nginx_locations.items():
        nginx_location_config += NGINX_LOCATION_CONFIG_BLOCK.format(
            endpoint=endpoint,
            upstream=upstream,
        )

    # Determine the load-balancing upstreams to configure
    nginx_upstream_config = ""
    for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
        body = ""
        if using_unix_sockets:
            for port in upstream_worker_ports:
                body += f"    server unix:/run/worker.{port};\n"

        else:
            for port in upstream_worker_ports:
                body += f"    server localhost:{port};\n"

        # Add to the list of configured upstreams
        nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
            upstream_worker_base_name=upstream_worker_base_name,
            body=body,
        )

    # Finally, we'll write out the config files.

    # log config for the master process
    master_log_config = generate_worker_log_config(environ, "master", data_dir)
    shared_config["log_config"] = master_log_config

    # Find application service registrations
    appservice_registrations = None
    appservice_registration_dir = os.environ.get("SYNAPSE_AS_REGISTRATION_DIR")
    if appservice_registration_dir:
        # Scan for all YAML files that should be application service registrations.
        appservice_registrations = [
            str(reg_path.resolve())
            for reg_path in Path(appservice_registration_dir).iterdir()
            if reg_path.suffix.lower() in (".yaml", ".yml")
        ]

    workers_in_use = len(requested_worker_types) > 0

    # If there are workers, add the main process to the instance_map too.
    if workers_in_use:
        instance_map = shared_config.setdefault("instance_map", {})
        if using_unix_sockets:
            instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
                "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
            }
        else:
            instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
                "host": MAIN_PROCESS_LOCALHOST_ADDRESS,
                "port": MAIN_PROCESS_REPLICATION_PORT,
            }

    # Shared homeserver config
    convert(
        "/conf/shared.yaml.j2",
        "/conf/workers/shared.yaml",
        shared_worker_config=yaml.dump(shared_config),
        appservice_registrations=appservice_registrations,
        enable_redis=workers_in_use,
        workers_in_use=workers_in_use,
        using_unix_sockets=using_unix_sockets,
    )

    # Nginx config
    convert(
        "/conf/nginx.conf.j2",
        "/etc/nginx/conf.d/matrix-synapse.conf",
        worker_locations=nginx_location_config,
        upstream_directives=nginx_upstream_config,
        tls_cert_path=os.environ.get("SYNAPSE_TLS_CERT"),
        tls_key_path=os.environ.get("SYNAPSE_TLS_KEY"),
        using_unix_sockets=using_unix_sockets,
    )

    # Supervisord config
    os.makedirs("/etc/supervisor", exist_ok=True)
    convert(
        "/conf/supervisord.conf.j2",
        "/etc/supervisor/supervisord.conf",
        main_config_path=config_path,
        enable_redis=workers_in_use,
        using_unix_sockets=using_unix_sockets,
    )

    convert(
        "/conf/synapse.supervisord.conf.j2",
        "/etc/supervisor/conf.d/synapse.conf",
        workers=worker_descriptors,
        main_config_path=config_path,
        use_forking_launcher=environ.get("SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER"),
    )

    # healthcheck config
    convert(
        "/conf/healthcheck.sh.j2",
        "/healthcheck.sh",
        healthcheck_urls=healthcheck_urls,
    )

    # Ensure the logging directory exists
    log_dir = data_dir + "/logs"
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
2022-04-13 13:50:08 -06:00
|
|
|
def generate_worker_log_config(
    environ: Mapping[str, str], worker_name: str, data_dir: str
) -> str:
    """Render the logging config file for a single worker.

    Reads logging-related SYNAPSE_* environment variables and feeds them, along
    with the worker's name, into the /conf/log.config jinja2 template.

    Returns: the path to the generated file
    """
    extra_log_template_args: Dict[str, Optional[str]] = {}

    # Writing logs to disk (in addition to the console) is opt-in via env var.
    if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
        extra_log_template_args["LOG_FILE_PATH"] = f"{data_dir}/logs/{worker_name}.log"

    # Pass the remaining logging knobs straight through (None when unset).
    for env_var in (
        "SYNAPSE_LOG_LEVEL",
        "SYNAPSE_LOG_SENSITIVE",
        "SYNAPSE_LOG_TESTING",
    ):
        extra_log_template_args[env_var] = environ.get(env_var)

    # Render the template and write the per-worker config file.
    log_config_filepath = f"/conf/workers/{worker_name}.log.config"
    convert(
        "/conf/log.config",
        log_config_filepath,
        worker_name=worker_name,
        **extra_log_template_args,
        include_worker_name_in_log_line=environ.get(
            "SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER"
        ),
    )
    return log_config_filepath
|
|
|
|
2022-04-25 06:32:35 -06:00
|
|
|
def main(args: List[str], environ: MutableMapping[str, str]) -> None:
    """Entrypoint: generate worker/nginx/supervisord configs, then exec supervisord.

    Args:
        args: Command-line arguments, excluding the program name.
        environ: The process environment (normally os.environ). Mutated: sets
            SYNAPSE_NO_TLS, and may set LD_PRELOAD for jemalloc.
    """
    parser = ArgumentParser()
    parser.add_argument(
        "--generate-only",
        action="store_true",
        help="Only generate configuration; don't run Synapse.",
    )
    opts = parser.parse_args(args)

    config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
    config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml")
    data_dir = environ.get("SYNAPSE_DATA_DIR", "/data")

    # override SYNAPSE_NO_TLS, we don't support TLS in worker mode,
    # this needs to be handled by a frontend proxy
    environ["SYNAPSE_NO_TLS"] = "yes"

    # Generate the base homeserver config if one does not yet exist
    if not os.path.exists(config_path):
        log("Generating base homeserver config")
        generate_base_homeserver_config()
    else:
        log("Base homeserver config exists—not regenerating")
    # This script may be run multiple times (mostly by Complement, see note at top of
    # file). Don't re-configure workers in this instance.
    if not os.path.exists(MARKER_FILE_PATH):
        # Collect and validate worker_type requests
        # Read the desired worker configuration from the environment
        worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
        # Only process worker_types if they exist
        if not worker_types_env:
            # No workers, just the main process
            worker_types = []
            requested_worker_types: Dict[str, Any] = {}
        else:
            # Split type names by comma, ignoring whitespace.
            worker_types = split_and_strip_string(worker_types_env, ",")
            requested_worker_types = parse_worker_types(worker_types)

        # Always regenerate all other config files
        log("Generating worker config files")
        generate_worker_files(environ, config_path, data_dir, requested_worker_types)

        # Mark workers as being configured (an empty marker file is enough;
        # only its existence is checked above).
        with open(MARKER_FILE_PATH, "w") as f:
            f.write("")
    else:
        log("Worker config exists—not regenerating")

    if opts.generate_only:
        log("--generate-only: won't run Synapse")
        return

    # Lifted right out of start.py
    jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)

    if os.path.isfile(jemallocpath):
        environ["LD_PRELOAD"] = jemallocpath
    else:
        log("Could not find %s, will not use" % (jemallocpath,))

    # Start supervisord, which will start Synapse, all of the configured worker
    # processes, redis, nginx etc. according to the config we created above.
    # Note: os.execle replaces this process; nothing after it runs.
    log("Starting supervisord")
    flush_buffers()
    os.execle(
        "/usr/local/bin/supervisord",
        "supervisord",
        "-c",
        "/etc/supervisor/supervisord.conf",
        environ,
    )
|
2021-04-14 06:54:49 -06:00
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Run with CLI args (minus the program name) and the live process environment.
    main(sys.argv[1:], os.environ)
|