Fix some typos.
parent 2c9b4a5f16
commit 7950aa8a27

@@ -98,7 +98,7 @@ class AuthConfig(Config):
 # session to be active.
 #
 # This defaults to 0, meaning the user is queried for their credentials
-# before every action, but this can be overridden to alow a single
+# before every action, but this can be overridden to allow a single
 # validation to be re-used. This weakens the protections afforded by
 # the user-interactive authentication process, by allowing for multiple
 # (and potentially different) operations to use the same validation session.

@@ -123,7 +123,7 @@ class RoomDirectoryConfig(Config):
 alias (str)

 Returns:
-boolean: True if user is allowed to crate the alias
+boolean: True if user is allowed to create the alias
 """
 for rule in self._alias_creation_rules:
 if rule.matches(user_id, room_id, [alias]):

@@ -829,7 +829,7 @@ class RoomCreationHandler(BaseHandler):
 if room_alias:
 result["room_alias"] = room_alias.to_string()

-# Always wait for room creation to progate before returning
+# Always wait for room creation to propagate before returning
 await self._replication.wait_for_stream_position(
 self.hs.config.worker.events_shard_config.get_instance(room_id),
 "events",

@@ -524,7 +524,7 @@ class RulesForRoom:
 class _Invalidation:
 # _Invalidation is passed as an `on_invalidate` callback to bulk_get_push_rules,
 # which means that it it is stored on the bulk_get_push_rules cache entry. In order
-# to ensure that we don't accumulate lots of redunant callbacks on the cache entry,
+# to ensure that we don't accumulate lots of redundant callbacks on the cache entry,
 # we need to ensure that two _Invalidation objects are "equal" if they refer to the
 # same `cache` and `room_id`.
 #

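The comment in this hunk hinges on value equality: duplicate callbacks must compare equal so the cache entry keeps only one. A minimal sketch of that idea, assuming a hypothetical `Invalidation` holding a cache name and room ID (Synapse's real class holds the cache object itself):

from dataclasses import dataclass

@dataclass(frozen=True)
class Invalidation:
    # A frozen dataclass derives __eq__ and __hash__ from its fields, so
    # two instances naming the same cache and room are interchangeable.
    cache_name: str
    room_id: str

    def __call__(self) -> None:
        # Hypothetical invalidation hook; a real callback would evict
        # the (room_id,) entry from the named cache.
        print(f"invalidate {self.cache_name} for {self.room_id}")

callbacks = set()
callbacks.add(Invalidation("bulk_get_push_rules", "!room:example.org"))
callbacks.add(Invalidation("bulk_get_push_rules", "!room:example.org"))
assert len(callbacks) == 1  # duplicates collapse rather than accumulate
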
@@ -752,7 +752,7 @@ class PushersRestServlet(RestServlet):

 Returns:
 pushers: Dictionary containing pushers information.
-total: Number of pushers in dictonary `pushers`.
+total: Number of pushers in dictionary `pushers`.
 """

 PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/pushers$")

@@ -30,7 +30,7 @@ logger = logging.getLogger(__name__)


 class RoomUpgradeRestServlet(RestServlet):
-"""Handler for room uprade requests.
+"""Handler for room upgrade requests.

 Handles requests of the form:


@@ -137,7 +137,7 @@ def add_file_headers(
 # section 3.6 [2] to be a `token` or a `quoted-string`, where a `token`
 # is (essentially) a single US-ASCII word, and a `quoted-string` is a
 # US-ASCII string surrounded by double-quotes, using backslash as an
-# escape charater. Note that %-encoding is *not* permitted.
+# escape character. Note that %-encoding is *not* permitted.
 #
 # `filename*` is defined to be an `ext-value`, which is defined in
 # RFC5987 section 3.2.1 [3] to be `charset "'" [ language ] "'" value-chars`,

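The surrounding comment contrasts the `filename` grammar (a US-ASCII token or backslash-escaped quoted-string) with the RFC 5987 `ext-value` used by `filename*`. A rough sketch of that distinction, with a hypothetical helper name rather than the function this hunk belongs to:

from urllib.parse import quote

def content_disposition(upload_name: str) -> str:
    # Plain US-ASCII names can travel as a quoted-string, escaping
    # backslash and double-quote; %-encoding is not allowed here.
    if upload_name.isascii():
        escaped = upload_name.replace("\\", "\\\\").replace('"', '\\"')
        return f'inline; filename="{escaped}"'
    # Anything else uses the ext-value form:
    # charset "'" [ language ] "'" percent-encoded value-chars.
    return "inline; filename*=utf-8''" + quote(upload_name.encode("utf-8"))

print(content_disposition("report.pdf"))            # filename="report.pdf"
print(content_disposition("r\u00e9sum\u00e9.pdf"))  # filename*=utf-8''r%C3%A9sum%C3%A9.pdf
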
@@ -184,7 +184,7 @@ class MediaRepository:
 async def get_local_media(
 self, request: Request, media_id: str, name: Optional[str]
 ) -> None:
-"""Responds to reqests for local media, if exists, or returns 404.
+"""Responds to requests for local media, if exists, or returns 404.

 Args:
 request: The incoming request.

@@ -306,7 +306,7 @@ class MediaRepository:
 media_info = await self.store.get_cached_remote_media(server_name, media_id)

 # file_id is the ID we use to track the file locally. If we've already
-# seen the file then reuse the existing ID, otherwise genereate a new
+# seen the file then reuse the existing ID, otherwise generate a new
 # one.

 # If we have an entry in the DB, try and look for it

@@ -927,10 +927,10 @@ class MediaRepositoryResource(Resource):

 <thumbnail>

-The thumbnail methods are "crop" and "scale". "scale" trys to return an
+The thumbnail methods are "crop" and "scale". "scale" tries to return an
 image where either the width or the height is smaller than the requested
 size. The client should then scale and letterbox the image if it needs to
-fit within a given rectangle. "crop" trys to return an image where the
+fit within a given rectangle. "crop" tries to return an image where the
 width and height are close to the requested size and the aspect matches
 the requested size. The client should scale the image if it needs to fit
 within a given rectangle.

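The docstring this hunk touches defines the two thumbnail methods; the arithmetic it implies looks roughly like the sketch below (hypothetical helper names, not the repository's actual resizing code):

def scale_dimensions(w: int, h: int, max_w: int, max_h: int):
    # "scale": fit inside the requested box, keeping aspect ratio, so
    # either the width or the height may be smaller than requested.
    ratio = min(max_w / w, max_h / h, 1)
    return max(1, int(w * ratio)), max(1, int(h * ratio))

def crop_dimensions(w: int, h: int, max_w: int, max_h: int):
    # "crop": cover the requested box by scaling with the larger ratio,
    # then trim the overflow so the result matches the requested aspect.
    ratio = max(max_w / w, max_h / h)
    return int(w * ratio), int(h * ratio)  # center-crop to (max_w, max_h) afterwards

print(scale_dimensions(1920, 1080, 640, 480))  # (640, 360): height under the request
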
@@ -615,7 +615,7 @@ class StateResolutionHandler:
 event_map:
 a dict from event_id to event, for any events that we happen to
 have in flight (eg, those currently being persisted). This will be
-used as a starting point fof finding the state we need; any missing
+used as a starting point for finding the state we need; any missing
 events will be requested via state_map_factory.

 If None, all events will be fetched via state_res_store.

@@ -450,7 +450,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
 },
 )

-# Add the messages to the approriate local device inboxes so that
+# Add the messages to the appropriate local device inboxes so that
 # they'll be sent to the devices when they next sync.
 self._add_messages_to_local_device_inbox_txn(
 txn, stream_id, local_messages_by_user_then_device

@@ -371,7 +371,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
 # and state sets {A} and {B} then walking the auth chains of A and B
 # would immediately show that C is reachable by both. However, if we
 # stopped at C then we'd only reach E via the auth chain of B and so E
-# would errornously get included in the returned difference.
+# would erroneously get included in the returned difference.
 #
 # The other thing that we do is limit the number of auth chains we walk
 # at once, due to practical limits (i.e. we can only query the database

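The example in this comment is easier to see with full transitive closures in hand: the difference is everything reachable from some state set but not all of them, so the walk cannot stop at the first event every set reaches. A toy illustration on a hypothetical auth graph:

from typing import Dict, Set

def auth_chain(graph: Dict[str, Set[str]], event_id: str) -> Set[str]:
    # Full closure over auth edges, not just the first shared ancestor.
    seen: Set[str] = set()
    stack = [event_id]
    while stack:
        for parent in graph.get(stack.pop(), set()):
            if parent not in seen:
                seen.add(parent)
                stack.append(parent)
    return seen

# E is reachable from A only through C; B also reaches E directly.
graph = {"A": {"C"}, "B": {"C", "E"}, "C": {"E"}}
chains = [auth_chain(graph, e) for e in ("A", "B")]
assert set.union(*chains) - set.intersection(*chains) == set()
# Stopping the walk at C would miss A -> C -> E, leaving E looking
# reachable from B alone and erroneously placing it in the difference.
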
@@ -497,7 +497,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas

 a_ids = new_aids

-# Mark that the auth event is reachable by the approriate sets.
+# Mark that the auth event is reachable by the appropriate sets.
 sets.intersection_update(event_to_missing_sets[event_id])

 search.sort()

@@ -1050,7 +1050,7 @@ class PersistEventsStore:
 # Figure out the changes of membership to invalidate the
 # `get_rooms_for_user` cache.
 # We find out which membership events we may have deleted
-# and which we have added, then we invlidate the caches for all
+# and which we have added, then we invalidate the caches for all
 # those users.
 members_changed = {
 state_key

@@ -155,7 +155,7 @@ class KeyStore(SQLBaseStore):
 (server_name, key_id, from_server) triplet if one already existed.
 Args:
 server_name: The name of the server.
-key_id: The identifer of the key this JSON is for.
+key_id: The identifier of the key this JSON is for.
 from_server: The server this JSON was fetched from.
 ts_now_ms: The time now in milliseconds.
 ts_valid_until_ms: The time when this json stops being valid.

@@ -182,7 +182,7 @@ class KeyStore(SQLBaseStore):
 async def get_server_keys_json(
 self, server_keys: Iterable[Tuple[str, Optional[str], Optional[str]]]
 ) -> Dict[Tuple[str, Optional[str], Optional[str]], List[dict]]:
-"""Retrive the key json for a list of server_keys and key ids.
+"""Retrieve the key json for a list of server_keys and key ids.
 If no keys are found for a given server, key_id and source then
 that server, key_id, and source triplet entry will be an empty list.
 The JSON is returned as a byte array so that it can be efficiently

@@ -111,7 +111,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
 async def count_daily_sent_e2ee_messages(self):
 def _count_messages(txn):
 # This is good enough as if you have silly characters in your own
-# hostname then thats your own fault.
+# hostname then that's your own fault.
 like_clause = "%:" + self.hs.hostname

 sql = """

@@ -167,7 +167,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
 async def count_daily_sent_messages(self):
 def _count_messages(txn):
 # This is good enough as if you have silly characters in your own
-# hostname then thats your own fault.
+# hostname then that's your own fault.
 like_clause = "%:" + self.hs.hostname

 sql = """

@@ -160,7 +160,7 @@ class ReceiptsWorkerStore(SQLBaseStore):

 Args:
 room_id: List of room_ids.
-to_key: Max stream id to fetch receipts upto.
+to_key: Max stream id to fetch receipts up to.
 from_key: Min stream id to fetch receipts from. None fetches
 from the start.


@@ -189,7 +189,7 @@ class ReceiptsWorkerStore(SQLBaseStore):

 Args:
 room_ids: The room id.
-to_key: Max stream id to fetch receipts upto.
+to_key: Max stream id to fetch receipts up to.
 from_key: Min stream id to fetch receipts from. None fetches
 from the start.


@@ -312,7 +312,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
 to a limit of the latest 100 read receipts.

 Args:
-to_key: Max stream id to fetch receipts upto.
+to_key: Max stream id to fetch receipts up to.
 from_key: Min stream id to fetch receipts from. None fetches
 from the start.


@@ -1044,7 +1044,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
 async def _background_add_rooms_room_version_column(
 self, progress: dict, batch_size: int
 ):
-"""Background update to go and add room version inforamtion to `rooms`
+"""Background update to go and add room version information to `rooms`
 table from `current_state_events` table.
 """


@@ -64,7 +64,7 @@ class StateDeltasStore(SQLBaseStore):
 def get_current_state_deltas_txn(txn):
 # First we calculate the max stream id that will give us less than
 # N results.
-# We arbitarily limit to 100 stream_id entries to ensure we don't
+# We arbitrarily limit to 100 stream_id entries to ensure we don't
 # select toooo many.
 sql = """
 SELECT stream_id, count(*)

@@ -81,7 +81,7 @@ class StateDeltasStore(SQLBaseStore):
 for stream_id, count in txn:
 total += count
 if total > 100:
-# We arbitarily limit to 100 entries to ensure we don't
+# We arbitrarily limit to 100 entries to ensure we don't
 # select toooo many.
 logger.debug(
 "Clipping current_state_delta_stream rows to stream_id %i",

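Both of these hunks describe the same clipping trick: accumulate per-stream_id row counts in order and stop before the running total passes 100, so no single stream_id's rows are ever split across the limit. A small sketch of that loop under assumed inputs:

def clip_stream_id(rows, limit=100):
    # rows: (stream_id, count) pairs in ascending stream_id order.
    total, clipped = 0, None
    for stream_id, count in rows:
        total += count
        if total > limit:
            break  # including this stream_id would overshoot the limit
        clipped = stream_id
    return clipped

print(clip_stream_id([(10, 40), (11, 50), (12, 30)]))  # 11
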
@@ -198,7 +198,7 @@ class TransactionStore(TransactionWorkerStore):
 retry_interval: int,
 ) -> None:
 """Sets the current retry timings for a given destination.
-Both timings should be zero if retrying is no longer occuring.
+Both timings should be zero if retrying is no longer occurring.

 Args:
 destination

@@ -27,7 +27,7 @@ MAX_STATE_DELTA_HOPS = 100


 class StateGroupBackgroundUpdateStore(SQLBaseStore):
-"""Defines functions related to state groups needed to run the state backgroud
+"""Defines functions related to state groups needed to run the state background
 updates.
 """


@@ -113,7 +113,7 @@ def prepare_database(
 # which should be empty.
 if config is None:
 raise ValueError(
-"config==None in prepare_database, but databse is not empty"
+"config==None in prepare_database, but database is not empty"
 )

 # if it's a worker app, refuse to upgrade the database, to avoid multiple

@@ -245,7 +245,7 @@ class MultiWriterIdGenerator:
 # and b) noting that if we have seen a run of persisted positions
 # without gaps (e.g. 5, 6, 7) then we can skip forward (e.g. to 7).
 #
-# Note: There is no guarentee that the IDs generated by the sequence
+# Note: There is no guarantee that the IDs generated by the sequence
 # will be gapless; gaps can form when e.g. a transaction was rolled
 # back. This means that sometimes we won't be able to skip forward the
 # position even though everything has been persisted. However, since

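The run-skipping behaviour this comment describes can be sketched with a heap of persisted-but-not-yet-contiguous IDs (toy names, not the generator's real bookkeeping):

import heapq

class Position:
    def __init__(self, current: int = 0):
        self.current = current   # every ID <= current has been persisted
        self._pending = []       # min-heap of out-of-order persisted IDs

    def mark_persisted(self, new_id: int) -> None:
        heapq.heappush(self._pending, new_id)
        # Advance across a gapless run (e.g. 5, 6, 7 -> 7); a gap left by
        # a rolled-back transaction simply stops the advance early.
        while self._pending and self._pending[0] == self.current + 1:
            self.current = heapq.heappop(self._pending)

pos = Position(4)
for i in (7, 5, 6):
    pos.mark_persisted(i)
print(pos.current)  # 7: the run 5, 6, 7 contained no gaps
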
@@ -418,7 +418,7 @@ class MultiWriterIdGenerator:
 # bother, as nothing will read it).
 #
 # We only do this on the success path so that the persisted current
-# position points to a persited row with the correct instance name.
+# position points to a persisted row with the correct instance name.
 if self._writers:
 txn.call_after(
 run_as_background_process,

@@ -509,7 +509,7 @@ class MultiWriterIdGenerator:
 }

 def advance(self, instance_name: str, new_id: int):
-"""Advance the postion of the named writer to the given ID, if greater
+"""Advance the position of the named writer to the given ID, if greater
 than existing entry.
 """


@@ -675,7 +675,7 @@ class PersistedEventPosition:
 persisted in the same room after this position will be after the
 returned `RoomStreamToken`.

-Note: no guarentees are made about ordering w.r.t. events in other
+Note: no guarantees are made about ordering w.r.t. events in other
 rooms.
 """
 # Doing the naive thing satisfies the desired properties described in

@@ -497,7 +497,7 @@ def timeout_deferred(
 delayed_call = reactor.callLater(timeout, time_it_out)

 def convert_cancelled(value: failure.Failure):
-# if the orgininal deferred was cancelled, and our timeout has fired, then
+# if the original deferred was cancelled, and our timeout has fired, then
 # the reason it was cancelled was due to our timeout. Turn the CancelledError
 # into a TimeoutError.
 if timed_out[0] and value.check(CancelledError):

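The corrected comment describes turning a timeout-driven cancellation into a more useful error type. A stripped-down sketch of that errback pattern (using the stdlib TimeoutError as a stand-in for the exact exception the real function raises):

from twisted.internet import defer
from twisted.python import failure

timed_out = [True]  # in the real code, set by the delayed timeout call

def convert_cancelled(value: failure.Failure):
    # If our timeout fired, the CancelledError really means "timed out".
    if timed_out[0] and value.check(defer.CancelledError):
        raise TimeoutError("deferred timed out")
    return value  # unrelated failures pass through untouched

d = defer.Deferred()
d.addErrback(convert_cancelled)
d.cancel()  # no canceller, so this errbacks with CancelledError
d.addErrback(lambda f: print(f.type.__name__))  # prints: TimeoutError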