Add a maximum size for well-known lookups. (#8950)
parent e1b8e37f93
commit ff5c4da128
@@ -0,0 +1 @@
+Add a maximum size of 50 kilobytes to .well-known lookups.

@@ -720,11 +720,14 @@ class SimpleHttpClient:
         try:
             length = await make_deferred_yieldable(
-                readBodyToFile(response, output_stream, max_size)
+                read_body_with_max_size(response, output_stream, max_size)
             )
-        except SynapseError:
-            # This can happen e.g. because the body is too large.
-            raise
+        except BodyExceededMaxSize:
+            raise SynapseError(
+                502,
+                "Requested file is too large > %r bytes" % (max_size,),
+                Codes.TOO_LARGE,
+            )
         except Exception as e:
             raise SynapseError(502, ("Failed to download remote body: %s" % e)) from e
@@ -748,7 +751,11 @@ def _timeout_to_request_timed_out_error(f: Failure):
     return f


-class _ReadBodyToFileProtocol(protocol.Protocol):
+class BodyExceededMaxSize(Exception):
+    """The maximum allowed size of the HTTP body was exceeded."""
+
+
+class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
     def __init__(
         self, stream: BinaryIO, deferred: defer.Deferred, max_size: Optional[int]
     ):
@@ -761,13 +768,7 @@ class _ReadBodyToFileProtocol(protocol.Protocol):
         self.stream.write(data)
         self.length += len(data)
         if self.max_size is not None and self.length >= self.max_size:
-            self.deferred.errback(
-                SynapseError(
-                    502,
-                    "Requested file is too large > %r bytes" % (self.max_size,),
-                    Codes.TOO_LARGE,
-                )
-            )
+            self.deferred.errback(BodyExceededMaxSize())
             self.deferred = defer.Deferred()
             self.transport.loseConnection()
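
The non-obvious part of the new protocol is the `self.deferred = defer.Deferred()` line: by the time the cap trips, the original Deferred has already been errbacked with BodyExceededMaxSize, so it is swapped for a fresh, unused Deferred before loseConnection() is called; otherwise the connectionLost() that follows would try to fire the same Deferred again and raise AlreadyCalledError. A standalone sketch of that pattern (illustrative class and method names, not part of the commit):

from twisted.internet import defer


class CappedReader:
    """Toy stand-in for the Deferred handling in _ReadBodyWithMaxSizeProtocol."""

    def __init__(self, deferred: defer.Deferred, max_size: int):
        self.deferred = deferred
        self.max_size = max_size
        self.length = 0

    def feed(self, data: bytes) -> None:
        # Mirrors dataReceived(): fail once the running total reaches the cap.
        self.length += len(data)
        if self.length >= self.max_size:
            self.deferred.errback(Exception("body exceeded maximum size"))
            # Swap in a throwaway Deferred so later events cannot re-fire
            # the one that has already been errbacked.
            self.deferred = defer.Deferred()

    def finished(self) -> None:
        # Mirrors connectionLost(): if feed() already failed, this lands on
        # the replacement Deferred, so AlreadyCalledError cannot occur.
        self.deferred.callback(self.length)
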
@@ -782,12 +783,15 @@ class _ReadBodyToFileProtocol(protocol.Protocol):
             self.deferred.errback(reason)


-def readBodyToFile(
+def read_body_with_max_size(
     response: IResponse, stream: BinaryIO, max_size: Optional[int]
 ) -> defer.Deferred:
     """
     Read a HTTP response body to a file-object. Optionally enforcing a maximum file size.

+    If the maximum file size is reached, the returned Deferred will resolve to a
+    Failure with a BodyExceededMaxSize exception.
+
     Args:
         response: The HTTP response to read from.
         stream: The file-object to write to.
@@ -798,7 +802,7 @@ def readBodyToFile(
     """

     d = defer.Deferred()
-    response.deliverBody(_ReadBodyToFileProtocol(stream, d, max_size))
+    response.deliverBody(_ReadBodyWithMaxSizeProtocol(stream, d, max_size))
     return d
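
Taken together, the changes above replace readBodyToFile (which raised SynapseError from inside the protocol) with a lower-level read_body_with_max_size that signals an over-size body via BodyExceededMaxSize and leaves the policy to each caller. A minimal usage sketch, assuming you already hold a twisted IResponse and are running inside a Synapse logcontext; the download_capped name is illustrative, and the SynapseError conversion mirrors what get_file() does above:

from io import BytesIO

from twisted.web.iweb import IResponse

from synapse.api.errors import Codes, SynapseError
from synapse.http.client import BodyExceededMaxSize, read_body_with_max_size
from synapse.logging.context import make_deferred_yieldable


async def download_capped(response: IResponse, max_size: int) -> bytes:
    """Read at most max_size bytes of the response body into memory."""
    output_stream = BytesIO()
    try:
        await make_deferred_yieldable(
            read_body_with_max_size(response, output_stream, max_size)
        )
    except BodyExceededMaxSize:
        raise SynapseError(
            502,
            "Requested file is too large > %r bytes" % (max_size,),
            Codes.TOO_LARGE,
        )
    return output_stream.getvalue()
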
@@ -15,17 +15,19 @@
 import logging
 import random
 import time
+from io import BytesIO
 from typing import Callable, Dict, Optional, Tuple

 import attr

 from twisted.internet import defer
 from twisted.internet.interfaces import IReactorTime
-from twisted.web.client import RedirectAgent, readBody
+from twisted.web.client import RedirectAgent
 from twisted.web.http import stringToDatetime
 from twisted.web.http_headers import Headers
 from twisted.web.iweb import IAgent, IResponse

+from synapse.http.client import BodyExceededMaxSize, read_body_with_max_size
 from synapse.logging.context import make_deferred_yieldable
 from synapse.util import Clock, json_decoder
 from synapse.util.caches.ttlcache import TTLCache
@@ -53,6 +55,9 @@ WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600
 # lower bound for .well-known cache period
 WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60

+# The maximum size (in bytes) to allow a well-known file to be.
+WELL_KNOWN_MAX_SIZE = 50 * 1024  # 50 KiB
+
 # Attempt to refetch a cached well-known N% of the TTL before it expires.
 # e.g. if set to 0.2 and we have a cached entry with a TTL of 5mins, then
 # we'll start trying to refetch 1 minute before it expires.
@@ -229,6 +234,9 @@ class WellKnownResolver:
             server_name: name of the server, from the requested url
             retry: Whether to retry the request if it fails.

+        Raises:
+            _FetchWellKnownFailure if we fail to lookup a result
+
         Returns:
             Returns the response object and body. Response may be a non-200 response.
         """
@@ -250,7 +258,11 @@ class WellKnownResolver:
                         b"GET", uri, headers=Headers(headers)
                     )
                 )
-                body = await make_deferred_yieldable(readBody(response))
+                body_stream = BytesIO()
+                await make_deferred_yieldable(
+                    read_body_with_max_size(response, body_stream, WELL_KNOWN_MAX_SIZE)
+                )
+                body = body_stream.getvalue()

                 if 500 <= response.code < 600:
                     raise Exception("Non-200 response %s" % (response.code,))
@@ -259,6 +271,15 @@ class WellKnownResolver:
             except defer.CancelledError:
                 # Bail if we've been cancelled
                 raise
+            except BodyExceededMaxSize:
+                # If the well-known file was too large, do not keep attempting
+                # to download it, but consider it a temporary error.
+                logger.warning(
+                    "Requested .well-known file for %s is too large > %r bytes",
+                    server_name.decode("ascii"),
+                    WELL_KNOWN_MAX_SIZE,
+                )
+                raise _FetchWellKnownFailure(temporary=True)
             except Exception as e:
                 if not retry or i >= WELL_KNOWN_RETRY_ATTEMPTS:
                     logger.info("Error fetching %s: %s", uri_str, e)
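
The net effect of the resolver hunks above is that a .well-known document is never read past 50 KiB: an over-size response raises BodyExceededMaxSize, which is converted straight into _FetchWellKnownFailure(temporary=True) instead of being retried, and the lookup is then treated like any other temporary failure (in the new test below this simply means no delegation is applied). For scale, a quick illustration with a hypothetical hostname (standalone snippet, not part of the commit):

WELL_KNOWN_MAX_SIZE = 50 * 1024  # 51200 bytes, as defined above

# A realistic delegation document is a few dozen bytes, nowhere near the cap.
typical_body = b'{ "m.server": "synapse.example.com:443" }'
assert len(typical_body) < WELL_KNOWN_MAX_SIZE
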
@@ -37,16 +37,19 @@ from twisted.web.iweb import IBodyProducer, IResponse
 import synapse.metrics
 import synapse.util.retryutils
 from synapse.api.errors import (
+    Codes,
     FederationDeniedError,
     HttpResponseException,
     RequestSendFailed,
+    SynapseError,
 )
 from synapse.http import QuieterFileBodyProducer
 from synapse.http.client import (
     BlacklistingAgentWrapper,
     BlacklistingReactorWrapper,
+    BodyExceededMaxSize,
     encode_query_args,
-    readBodyToFile,
+    read_body_with_max_size,
 )
 from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
 from synapse.logging.context import make_deferred_yieldable
@@ -975,9 +978,15 @@ class MatrixFederationHttpClient:
         headers = dict(response.headers.getAllRawHeaders())

         try:
-            d = readBodyToFile(response, output_stream, max_size)
+            d = read_body_with_max_size(response, output_stream, max_size)
             d.addTimeout(self.default_timeout, self.reactor)
             length = await make_deferred_yieldable(d)
+        except BodyExceededMaxSize:
+            msg = "Requested file is too large > %r bytes" % (max_size,)
+            logger.warning(
+                "{%s} [%s] %s", request.txn_id, request.destination, msg,
+            )
+            raise SynapseError(502, msg, Codes.TOO_LARGE)
         except Exception as e:
             logger.warning(
                 "{%s} [%s] Error reading response: %s",

@@ -36,6 +36,7 @@ from synapse.crypto.context_factory import FederationPolicyForHTTPS
 from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
 from synapse.http.federation.srv_resolver import Server
 from synapse.http.federation.well_known_resolver import (
+    WELL_KNOWN_MAX_SIZE,
     WellKnownResolver,
     _cache_period_from_headers,
 )
@@ -1107,6 +1108,32 @@ class MatrixFederationAgentTests(unittest.TestCase):
         r = self.successResultOf(fetch_d)
         self.assertEqual(r.delegated_server, None)

+    def test_well_known_too_large(self):
+        """A well-known query that returns a result which is too large should be rejected."""
+        self.reactor.lookups["testserv"] = "1.2.3.4"
+
+        fetch_d = defer.ensureDeferred(
+            self.well_known_resolver.get_well_known(b"testserv")
+        )
+
+        # there should be an attempt to connect on port 443 for the .well-known
+        clients = self.reactor.tcpClients
+        self.assertEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
+        self.assertEqual(host, "1.2.3.4")
+        self.assertEqual(port, 443)
+
+        self._handle_well_known_connection(
+            client_factory,
+            expected_sni=b"testserv",
+            response_headers={b"Cache-Control": b"max-age=1000"},
+            content=b'{ "m.server": "' + (b"a" * WELL_KNOWN_MAX_SIZE) + b'" }',
+        )
+
+        # The result is successful, but delegation is disabled.
+        r = self.successResultOf(fetch_d)
+        self.assertIsNone(r.delegated_server)
+
     def test_srv_fallbacks(self):
         """Test that other SRV results are tried if the first one fails.
         """
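
A short note on how the test body above is sized (illustrative arithmetic, not part of the commit): the JSON wrapper adds 18 bytes on top of the 51200-byte filler, so the body is comfortably past the cap, and since _ReadBodyWithMaxSizeProtocol errbacks as soon as length >= max_size, a body of exactly WELL_KNOWN_MAX_SIZE bytes would already be rejected.

WELL_KNOWN_MAX_SIZE = 50 * 1024

content = b'{ "m.server": "' + (b"a" * WELL_KNOWN_MAX_SIZE) + b'" }'
assert len(content) > WELL_KNOWN_MAX_SIZE  # 51218 > 51200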