# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import logging
import time

from twisted.web.server import Request, Site

from synapse.http import redact_uri
from synapse.http.request_metrics import RequestMetrics, requests_counter
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext

logger = logging.getLogger(__name__)

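# Counter used to build per-process-unique request IDs: see
# SynapseRequest.get_request_id, which returns "<method>-<seq>".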
_next_request_seq = 0


class SynapseRequest(Request):
    """Class which encapsulates an HTTP request to synapse.

    All of the requests processed in synapse are of this type.

    It extends twisted's twisted.web.server.Request, and adds:
     * Unique request ID
     * A log context associated with the request
     * Redaction of access_token query-params in __repr__
     * Logging at start and end
     * Metrics to record CPU, wallclock and DB time by endpoint.

    It also provides a method `processing`, which returns a context manager. If this
    method is called, the request won't be logged until the context manager is closed;
    this is useful for asynchronous request handlers which may go on processing the
    request even after the client has disconnected.

    Attributes:
        logcontext(LoggingContext): the log context for this request
    """
    def __init__(self, site, channel, *args, **kw):
        Request.__init__(self, channel, *args, **kw)
        self.site = site
        self._channel = channel  # this is used by the tests
        self.authenticated_entity = None
        self.start_time = 0

        # we can't yet create the logcontext, as we don't know the method.
        self.logcontext = None

        global _next_request_seq
        self.request_seq = _next_request_seq
        _next_request_seq += 1

        # whether an asynchronous request handler has called processing()
        self._is_processing = False

        # the time when the asynchronous request handler completed its processing
        self._processing_finished_time = None

        # what time we finished sending the response to the client (or the connection
        # dropped)
        self.finish_time = None

    def __repr__(self):
        # We overwrite this so that we don't log ``access_token``
        return '<%s at 0x%x method=%r uri=%r clientproto=%r site=%r>' % (
            self.__class__.__name__,
            id(self),
            self.method,
            self.get_redacted_uri(),
            self.clientproto,
            self.site.site_tag,
        )

    def get_request_id(self):
        return "%s-%i" % (self.method, self.request_seq)

    def get_redacted_uri(self):
        uri = self.uri
        if isinstance(uri, bytes):
            uri = self.uri.decode('ascii')
        return redact_uri(uri)

    def get_user_agent(self):
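        # NB. this returns the raw value of the (last) User-Agent header as
        # bytes, or None if the client did not send one; callers such as
        # _finished_processing decode it themselves.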
        return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1]

    def render(self, resrc):
        # this is called once a Resource has been found to serve the request; in our
        # case the Resource in question will normally be a JsonResource.

        # create a LogContext for this request
        request_id = self.get_request_id()
        logcontext = self.logcontext = LoggingContext(request_id)
        logcontext.request = request_id

        # override the Server header which is set by twisted
        self.setHeader("Server", self.site.server_version_string)

        with PreserveLoggingContext(self.logcontext):
            # we start the request metrics timer here with an initial stab
            # at the servlet name. For most requests that name will be
            # JsonResource (or a subclass), and JsonResource._async_render
            # will update it once it picks a servlet.
            servlet_name = resrc.__class__.__name__
            self._started_processing(servlet_name)

            Request.render(self, resrc)

            # record the arrival of the request *after*
            # dispatching to the handler, so that the handler
            # can update the servlet name in the request
            # metrics
            requests_counter.labels(self.method,
                                    self.request_metrics.name).inc()

    @contextlib.contextmanager
    def processing(self):
        """Record the fact that we are processing this request.

        Returns a context manager; the correct way to use this is:

        @defer.inlineCallbacks
        def handle_request(request):
            with request.processing():
                yield really_handle_the_request()

        Once the context manager is closed, the completion of the request will be logged,
        and the various metrics will be updated.
        """
        if self._is_processing:
            raise RuntimeError("Request is already processing")
        self._is_processing = True

        try:
            yield
        except Exception:
            # this should already have been caught, and sent back to the client as a 500.
            logger.exception("Asynchronous message handler raised an uncaught exception")
        finally:
            # the request handler has finished its work and either sent the whole response
            # back, or handed over responsibility to a Producer.

            self._processing_finished_time = time.time()
            self._is_processing = False

            # if we've already sent the response, log it now; otherwise, we wait for the
            # response to be sent.
            if self.finish_time is not None:
                self._finished_processing()

    def finish(self):
        """Called when all response data has been written to this Request.

        Overrides twisted.web.server.Request.finish to record the finish time and do
        logging.
        """
        self.finish_time = time.time()
        Request.finish(self)
        if not self._is_processing:
            with PreserveLoggingContext(self.logcontext):
                self._finished_processing()

    def connectionLost(self, reason):
        """Called when the client connection is closed before the response is written.

        Overrides twisted.web.server.Request.connectionLost to record the finish time and
        do logging.
        """
        self.finish_time = time.time()
        Request.connectionLost(self, reason)

        # we only get here if the connection to the client drops before we send
        # the response.
        #
        # It's useful to log it here so that we can get an idea of when
        # the client disconnects.
        with PreserveLoggingContext(self.logcontext):
            logger.warn(
                "Error processing request %r: %s %s", self, reason.type, reason.value,
            )

            if not self._is_processing:
                self._finished_processing()

    def _started_processing(self, servlet_name):
        """Record the fact that we are processing this request.

        This will log the request's arrival. Once the request completes,
        be sure to call _finished_processing.

        Args:
            servlet_name (str): the name of the servlet which will be
                processing this request. This is used in the metrics.

                It is possible to update this afterwards by updating
                self.request_metrics.name.
        """
        self.start_time = time.time()
        self.request_metrics = RequestMetrics()
        self.request_metrics.start(
            self.start_time, name=servlet_name, method=self.method.decode('ascii'),
        )

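        # log the arrival of the request; the matching "Processed request" line
        # is emitted by _finished_processing once the response has been sent.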
        self.site.access_logger.info(
            "%s - %s - Received request: %s %s",
            self.getClientIP(),
            self.site.site_tag,
            self.method.decode('ascii'),
            self.get_redacted_uri()
        )

    def _finished_processing(self):
        """Log the completion of this request and update the metrics
        """
        if self.logcontext is None:
            # this can happen if the connection closed before we read the
            # headers (so render was never called). In that case we'll already
            # have logged a warning, so just bail out.
            return

        usage = self.logcontext.get_resource_usage()

        if self._processing_finished_time is None:
            # we completed the request without anything calling processing()
            self._processing_finished_time = time.time()

        # the time between receiving the request and the request handler finishing
        processing_time = self._processing_finished_time - self.start_time

        # the time between the request handler finishing and the response being sent
        # to the client (nb may be negative)
        response_send_time = self.finish_time - self._processing_finished_time

        # need to decode as it could be raw utf-8 bytes
        # from an IDN servname in an auth header
        authenticated_entity = self.authenticated_entity
        if authenticated_entity is not None and isinstance(authenticated_entity, bytes):
            authenticated_entity = authenticated_entity.decode("utf-8", "replace")

        # ...or could be raw utf-8 bytes in the User-Agent header.
        # N.B. if you don't do this, the logger explodes cryptically
        # with maximum recursion trying to log errors about
        # the charset problem.
        # c.f. https://github.com/matrix-org/synapse/issues/3471
        user_agent = self.get_user_agent()
        if user_agent is not None:
            user_agent = user_agent.decode("utf-8", "replace")
        else:
            user_agent = "-"

        code = str(self.code)
        if not self.finished:
            # we didn't send the full response before we gave up (presumably because
            # the connection dropped)
            code += "!"

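        # Access-log line, one per completed request. The fields are:
        #   <ip> - <site_tag> - {<authenticated entity>}
        #   Processed request: <handler time>sec/<response send time>sec
        #   (<user CPU>sec, <sys CPU>sec)
        #   (<db scheduling>sec/<db txn>sec/<txn count>)
        #   <bytes sent>B <code> "<method> <uri> <proto>" "<user agent>"
        #   [<events fetched from db> dbevts]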
        self.site.access_logger.info(
            "%s - %s - {%s}"
            " Processed request: %.3fsec/%.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
            " %sB %s \"%s %s %s\" \"%s\" [%d dbevts]",
            self.getClientIP(),
            self.site.site_tag,
            authenticated_entity,
            processing_time,
            response_send_time,
            usage.ru_utime,
            usage.ru_stime,
            usage.db_sched_duration_sec,
            usage.db_txn_duration_sec,
            int(usage.db_txn_count),
            self.sentLength,
            code,
            self.method,
            self.get_redacted_uri(),
            self.clientproto,
            user_agent,
            usage.evt_db_fetch_count,
        )

        try:
            self.request_metrics.stop(self.finish_time, self)
        except Exception as e:
            logger.warn("Failed to stop metrics: %r", e)


class XForwardedForRequest(SynapseRequest):
    """
    Add a layer on top of another request that only uses the value of an
    X-Forwarded-For header as the result of C{getClientIP}.
    """
    def __init__(self, *args, **kw):
        SynapseRequest.__init__(self, *args, **kw)

    def getClientIP(self):
        """
        @return: The client address (the first address) in the value of the
            I{X-Forwarded-For header}. If the header is not present, return
            C{b"-"}.
        """
        return self.requestHeaders.getRawHeaders(
            b"x-forwarded-for", [b"-"])[0].split(b",")[0].strip()


class SynapseRequestFactory(object):
    def __init__(self, site, x_forwarded_for):
        self.site = site
        self.x_forwarded_for = x_forwarded_for

    def __call__(self, *args, **kwargs):
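        # when synapse sits behind a reverse proxy, x_forwarded_for is set and
        # the client IP is taken from the X-Forwarded-For header rather than
        # from the TCP connection (see XForwardedForRequest.getClientIP).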
        if self.x_forwarded_for:
            return XForwardedForRequest(self.site, *args, **kwargs)
        else:
            return SynapseRequest(self.site, *args, **kwargs)


class SynapseSite(Site):
    """
    Subclass of a twisted http Site that does access logging with python's
    standard logging
    """
    def __init__(self, logger_name, site_tag, config, resource,
                 server_version_string, *args, **kwargs):
        Site.__init__(self, resource, *args, **kwargs)

        self.site_tag = site_tag
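
        # use our own Request objects, so that we get per-request logging and
        # metrics; behind a reverse proxy ("x_forwarded") the client IP is
        # taken from X-Forwarded-For.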
        proxied = config.get("x_forwarded", False)
        self.requestFactory = SynapseRequestFactory(self, proxied)
        self.access_logger = logging.getLogger(logger_name)
        self.server_version_string = server_version_string.encode('ascii')

    def log(self, request):
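        # intentionally a no-op: twisted's own per-request access logging is
        # suppressed, because SynapseRequest writes its own access log lines in
        # _started_processing / _finished_processing.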
        pass