Merge branch 'release-v0.6.0' into develop

commit 56db465047

CHANGES.rst

@@ -1,3 +1,12 @@
+Changes in synapse 0.6.0 (2014-12-16)
+=====================================
+
+ * Add a new API for media upload and download that supports thumbnailing.
+ * Implement typing notifications.
+ * Fix bugs where we sent events with invalid signatures, caused by
+   incorrectly persisted events.
+ * Improve performance of database queries involving retrieving events.
+
 Changes in synapse 0.5.4a (2014-12-13)
 ======================================
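
The media API in the first bullet is the new content repository. As a rough
illustration, a client round trip could look like the following Python 2
sketch; the homeserver URL, access token, and file name are placeholders, and
the /_matrix/media/v1 paths follow the v1 content repository convention rather
than anything shown in this diff::

    import json
    import urllib2

    HS = "https://localhost:8448"  # illustrative homeserver URL
    TOKEN = "ACCESS_TOKEN"         # illustrative access token

    # Upload raw bytes; the server responds with an mxc:// content URI.
    req = urllib2.Request(
        "%s/_matrix/media/v1/upload?access_token=%s" % (HS, TOKEN),
        data=open("avatar.png", "rb").read(),
        headers={"Content-Type": "image/png"},
    )
    content_uri = json.load(urllib2.urlopen(req))["content_uri"]

    # Ask the server for a generated thumbnail of the uploaded media.
    server_name, media_id = content_uri[len("mxc://"):].split("/", 1)
    thumbnail = urllib2.urlopen(
        "%s/_matrix/media/v1/thumbnail/%s/%s?width=64&height=64&method=scale"
        % (HS, server_name, media_id)
    ).read()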

UPGRADE.rst

@@ -1,3 +1,19 @@
+Upgrading to v0.6.0
+===================
+
+This update includes a change to the database schema. To upgrade, you first
+need to upgrade the database by running::
+
+    python scripts/upgrade_db_to_v0.6.0.py <db> <server_name> <signing_key>
+
+Where `<db>` is the location of the database, `<server_name>` is the
+server name as specified in the synapse configuration, and `<signing_key>` is
+the location of the signing key as specified in the synapse configuration.
+
+This may take some time to complete. Failures of signatures and content hashes
+can safely be ignored.
+
+
 Upgrading to v0.5.1
 ===================
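
For example, with purely illustrative values (substitute your own database
path, server name, and signing key location)::

    python scripts/upgrade_db_to_v0.6.0.py homeserver.db example.com example.com.signing.key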

scripts/upgrade_db_to_v0.6.0.py

@@ -1,3 +1,5 @@
 from synapse.storage import SCHEMA_VERSION, read_schema
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.signatures import SignatureStore
 from synapse.storage.event_federation import EventFederationStore

@@ -19,8 +21,9 @@ from syutil.crypto.signing_key import decode_verify_key_bytes
 from syutil.jsonutil import encode_canonical_json
 
 import argparse
-import dns.resolver
+# import dns.resolver
 import hashlib
+import httplib
 import json
 import sqlite3
 import syutil

@@ -38,6 +41,8 @@ CREATE TABLE IF NOT EXISTS event_json(
 CREATE INDEX IF NOT EXISTS event_json_id ON event_json(event_id);
 CREATE INDEX IF NOT EXISTS event_json_room_id ON event_json(room_id);
 
 PRAGMA user_version = 10;
 """

@@ -142,50 +147,57 @@ class Store(object):
 store = Store()
 
 
-def get_key(server_name):
-    print "Getting keys for: %s" % (server_name,)
-    targets = []
-    if ":" in server_name:
-        target, port = server_name.split(":")
-        targets.append((target, int(port)))
-        return
-    try:
-        answers = dns.resolver.query("_matrix._tcp." + server_name, "SRV")
-        for srv in answers:
-            targets.append((srv.target, srv.port))
-    except dns.resolver.NXDOMAIN:
-        targets.append((server_name, 8448))
-    except:
-        print "Failed to lookup keys for %s" % (server_name,)
-        return {}
-
-    for target, port in targets:
-        url = "https://%s:%i/_matrix/key/v1" % (target, port)
-        try:
-            keys = json.load(urllib2.urlopen(url, timeout=2))
-            verify_keys = {}
-            for key_id, key_base64 in keys["verify_keys"].items():
-                verify_key = decode_verify_key_bytes(
-                    key_id, decode_base64(key_base64)
-                )
-                verify_signed_json(keys, server_name, verify_key)
-                verify_keys[key_id] = verify_key
-            print "Got keys for: %s" % (server_name,)
-            return verify_keys
-        except urllib2.URLError:
-            pass
-
-    print "Failed to get keys for %s" % (server_name,)
-    return {}
+# def get_key(server_name):
+#     print "Getting keys for: %s" % (server_name,)
+#     targets = []
+#     if ":" in server_name:
+#         target, port = server_name.split(":")
+#         targets.append((target, int(port)))
+#     try:
+#         answers = dns.resolver.query("_matrix._tcp." + server_name, "SRV")
+#         for srv in answers:
+#             targets.append((srv.target, srv.port))
+#     except dns.resolver.NXDOMAIN:
+#         targets.append((server_name, 8448))
+#     except:
+#         print "Failed to lookup keys for %s" % (server_name,)
+#         return {}
+#
+#     for target, port in targets:
+#         url = "https://%s:%i/_matrix/key/v1" % (target, port)
+#         try:
+#             keys = json.load(urllib2.urlopen(url, timeout=2))
+#             verify_keys = {}
+#             for key_id, key_base64 in keys["verify_keys"].items():
+#                 verify_key = decode_verify_key_bytes(
+#                     key_id, decode_base64(key_base64)
+#                 )
+#                 verify_signed_json(keys, server_name, verify_key)
+#                 verify_keys[key_id] = verify_key
+#             print "Got keys for: %s" % (server_name,)
+#             return verify_keys
+#         except urllib2.URLError:
+#             pass
+#         except urllib2.HTTPError:
+#             pass
+#         except httplib.HTTPException:
+#             pass
+#
+#     print "Failed to get keys for %s" % (server_name,)
+#     return {}
 
 
 def reinsert_events(cursor, server_name, signing_key):
     print "Running delta: v10"
 
     cursor.executescript(delta_sql)
 
     cursor.execute(
         "SELECT * FROM events ORDER BY rowid ASC"
     )
 
     print "Getting events..."
 
     rows = store.cursor_to_dict(cursor)
 
     events = store._generate_event_json(cursor, rows)
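
For readers unfamiliar with the helper used above, store.cursor_to_dict pairs
each row with the column names from cursor.description. A generic standalone
sketch of the same idea (plain sqlite3 idiom, not the synapse implementation
itself)::

    def cursor_to_dict(cursor):
        # Column names are the first element of each cursor.description entry.
        names = [d[0] for d in cursor.description]
        return [dict(zip(names, row)) for row in cursor.fetchall()]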

@@ -207,13 +219,20 @@ def reinsert_events(cursor, server_name, signing_key):
         }
     }
 
+    i = 0
+    N = len(events)
+
     for event in events:
-        for alg_name in event.hashes:
-            if check_event_content_hash(event, algorithms[alg_name]):
-                pass
-            else:
-                pass
-                print "FAIL content hash %s %s" % (alg_name, event.event_id, )
+        if i % 100 == 0:
+            print "Processed: %d/%d events" % (i, N,)
+        i += 1
+
+        # for alg_name in event.hashes:
+        #     if check_event_content_hash(event, algorithms[alg_name]):
+        #         pass
+        #     else:
+        #         pass
+        #         print "FAIL content hash %s %s" % (alg_name, event.event_id, )
 
         have_own_correctly_signed = False
         for host, sigs in event.signatures.items():
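
Conceptually, the check being disabled here recomputes each event's content
hash: strip the signatures, unsigned, and hashes keys, canonically encode what
remains, and digest it. A minimal sketch of that idea, assuming a sha256 hash
and reusing the canonical-JSON helper imported earlier (the helper function
and its arguments are illustrative, not the synapse implementation)::

    import hashlib

    from syutil.jsonutil import encode_canonical_json

    def sha256_content_hash_matches(event_dict, claimed_digest):
        # Hash the canonical JSON of the event minus its meta keys.
        stripped = dict(event_dict)
        for key in ("signatures", "unsigned", "hashes"):
            stripped.pop(key, None)
        computed = hashlib.sha256(encode_canonical_json(stripped)).digest()
        return computed == claimed_digest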

@@ -221,7 +240,7 @@ def reinsert_events(cursor, server_name, signing_key):
 
         for key_id in sigs:
             if host not in server_keys:
-                server_keys[host] = get_key(host)
+                server_keys[host] = {}  # get_key(host)
             if key_id in server_keys[host]:
                 try:
                     verify_signed_json(
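
The verify_signed_json call above comes from syutil. A self-contained
sign-then-verify round trip might look as follows; note that
generate_signing_key, get_verify_key, and sign_json are assumed from the era's
syutil API, since only verify_signed_json and decode_verify_key_bytes appear
in this diff::

    from syutil.crypto.jsonsign import sign_json, verify_signed_json
    from syutil.crypto.signing_key import generate_signing_key, get_verify_key

    signing_key = generate_signing_key("auto")  # "auto" is an illustrative key version
    signed = sign_json({"foo": "bar"}, "example.com", signing_key)
    # Raises if the signature does not verify against the public key.
    verify_signed_json(signed, "example.com", get_verify_key(signing_key))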

@@ -275,9 +294,25 @@ def reinsert_events(cursor, server_name, signing_key):
 def main(database, server_name, signing_key):
     conn = sqlite3.connect(database)
     cursor = conn.cursor()
 
+    # Do other deltas:
+    cursor.execute("PRAGMA user_version")
+    row = cursor.fetchone()
+
+    if row and row[0]:
+        user_version = row[0]
+        # Run every schema delta after the current version.
+        for v in range(user_version + 1, 10):
+            print "Running delta: %d" % (v,)
+            sql_script = read_schema("delta/v%d" % (v,))
+            cursor.executescript(sql_script)
+
     reinsert_events(cursor, server_name, signing_key)
 
     conn.commit()
 
     print "Success!"
 
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
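
The version gate in main() relies on SQLite's per-database user_version
pragma, the same value the schema files set at the bottom. Reading it in
isolation looks like this (the database path is illustrative)::

    import sqlite3

    conn = sqlite3.connect("homeserver.db")
    row = conn.execute("PRAGMA user_version").fetchone()
    user_version = row[0] if row else 0
    print "Current schema version: %d" % (user_version,)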

synapse/__init__.py

@@ -16,4 +16,4 @@
 """ This is a reference implementation of a synapse home server.
 """
 
-__version__ = "0.5.4a"
+__version__ = "0.6.0"

synapse/app/homeserver.py

@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.storage import prepare_database
+from synapse.storage import prepare_database, UpgradeDatabaseException
 
 from synapse.server import HomeServer

@@ -228,8 +228,15 @@ def setup():
     logger.info("Preparing database: %s...", db_name)
 
+    try:
         with sqlite3.connect(db_name) as db_conn:
             prepare_database(db_conn)
+    except UpgradeDatabaseException:
+        sys.stderr.write(
+            "\nFailed to upgrade database.\n"
+            "Have you followed any instructions in UPGRADE.rst?\n"
+        )
+        sys.exit(1)
 
     logger.info("Database prepared in %s.", db_name)

synapse/handlers/message.py

@@ -123,6 +123,11 @@ class MessageHandler(BaseHandler):
         self.validator.validate_new(builder)
 
+        self.ratelimit(builder.user_id)
+        # TODO(paul): Why does 'event' not have a 'user' object?
+        user = self.hs.parse_userid(builder.user_id)
+        assert self.hs.is_mine(user), "User must be our own: %s" % (user,)
+
         if builder.type == EventTypes.Member:
             membership = builder.content.get("membership", None)
             if membership == Membership.JOIN:

synapse/state.py

@@ -203,9 +203,10 @@ class StateHandler(object):
             }
 
             if event_type:
-                prev_states = conflicted_state.get(
-                    (event_type, state_key), {}
-                ).keys()
+                prev_states_events = conflicted_state.get(
+                    (event_type, state_key), []
+                )
+                prev_states = [s.event_id for s in prev_states_events]
             else:
                 prev_states = []

synapse/storage/__init__.py

@@ -66,7 +66,7 @@ SCHEMAS = [
 # Remember to update this number every time an incompatible change is made to
 # database schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 9
+SCHEMA_VERSION = 10
 
 
 class _RollbackButIsFineException(Exception):

@@ -446,6 +446,14 @@ def read_schema(schema):
     return schema_file.read()
 
 
+class PrepareDatabaseException(Exception):
+    pass
+
+
+class UpgradeDatabaseException(PrepareDatabaseException):
+    pass
+
+
 def prepare_database(db_conn):
     """ Set up all the dbs. Since all the *.sql files use IF NOT EXISTS,
     we don't have to worry about overwriting existing content.

@@ -470,6 +478,10 @@ def prepare_database(db_conn):
 
     # Run every schema delta after the current version.
     for v in range(user_version + 1, SCHEMA_VERSION + 1):
+        if v == 10:
+            raise UpgradeDatabaseException(
+                "No delta for version 10"
+            )
         sql_script = read_schema("delta/v%d" % (v))
         c.executescript(sql_script)

synapse/storage/schema/delta/v9.sql

@@ -20,4 +20,60 @@ CREATE TABLE IF NOT EXISTS destinations(
     retry_interval INTEGER
 );
 
 
+CREATE TABLE IF NOT EXISTS local_media_repository (
+    media_id TEXT,        -- The id used to refer to the media.
+    media_type TEXT,      -- The MIME-type of the media.
+    media_length INTEGER, -- Length of the media in bytes.
+    created_ts INTEGER,   -- When the content was uploaded in ms.
+    upload_name TEXT,     -- The name the media was uploaded with.
+    user_id TEXT,         -- The user who uploaded the file.
+    CONSTRAINT uniqueness UNIQUE (media_id)
+);
+
+CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails (
+    media_id TEXT,            -- The id used to refer to the media.
+    thumbnail_width INTEGER,  -- The width of the thumbnail in pixels.
+    thumbnail_height INTEGER, -- The height of the thumbnail in pixels.
+    thumbnail_type TEXT,      -- The MIME-type of the thumbnail.
+    thumbnail_method TEXT,    -- The method used to make the thumbnail.
+    thumbnail_length INTEGER, -- The length of the thumbnail in bytes.
+    CONSTRAINT uniqueness UNIQUE (
+        media_id, thumbnail_width, thumbnail_height, thumbnail_type
+    )
+);
+
+CREATE INDEX IF NOT EXISTS local_media_repository_thumbnails_media_id
+    ON local_media_repository_thumbnails (media_id);
+
+CREATE TABLE IF NOT EXISTS remote_media_cache (
+    media_origin TEXT,    -- The remote HS the media came from.
+    media_id TEXT,        -- The id used to refer to the media on that server.
+    media_type TEXT,      -- The MIME-type of the media.
+    created_ts INTEGER,   -- When the content was uploaded in ms.
+    upload_name TEXT,     -- The name the media was uploaded with.
+    media_length INTEGER, -- Length of the media in bytes.
+    filesystem_id TEXT,   -- The name used to store the media on disk.
+    CONSTRAINT uniqueness UNIQUE (media_origin, media_id)
+);
+
+CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails (
+    media_origin TEXT,        -- The remote HS the media came from.
+    media_id TEXT,            -- The id used to refer to the media.
+    thumbnail_width INTEGER,  -- The width of the thumbnail in pixels.
+    thumbnail_height INTEGER, -- The height of the thumbnail in pixels.
+    thumbnail_method TEXT,    -- The method used to make the thumbnail.
+    thumbnail_type TEXT,      -- The MIME-type of the thumbnail.
+    thumbnail_length INTEGER, -- The length of the thumbnail in bytes.
+    filesystem_id TEXT,       -- The name used to store the media on disk.
+    CONSTRAINT uniqueness UNIQUE (
+        media_origin, media_id, thumbnail_width, thumbnail_height,
+        thumbnail_type
+    )
+);
+
+CREATE INDEX IF NOT EXISTS remote_media_cache_thumbnails_media_id
+    ON remote_media_cache_thumbnails (media_id);
+
+
+PRAGMA user_version = 9;
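
Once this delta has run, the thumbnail metadata can be inspected directly. For
instance, listing the stored sizes for one local media id (the database path
and media id are illustrative)::

    import sqlite3

    conn = sqlite3.connect("homeserver.db")
    rows = conn.execute(
        "SELECT thumbnail_width, thumbnail_height, thumbnail_type"
        " FROM local_media_repository_thumbnails WHERE media_id = ?",
        ("some_media_id",),
    ).fetchall()
    for width, height, mime_type in rows:
        print "%dx%d %s" % (width, height, mime_type)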