Scale the batch size so that we're not bitten by the minimum

Erik Johnston 2016-09-05 15:49:57 +01:00
parent a7032abb2e
commit 0595413c0f
1 changed file with 5 additions and 1 deletion


@@ -649,6 +649,10 @@ class StateStore(SQLBaseStore):
         rows_inserted = progress.get("rows_inserted", 0)
         max_group = progress.get("max_group", None)
 
+        BATCH_SIZE_SCALE_FACTOR = 100
+
+        batch_size = max(1, int(batch_size / BATCH_SIZE_SCALE_FACTOR))
+
         if max_group is None:
             rows = yield self._execute(
                 "_background_deduplicate_state", None,
@@ -779,4 +783,4 @@ class StateStore(SQLBaseStore):
         if finished:
             yield self._end_background_update(self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME)
 
-        defer.returnValue(result)
+        defer.returnValue(result * BATCH_SIZE_SCALE_FACTOR)
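
In outline: the background updater picks each batch size from how long earlier batches took, and enforces a minimum batch size. State-group deduplication is expensive enough per item that even the minimum batch takes too long, so the handler divides the batch size it is asked to run by 100 (keeping at least 1) and multiplies the count it reports back by 100, leaving the updater's accounting consistent. Below is a minimal standalone sketch of that pattern, not the actual StateStore code; run_deduplication_step is a hypothetical stand-in for the real per-batch work.

BATCH_SIZE_SCALE_FACTOR = 100


def run_deduplication_step(progress, n):
    """Hypothetical stand-in for the real per-batch deduplication work.

    Returns the number of items actually processed (here simply ``n``
    so the example runs end to end).
    """
    return n


def background_deduplicate_state(progress, batch_size):
    # The updater may ask for no fewer than its minimum batch size, but
    # each state group is costly to deduplicate, so only do 1/100th of
    # the requested work per call (never less than one item).
    scaled = max(1, int(batch_size / BATCH_SIZE_SCALE_FACTOR))

    processed = run_deduplication_step(progress, scaled)

    # Scale the reported count back up: from the updater's point of view
    # the full batch_size was handled, so its throughput estimate (and
    # hence the batch sizes it picks next) is unaffected.
    return processed * BATCH_SIZE_SCALE_FACTOR


# Asked for a batch of 100, the handler processes 1 item but reports
# 100 back to the updater.
print(background_deduplicate_state({}, 100))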