This repository has been archived by the owner on Apr 26, 2024. It is now read-only.

Fix message duplication if something goes wrong after persisting the event #8476

Merged: 20 commits, Oct 13, 2020
1 change: 1 addition & 0 deletions changelog.d/8476.bugfix
@@ -0,0 +1 @@
+Fix message duplication if something goes wrong after persisting the event.
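The bug being fixed: a client that never sees the response to a send-event request retries it with the same transaction ID. That deduplication was previously handled by an in-memory cache of recent requests, so if the process died after persisting the event but before responding, the retry created a duplicate. This PR records a `(room ID, user ID, access token, transaction ID) → event ID` mapping in the same database transaction that persists the event, and answers retries with the original event. A minimal sketch of the idea — toy code with a dict standing in for the database, not Synapse's actual API:

```python
# Toy model of the fix (not Synapse code): the txn-ID -> event-ID mapping is
# written atomically with the event, so a retry after a crash still finds it.
from typing import Dict, Optional, Tuple

TxnKey = Tuple[str, str, int, str]  # (room_id, user_id, token_id, txn_id)

class Store:
    def __init__(self) -> None:
        self.events: Dict[str, dict] = {}
        self.event_txn_id: Dict[TxnKey, str] = {}

    def persist_event(self, event: dict, txn_key: Optional[TxnKey]) -> None:
        # In Synapse both writes happen in one database transaction: if we
        # crash before the commit, neither is visible and a retry is safe.
        self.events[event["event_id"]] = event
        if txn_key is not None:
            self.event_txn_id[txn_key] = event["event_id"]

def send_event(store: Store, txn_key: TxnKey, new_event: dict) -> str:
    existing = store.event_txn_id.get(txn_key)
    if existing is not None:
        return existing  # a retry: return the originally persisted event
    store.persist_event(new_event, txn_key)
    return new_event["event_id"]

store = Store()
key = ("!room:hs", "@alice:hs", 42, "txn1")
first = send_event(store, key, {"event_id": "$a"})
retry = send_event(store, key, {"event_id": "$b"})  # client never saw the 200
assert first == retry == "$a" and "$b" not in store.events
```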
9 changes: 6 additions & 3 deletions synapse/handlers/federation.py
@@ -2966,17 +2966,20 @@ async def persist_events_and_notify(
             return result["max_stream_id"]
         else:
             assert self.storage.persistence
-            max_stream_token = await self.storage.persistence.persist_events(
+
+            # Note that this returns the events that were persisted, which may not be
+            # the same as were passed in if some were deduplicated due to transaction IDs.
+            events, max_stream_token = await self.storage.persistence.persist_events(
                 event_and_contexts, backfilled=backfilled
             )

             if self._ephemeral_messages_enabled:
-                for (event, context) in event_and_contexts:
+                for event in events:
                     # If there's an expiry timestamp on the event, schedule its expiry.
                     self._message_handler.maybe_schedule_expiry(event)

             if not backfilled:  # Never notify for backfilled events
-                for event, _ in event_and_contexts:
+                for event in events:
                     await self._notify_persisted_event(event, max_stream_token)

             return max_stream_token.stream
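Because `persist_events` can now drop duplicates, a caller that iterated the *input* list would schedule expiry for, and notify about, events that were never actually persisted. A toy illustration of why the returned list must be used — a simplified stand-in for the persister, not the real one:

```python
# Simplified stand-in: duplicates (by transaction ID) are dropped, so the
# returned list can be shorter than the input.
from typing import List, Optional, Tuple

def persist_events(events_in: List[dict]) -> Tuple[List[dict], int]:
    seen_txn_ids = set()
    persisted = []
    for ev in events_in:
        txn_id: Optional[str] = ev.get("txn_id")
        if txn_id is not None:
            if txn_id in seen_txn_ids:
                continue  # deduplicated: must not be expired/notified later
            seen_txn_ids.add(txn_id)
        persisted.append(ev)
    return persisted, len(persisted)  # second value stands in for the stream token

events, _ = persist_events([{"id": "$a", "txn_id": "t1"}, {"id": "$b", "txn_id": "t1"}])
assert [e["id"] for e in events] == ["$a"]  # iterate this, not the input list
```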
48 changes: 40 additions & 8 deletions synapse/handlers/message.py
@@ -686,7 +686,7 @@ async def create_and_send_nonmember_event(
             send this event.

         Returns:
-            The event, and its stream ordering (if state event deduplication happened,
+            The event, and its stream ordering (if deduplication happened,
             the previous, duplicate event).

         Raises:
@@ -709,6 +709,19 @@ async def create_and_send_nonmember_event(
         # extremities to pile up, which in turn leads to state resolution
         # taking longer.
         with (await self.limiter.queue(event_dict["room_id"])):
+            if txn_id and requester.access_token_id:
+                existing_event_id = await self.store.get_event_id_from_transaction_id(
+                    event_dict["room_id"],
+                    requester.user.to_string(),
+                    requester.access_token_id,
+                    txn_id,
+                )
+                if existing_event_id:
+                    event = await self.store.get_event(existing_event_id)
+                    # we know it was persisted, so must have a stream ordering
+                    assert event.internal_metadata.stream_ordering
+                    return event, event.internal_metadata.stream_ordering
+
             event, context = await self.create_event(
                 requester, event_dict, token_id=requester.access_token_id, txn_id=txn_id
             )
@@ -910,10 +923,20 @@ async def handle_new_client_event(
                     extra_users=extra_users,
                 )
                 stream_id = result["stream_id"]
-                event.internal_metadata.stream_ordering = stream_id
+                event_id = result["event_id"]
+                if event_id != event.event_id:
+                    # If we get a different event back then it means that it's
+                    # been de-duplicated, so we replace the given event with the
+                    # one already persisted.
+                    event = await self.store.get_event(event_id)
+                else:
+                    # If we newly persisted the event then we need to update its
+                    # stream_ordering entry manually (as it was persisted on
+                    # another worker).
+                    event.internal_metadata.stream_ordering = stream_id
                 return event

-            stream_id = await self.persist_and_notify_client_event(
+            event = await self.persist_and_notify_client_event(
                 requester, event, context, ratelimit=ratelimit, extra_users=extra_users
             )

@@ -962,11 +985,16 @@ async def persist_and_notify_client_event(
         context: EventContext,
         ratelimit: bool = True,
         extra_users: List[UserID] = [],
-    ) -> int:
+    ) -> EventBase:
         """Called when we have fully built the event, have already
         calculated the push actions for the event, and checked auth.

         This should only be run on the instance in charge of persisting events.
+
+        Returns:
+            The persisted event. This may be different from the given event if
+            it was de-duplicated (e.g. because we had already persisted an
+            event with the same transaction ID.)
         """
         assert self.storage.persistence is not None
         assert self._events_shard_config.should_handle(
@@ -1134,9 +1162,13 @@ def is_inviter_member_event(e):
         if prev_state_ids:
             raise AuthError(403, "Changing the room create event is forbidden")

-        event_pos, max_stream_token = await self.storage.persistence.persist_event(
-            event, context=context
-        )
+        # Note that this returns the event that was persisted, which may not be
+        # the same as we passed in if it was deduplicated due to transaction IDs.
+        (
+            event,
+            event_pos,
+            max_stream_token,
+        ) = await self.storage.persistence.persist_event(event, context=context)

         if self._ephemeral_events_enabled:
             # If there's an expiry timestamp on the event, schedule its expiry.
@@ -1157,7 +1189,7 @@ def _notify():
         # matters as sometimes presence code can take a while.
         run_in_background(self._bump_active_time, requester.user)

-        return event_pos.stream
+        return event

     async def _bump_active_time(self, user: UserID) -> None:
         try:
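Taken together, the `message.py` changes establish a new contract: the handler returns the event that actually ended up in the database, which may differ from the one passed in, and the REST layer must echo the returned event's ID to the client. A self-contained toy model of that contract (names simplified; the real methods are async and hit the database):

```python
# Toy model of the new return contract for handle_new_client_event /
# persist_and_notify_client_event: trust the *returned* event.
from dataclasses import dataclass
from typing import Dict, Tuple

TxnKey = Tuple[str, int, str]  # (room_id, token_id, txn_id)

@dataclass
class Event:
    event_id: str
    txn_key: TxnKey

class Handler:
    def __init__(self) -> None:
        self._by_txn: Dict[TxnKey, Event] = {}

    def handle_new_client_event(self, event: Event) -> Event:
        existing = self._by_txn.get(event.txn_key)
        if existing is not None:
            return existing  # de-duplicated: the caller must use this one
        self._by_txn[event.txn_key] = event
        return event

h = Handler()
key = ("!room:hs", 42, "txn1")
first = h.handle_new_client_event(Event("$abc", key))
retry = h.handle_new_client_event(Event("$def", key))
# The REST layer echoes the *returned* event's ID back to the client:
assert retry.event_id == first.event_id == "$abc"
```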
13 changes: 12 additions & 1 deletion synapse/handlers/room_member.py
@@ -171,6 +171,17 @@ async def _local_membership_update(
         if requester.is_guest:
             content["kind"] = "guest"

+        # Check if we already have an event with a matching transaction ID. (We
+        # do this check just before we persist an event as well, but may as well
+        # do it up front for efficiency.)
+        if txn_id and requester.access_token_id:
+            existing_event_id = await self.store.get_event_id_from_transaction_id(
+                room_id, requester.user.to_string(), requester.access_token_id, txn_id,
+            )
+            if existing_event_id:
+                event_pos = await self.store.get_position_for_event(existing_event_id)
+                return existing_event_id, event_pos.stream
+
         event, context = await self.event_creation_handler.create_event(
             requester,
             {
@@ -679,7 +690,7 @@ async def send_membership_event(
         if is_blocked:
             raise SynapseError(403, "This room has been blocked on this server")

-        await self.event_creation_handler.handle_new_client_event(
+        event = await self.event_creation_handler.handle_new_client_event(
            requester, event, context, extra_users=[target_user], ratelimit=ratelimit
         )
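As the comment in the hunk above says, the early lookup in `_local_membership_update` is only an optimisation: it saves building and auth-checking an event for a known retry, while the check done at persist time (backed by the unique index on the mapping table) remains the authoritative one. The pattern, sketched with a dict standing in for the store:

```python
# Check early, but deduplicate authoritatively at persist time (sketch).
from typing import Callable, Dict, Tuple

TxnKey = Tuple[str, int, str]

class Store:
    def __init__(self) -> None:
        self._map: Dict[TxnKey, str] = {}

    def lookup(self, key: TxnKey):
        return self._map.get(key)

    def persist_dedup(self, key: TxnKey, event_id: str) -> str:
        # Stands in for the unique index: the first writer wins, later
        # writers get the already-persisted event back.
        return self._map.setdefault(key, event_id)

def send_membership(store: Store, key: TxnKey, build_event: Callable[[], str]) -> str:
    existing = store.lookup(key)  # cheap early check: skip event creation
    if existing:
        return existing
    return store.persist_dedup(key, build_event())

s = Store()
assert send_membership(s, ("!r:hs", 42, "t1"), lambda: "$a") == "$a"
assert send_membership(s, ("!r:hs", 42, "t1"), lambda: "$b") == "$a"  # retry deduped
```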
16 changes: 14 additions & 2 deletions synapse/replication/http/send_event.py
@@ -46,6 +46,12 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
"ratelimit": true,
"extra_users": [],
}

200 OK

{ "stream_id": 12345, "event_id": "$abcdef..." }

The returned event ID may not match the sent event if it was deduplicated.
"""

NAME = "send_event"
@@ -116,11 +122,17 @@ async def _handle_request(self, request, event_id):
"Got event to send with ID: %s into room: %s", event.event_id, event.room_id
)

stream_id = await self.event_creation_handler.persist_and_notify_client_event(
event = await self.event_creation_handler.persist_and_notify_client_event(
requester, event, context, ratelimit=ratelimit, extra_users=extra_users
)

return 200, {"stream_id": stream_id}
return (
200,
{
"stream_id": event.internal_metadata.stream_ordering,
"event_id": event.event_id,
},
)


def register_servlets(hs, http_server):
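Why the response now carries `event_id` as well as `stream_id`: a worker builds the event, but the persisting instance may substitute an already-persisted duplicate, and the worker can only learn of the substitution from the response. A toy round-trip, heavily simplified (no HTTP, no real replication layer):

```python
# Toy model of the persisting side of the send_event replication call.
from typing import Dict, Tuple

TxnKey = Tuple[str, int, str]  # roughly (user_id, token_id, txn_id)

def master_handle(txn_map: Dict[TxnKey, str], key: TxnKey, event_id: str) -> dict:
    persisted_id = txn_map.setdefault(key, event_id)  # first writer wins
    return {"stream_id": 12345, "event_id": persisted_id}

txn_map: Dict[TxnKey, str] = {}
key = ("@u:hs", 42, "txn1")
resp1 = master_handle(txn_map, key, "$first")
resp2 = master_handle(txn_map, key, "$retry")
assert resp1["event_id"] == "$first"
# The worker compares IDs and re-fetches the original event if they differ:
assert resp2["event_id"] == "$first"
```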
31 changes: 31 additions & 0 deletions synapse/storage/databases/main/events.py
@@ -361,6 +361,8 @@ def _persist_events_txn(

         self._store_event_txn(txn, events_and_contexts=events_and_contexts)

+        self._persist_transaction_ids_txn(txn, events_and_contexts)
+
         # Insert into event_to_state_groups.
         self._store_event_state_mappings_txn(txn, events_and_contexts)
@@ -405,6 +407,35 @@ def _persist_events_txn(
         # room_memberships, where applicable.
         self._update_current_state_txn(txn, state_delta_for_room, min_stream_order)

+    def _persist_transaction_ids_txn(
+        self,
+        txn: LoggingTransaction,
+        events_and_contexts: List[Tuple[EventBase, EventContext]],
+    ):
+        """Persist the mapping from transaction IDs to event IDs (if defined).
+        """
+
+        to_insert = []
+        for event, _ in events_and_contexts:
+            token_id = getattr(event.internal_metadata, "token_id", None)
+            txn_id = getattr(event.internal_metadata, "txn_id", None)
+            if token_id and txn_id:
+                to_insert.append(
+                    {
+                        "event_id": event.event_id,
+                        "room_id": event.room_id,
+                        "user_id": event.sender,
+                        "token_id": token_id,
+                        "txn_id": txn_id,
+                        "inserted_ts": self._clock.time_msec(),
+                    }
+                )
+
+        if to_insert:
+            self.db_pool.simple_insert_many_txn(
+                txn, table="event_txn_id", values=to_insert,
+            )
+
     def _update_current_state_txn(
         self,
         txn: LoggingTransaction,
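The crux of `_persist_transaction_ids_txn` is that it is called from inside `_persist_events_txn`, i.e. in the same database transaction as the event writes, so the mapping and the event become visible atomically. For illustration, a row it would insert looks like this (all values invented):

```python
# Example of the row shape inserted into event_txn_id (values are made up):
example_row = {
    "event_id": "$abcdef:example.com",
    "room_id": "!room:example.com",
    "user_id": "@alice:example.com",  # event.sender
    "token_id": 42,                   # access token the client sent with
    "txn_id": "m1602500000.1",        # client-chosen transaction ID
    "inserted_ts": 1602500000000,     # ms since epoch; used by the cleanup job
}
```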
83 changes: 82 additions & 1 deletion synapse/storage/databases/main/events_worker.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 import itertools
 import logging
 import threading
@@ -137,6 +136,15 @@ def __init__(self, database: DatabasePool, db_conn, hs):
             db_conn, "events", "stream_ordering", step=-1
         )

+        if not hs.config.worker.worker_app:
+            # We periodically clean out old transaction ID mappings
+            self._clock.looping_call(
+                run_as_background_process,
+                5 * 60 * 1000,
+                "_cleanup_old_transaction_ids",
+                self._cleanup_old_transaction_ids,
+            )
+
         self._get_event_cache = Cache(
             "*getEvent*",
             keylen=3,

[Review thread on `if not hs.config.worker.worker_app:`]
Reviewer: Can probably be put on the background worker?
Author: Ugh, yes but that work isn't in 1.21 :(
Reviewer: Oh, right. 🤦 I'll update it when this gets merged forward.
Reviewer: So if we're retargeting this to develop, seems like we can make this change!
@@ -1304,3 +1312,76 @@ def get_next_event_to_expire_txn(txn):
         return await self.db_pool.runInteraction(
             desc="get_next_event_to_expire", func=get_next_event_to_expire_txn
         )
+
+    async def get_event_id_from_transaction_id(
+        self, room_id: str, user_id: str, token_id: int, txn_id: str
+    ) -> Optional[str]:
+        """Look up if we have already persisted an event for the transaction ID,
+        returning the event ID if so.
+        """
+        return await self.db_pool.simple_select_one_onecol(
+            table="event_txn_id",
+            keyvalues={
+                "room_id": room_id,
+                "user_id": user_id,
+                "token_id": token_id,
+                "txn_id": txn_id,
+            },
+            retcol="event_id",
+            allow_none=True,
+            desc="get_event_id_from_transaction_id",
+        )
+
+    async def get_already_persisted_events(
+        self, events: Iterable[EventBase]
+    ) -> Dict[str, str]:
+        """Look up if we have already persisted an event for the transaction ID,
+        returning a mapping from event ID in the given list to the event ID of
+        an existing event.
+
+        Also checks if there are duplicates in the given events; if there are,
+        duplicates are mapped to the *first* event.
+        """
+
+        mapping = {}
+        txn_id_to_event = {}  # type: Dict[Tuple[str, int, str], str]
+
+        for event in events:
+            token_id = getattr(event.internal_metadata, "token_id", None)
+            txn_id = getattr(event.internal_metadata, "txn_id", None)
+
+            if token_id and txn_id:
+                # Check if this is a duplicate of an event in the given events.
+                existing = txn_id_to_event.get((event.room_id, token_id, txn_id))
+                if existing:
+                    mapping[event.event_id] = existing
+                    continue
+
+                # Check if this is a duplicate of an event we've already
+                # persisted.
+                existing = await self.get_event_id_from_transaction_id(
+                    event.room_id, event.sender, token_id, txn_id
+                )
+                if existing:
+                    mapping[event.event_id] = existing
+                    txn_id_to_event[(event.room_id, token_id, txn_id)] = existing
+                else:
+                    txn_id_to_event[(event.room_id, token_id, txn_id)] = event.event_id
+
+        return mapping
+
+    async def _cleanup_old_transaction_ids(self):
+        """Cleans out transaction id mappings older than 24hrs.
+        """
+
+        def _cleanup_old_transaction_ids_txn(txn):
+            sql = """
+            DELETE FROM event_txn_id
+            WHERE inserted_ts < ?
+            """
+            one_day_ago = self._clock.time_msec() - 24 * 60 * 60 * 1000
+            txn.execute(sql, (one_day_ago,))
+
+        return await self.db_pool.runInteraction(
+            "_cleanup_old_transaction_ids", _cleanup_old_transaction_ids_txn,
+        )

[Review thread on the per-event get_event_id_from_transaction_id lookup in get_already_persisted_events]
Author: I don't know if we want to try and batch these up a bit? The standard simple_select_many doesn't work due to it only supporting one column. There are psycopg2 helper functions that might help us, though.
Reviewer: It looks like something similar could be made which takes an iterable of tuples (or dicts)?
Author: Yup, I'm just not sure how much I want to try and do that for something that is going in an RC.
Reviewer: Park it for now, optimise later?

[Review thread on the 24-hour cutoff in _cleanup_old_transaction_ids]
Reviewer: One day ago is essentially the largest you could get behind on federation and still not have duplicates?
Author: We only get transaction IDs from local clients, who I believe shouldn't retry if significant time has passed.
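The mapping semantics of `get_already_persisted_events` are easiest to see in a worked example. A pure-Python stand-in for the logic above (the real method is async and consults the database via `get_event_id_from_transaction_id`; here a dict plays the database):

```python
# Duplicates within the batch map to the *first* event carrying that
# (room_id, token_id, txn_id) key; events already in the "database" map to
# the persisted event ID.
from typing import Dict, Tuple

TxnKey = Tuple[str, int, str]  # (room_id, token_id, txn_id)

def already_persisted(events, db: Dict[TxnKey, str]) -> Dict[str, str]:
    mapping: Dict[str, str] = {}
    seen: Dict[TxnKey, str] = {}
    for event_id, key in events:
        if key is None:
            continue  # no transaction ID: never deduplicated
        if key in seen:
            mapping[event_id] = seen[key]  # duplicate within this batch
            continue
        existing = db.get(key)
        if existing:
            mapping[event_id] = existing   # duplicate of a persisted event
            seen[key] = existing
        else:
            seen[key] = event_id
    return mapping

key = ("!room:hs", 42, "txn1")
batch = [("$a", key), ("$b", key), ("$c", None)]
assert already_persisted(batch, {}) == {"$b": "$a"}
assert already_persisted(batch, {key: "$old"}) == {"$a": "$old", "$b": "$old"}
```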
6 changes: 5 additions & 1 deletion synapse/storage/databases/main/registration.py
@@ -939,7 +939,7 @@ async def add_access_token_to_user(
         token: str,
         device_id: Optional[str],
         valid_until_ms: Optional[int],
-    ) -> None:
+    ) -> int:
         """Adds an access token for the given user.

         Args:
@@ -949,6 +949,8 @@
             valid_until_ms: when the token is valid until. None for no expiry.
         Raises:
             StoreError if there was a problem adding this.
+        Returns:
+            The token ID
         """
         next_id = self._access_tokens_id_gen.get_next()
@@ -964,6 +966,8 @@
             desc="add_access_token_to_user",
         )

+        return next_id
+
     def _set_device_for_access_token_txn(self, txn, token: str, device_id: str) -> str:
         old_device_id = self.db_pool.simple_select_one_onecol_txn(
             txn, "access_tokens", {"token": token}, "device_id"
40 changes: 40 additions & 0 deletions synapse/storage/databases/main/schema/delta/58/19txn_id.sql
@@ -0,0 +1,40 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+-- A map of recent events persisted with transaction IDs. Used to deduplicate
+-- send event requests with the same transaction ID.
+--
+-- Note: transaction IDs are scoped to the room ID/user ID/access token that was
+-- used to make the request.
+--
+-- Note: The foreign key constraints are ON DELETE CASCADE, as if we delete the
+-- events or access token we don't want to try and de-duplicate the event.
+CREATE TABLE IF NOT EXISTS event_txn_id (
+    event_id TEXT NOT NULL,
+    room_id TEXT NOT NULL,
+    user_id TEXT NOT NULL,
+    token_id BIGINT NOT NULL,
+    txn_id TEXT NOT NULL,
+    inserted_ts BIGINT NOT NULL,
+    FOREIGN KEY (event_id)
+        REFERENCES events (event_id) ON DELETE CASCADE,
+    FOREIGN KEY (token_id)
+        REFERENCES access_tokens (id) ON DELETE CASCADE
+);
+
+CREATE UNIQUE INDEX IF NOT EXISTS event_txn_id_event_id ON event_txn_id(event_id);
+CREATE UNIQUE INDEX IF NOT EXISTS event_txn_id_txn_id ON event_txn_id(room_id, user_id, token_id, txn_id);
+CREATE INDEX IF NOT EXISTS event_txn_id_ts ON event_txn_id(inserted_ts);

[Review thread on lines +17 to +18 (the table comment)]
Reviewer: It might be nice to mention what these are expected to be deduplicated across (user, device, transaction). I think most of the other tables we have use the device_id, not the access token ID? Are these mostly synonymous?
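For illustration, the two queries this table exists to serve — the dedup lookup on send and the periodic purge — runnable against an in-memory SQLite database (trimmed schema: the foreign keys are omitted since the referenced tables are not created here):

```python
# Runnable sketch of how event_txn_id is used; names match the migration above.
import sqlite3
import time

conn = sqlite3.connect(":memory:")
conn.execute(
    """
    CREATE TABLE event_txn_id (
        event_id TEXT NOT NULL,
        room_id TEXT NOT NULL,
        user_id TEXT NOT NULL,
        token_id BIGINT NOT NULL,
        txn_id TEXT NOT NULL,
        inserted_ts BIGINT NOT NULL
    )
    """
)
conn.execute(
    "CREATE UNIQUE INDEX event_txn_id_txn_id"
    " ON event_txn_id(room_id, user_id, token_id, txn_id)"
)

now_ms = int(time.time() * 1000)
conn.execute(
    "INSERT INTO event_txn_id VALUES (?, ?, ?, ?, ?, ?)",
    ("$abc:hs", "!room:hs", "@alice:hs", 42, "txn1", now_ms),
)

# The dedup lookup performed before creating/persisting an event:
row = conn.execute(
    "SELECT event_id FROM event_txn_id"
    " WHERE room_id = ? AND user_id = ? AND token_id = ? AND txn_id = ?",
    ("!room:hs", "@alice:hs", 42, "txn1"),
).fetchone()
assert row == ("$abc:hs",)

# The periodic cleanup of mappings older than 24 hours:
one_day_ago = now_ms - 24 * 60 * 60 * 1000
conn.execute("DELETE FROM event_txn_id WHERE inserted_ts < ?", (one_day_ago,))
```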