
Commit

Remove debug logging
MadLittleMods committed Oct 21, 2021
1 parent 4191f56 commit f39c1da
Showing 7 changed files with 11 additions and 184 deletions.
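The diff below deletes most of the ad-hoc logger.info calls in the backfill and batch-persistence paths and downgrades the ones that remain to logger.debug, so they are only emitted when a logger is configured at DEBUG level. A minimal sketch of that distinction with Python's standard logging module (the logger name and messages are illustrative, not taken from Synapse):

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("example.backfill")

    logger.debug("extremities=%s", ["$ev1", "$ev2"])          # suppressed at INFO
    logger.info("backfilling room %s", "!room:example.org")   # emitted

    # Anyone who needs the extra detail can opt a single logger into DEBUG:
    logger.setLevel(logging.DEBUG)
    logger.debug("now visible")  # emitted

Under a typical INFO-level configuration, the surviving debug calls therefore stay out of production logs.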
4 changes: 0 additions & 4 deletions synapse/federation/federation_server.py
@@ -196,12 +196,8 @@ async def on_backfill_request(
origin, room_id, versions, limit
)

logger.info("on_backfill_request pdus(%d)=%s", len(pdus), pdus)

res = self._transaction_dict_from_pdus(pdus)

logger.info("on_backfill_request res=%s", res)

return 200, res

async def on_incoming_transaction(
13 changes: 5 additions & 8 deletions synapse/handlers/federation.py
@@ -148,14 +148,14 @@ async def _maybe_backfill_inner(
insertion_events_to_be_backfilled = (
await self.store.get_insertion_event_backwards_extremities_in_room(room_id)
)
logger.info(
logger.debug(
"_maybe_backfill_inner: extremities oldest_events_with_depth=%s insertion_events_to_be_backfilled=%s",
oldest_events_with_depth,
insertion_events_to_be_backfilled,
)

if not oldest_events_with_depth and not insertion_events_to_be_backfilled:
logger.info("Not backfilling as no extremeties found.")
logger.debug("Not backfilling as no extremeties found.")
return False

# We only want to paginate if we can actually see the events we'll get,
@@ -203,7 +203,7 @@ async def _maybe_backfill_inner(
redact=False,
check_history_visibility_only=True,
)
logger.info(
logger.debug(
"_maybe_backfill_inner: filtered_extremities %s", filtered_extremities
)

@@ -230,7 +230,7 @@ async def _maybe_backfill_inner(
# much larger factor will result in triggering a backfill request much
# earlier than necessary.
if current_depth - 2 * limit > max_depth:
logger.info(
logger.debug(
"Not backfilling as we don't need to. %d < %d - 2 * %d",
max_depth,
current_depth,
@@ -249,7 +249,7 @@ async def _maybe_backfill_inner(
t for t in sorted_extremeties_tuple if int(t[1]) <= current_depth
]

logger.info(
logger.debug(
"room_id: %s, backfill: current_depth: %s, limit: %s, max_depth: %s, extrems (%d): %s filtered_sorted_extremeties_tuple: %s",
room_id,
current_depth,
@@ -271,8 +271,6 @@ async def _maybe_backfill_inner(
# request URI to be too long.
extremities = dict(sorted_extremeties_tuple[:5])

logger.info("backfill extremities=%s", extremities)

# Now we need to decide which hosts to hit first.

# First we try hosts that are already in the room
@@ -1061,7 +1059,6 @@ async def on_backfill_request(
events = await self.store.get_backfill_events(room_id, pdu_list, limit)

events = await filter_events_for_server(self.storage, origin, events)
logger.info("on_backfill_request resultant events(%d)=%s", len(events), events)

return events
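As a concrete reading of the depth heuristic kept (and merely re-levelled) in _maybe_backfill_inner above, with illustrative numbers: if limit=10 and current_depth=100, the check current_depth - 2 * limit > max_depth skips backfilling whenever max_depth is below 80, so a backfill request is only triggered once the client's pagination point comes within 2 × limit depth of the highest-depth backwards extremity.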

43 changes: 1 addition & 42 deletions synapse/handlers/federation_event.py
@@ -416,19 +416,6 @@ async def backfill(
events = await self._federation_client.backfill(
dest, room_id, limit=limit, extremities=extremities
)
logger.info(
"from remote server: got backfill response events(%d)=%s",
len(events),
[
{
"event_id": ev.event_id,
"type": ev["type"],
"depth": ev["depth"],
"content": ev["content"].get("body", None),
}
for ev in events
],
)

if not events:
return
@@ -444,38 +431,10 @@ async def backfill(

await self._process_pulled_events(
dest,
# The /backfill response should start from `?v` and include the
# events that preceded it (so the list will be newest -> oldest). We
# reverse that order so the messages are oldest -> newest and we can
# persist the backfilled events without constantly have to go fetch
# missing prev_events which are probably included in the same
# backfill chunk.
# TODO: If we try to reverse this list, the stream_ordering will be backwards
# reversed(events),
events,
backfilled=True,
)

for ev in events:
event_after_persisted = await self._store.get_event(
ev.event_id, allow_none=True
)

if event_after_persisted:
logger.info(
"from remote server: processed backfilled event_id=%s type=%s depth=%s stream_ordering=%s content=%s",
ev.event_id,
event_after_persisted["type"],
event_after_persisted["depth"],
event_after_persisted.internal_metadata.stream_ordering,
event_after_persisted["content"].get("body", None),
)
else:
logger.info(
"from remote server: processed backfilled event_id=%s failed to lookup",
ev.event_id,
)

async def _get_missing_events_for_pdu(
self, origin: str, pdu: EventBase, prevs: Set[str], min_depth: int
) -> None:
@@ -1277,7 +1236,7 @@ def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]:
await self.persist_events_and_notify(
room_id,
tuple(events_to_persist),
# TODO: Maybe this to get fetched missing events during backfill as backfilled also :/
# Events we fetch during backfill should be marked as backfilled as well
backfilled=True,
)

24 changes: 2 additions & 22 deletions synapse/handlers/room_batch.py
@@ -13,10 +13,6 @@
logger = logging.getLogger(__name__)


def generate_fake_event_id() -> str:
return "$fake_" + random_string(43)


class RoomBatchHandler:
def __init__(self, hs: "HomeServer"):
self.hs = hs
@@ -184,7 +180,7 @@ async def persist_state_events_at_start(

# Make the state events float off on their own so we don't have a
# bunch of `@mxid joined the room` noise between each batch
prev_event_ids_for_state_chain = [] # generate_fake_event_id()
prev_event_ids_for_state_chain: List[str] = []

for state_event in state_events_at_start:
assert_params_in_dict(
@@ -227,15 +223,6 @@ async def persist_state_events_at_start(
# reference and also update in the event when we append later.
auth_event_ids=auth_event_ids.copy(),
)

mem_event = await self.store.get_event(event_id)
logger.info(
"room_batch mem_event_id=%s depth=%s stream_ordering=%s prev_event_ids=%s",
mem_event.event_id,
mem_event.depth,
mem_event.internal_metadata.stream_ordering,
mem_event.prev_event_ids(),
)
else:
# TODO: Add some complement tests that adds state that is not member joins
# and will use this code path. Maybe we only want to support join state events
@@ -362,20 +349,13 @@ async def persist_historical_events(
# Events are sorted by (topological_ordering, stream_ordering)
# where topological_ordering is just depth.
for (event, context) in reversed(events_to_persist):
result_event = await self.event_creation_handler.handle_new_client_event(
await self.event_creation_handler.handle_new_client_event(
await self.create_requester_for_user_id_from_app_service(
event["sender"], app_service_requester.app_service
),
event=event,
context=context,
)
logger.info(
"result_event depth=%s stream_ordering=%s event_id=%s body=%s",
result_event.depth,
result_event.internal_metadata.stream_ordering,
result_event.event_id,
result_event.content.get("body", None),
)

return event_ids

1 change: 0 additions & 1 deletion synapse/rest/client/room.py
@@ -561,7 +561,6 @@ async def on_GET(
pagination_config = await PaginationConfig.from_request(
self.store, request, default_limit=10
)
logger.info("/messages rest start pagination_config=%s", pagination_config)
# Twisted will have processed the args by now.
assert request.args is not None
as_client_event = b"raw" not in request.args
52 changes: 3 additions & 49 deletions synapse/storage/databases/main/event_federation.py
@@ -1057,11 +1057,6 @@ def _get_backfill_events(self, txn, room_id, event_list, limit):
# we process the newest-in-time messages first going backwards in time.
queue = PriorityQueue()

logger.info(
"_get_backfill_events: seeding backfill with event_list(%d)=%s",
len(event_list),
event_list,
)
for event_id in event_list:
event_lookup_result = self.db_pool.simple_select_one_txn(
txn,
@@ -1075,14 +1070,6 @@ def _get_backfill_events(self, txn, room_id, event_list, limit):
allow_none=True,
)

logger.info(
"_get_backfill_events: seeding backfill with event_id=%s type=%s depth=%s stream_ordering=%s",
event_id,
event_lookup_result["type"],
event_lookup_result["depth"],
event_lookup_result["stream_ordering"],
)

if event_lookup_result["depth"]:
queue.put(
(
@@ -1102,39 +1089,6 @@ def _get_backfill_events(self, txn, room_id, event_list, limit):
if event_id in event_results:
continue

event_lookup_result = self.db_pool.simple_select_one_txn(
txn,
table="events",
keyvalues={"event_id": event_id},
retcols=["type", "depth", "stream_ordering", "content"],
allow_none=True,
)

event_json_lookup_result = self.db_pool.simple_select_one_onecol_txn(
txn,
table="event_json",
keyvalues={"event_id": event_id},
retcol="json",
allow_none=True,
)

ev = db_to_json(event_json_lookup_result)

if event_lookup_result:
logger.info(
"_get_backfill_events: event_results add event_id=%s type=%s depth=%s stream_ordering=%s content=%s",
event_id,
ev["type"],
ev["depth"],
event_lookup_result["stream_ordering"],
ev["content"].get("body", ev["content"]),
)
else:
logger.info(
"_get_backfill_events: event_results event_id=%s failed to lookup",
event_id,
)

event_results[event_id] = event_id

# Try and find any potential historical batches of message history.
@@ -1147,7 +1101,7 @@ def _get_backfill_events(self, txn, room_id, event_list, limit):
connected_insertion_event_query, (event_id, limit - len(event_results))
)
connected_insertion_event_id_results = txn.fetchall()
logger.info(
logger.debug(
"_get_backfill_events: connected_insertion_event_query %s",
connected_insertion_event_id_results,
)
@@ -1177,7 +1131,7 @@ def _get_backfill_events(self, txn, room_id, event_list, limit):
(event_id, limit - len(event_results)),
)
batch_start_event_id_results = txn.fetchall()
logger.info(
logger.debug(
"_get_backfill_events: batch_start_event_id_results %s",
batch_start_event_id_results,
)
@@ -1190,7 +1144,7 @@ def _get_backfill_events(self, txn, room_id, event_list, limit):
(event_id, False, limit - len(event_results)),
)
prev_event_id_results = txn.fetchall()
logger.info(
logger.debug(
"_get_backfill_events: prev_event_ids %s", prev_event_id_results
)

58 changes: 0 additions & 58 deletions synapse/storage/databases/main/events.py
@@ -169,14 +169,6 @@ async def _persist_events_and_state_updates(

async with stream_ordering_manager as stream_orderings:
for (event, _), stream in zip(events_and_contexts, stream_orderings):
logger.info(
"_persist_events_and_state_updates backfilled=%s event_id=%s depth=%s stream_ordering=%s content=%s",
backfilled,
event.event_id,
event.depth,
stream,
event["content"].get("body", None),
)
event.internal_metadata.stream_ordering = stream

await self.db_pool.runInteraction(
@@ -2139,38 +2131,6 @@ def _update_backward_extremeties(self, txn, events):
Forward extremities are handled when we first start persisting the events.
"""
logger.info(
"_update_backward_extremeties events=%s",
[
{
"event_id": ev.event_id,
"prev_events": ev.prev_event_ids(),
"outlier": ev.internal_metadata.is_outlier(),
}
for ev in events
],
)

for ev in events:
for e_id in ev.prev_event_ids():
query = """
SELECT 1 FROM event_edges
INNER JOIN events AS e USING (event_id, room_id)
WHERE event_id = ? AND room_id = ? AND e.outlier = TRUE
"""

txn.execute(
query,
(e_id, ev.room_id),
)
result = txn.fetchall()
logger.info(
"_update_backward_extremeties test ev=%s prev_event_id=%s result=%s",
ev.event_id,
e_id,
result,
)

# From the events passed in, add all of the prev events as backwards extremities.
# Ignore any events that are already backwards extrems or outliers.
query = (
Expand Down Expand Up @@ -2200,24 +2160,6 @@ def _update_backward_extremeties(self, txn, events):
],
)

for ev in events:
for e_id in ev.prev_event_ids():
query = """
SELECT * FROM event_backward_extremities
WHERE event_id = ? AND room_id = ?
"""

txn.execute(
query,
(e_id, ev.room_id),
)
result = txn.fetchall()
logger.info(
"_update_backward_extremeties ended up as prev_event_id=%s result=%s",
e_id,
result,
)

# Delete all these events that we've already fetched and now know that their
# prev events are the new backwards extremeties.
query = (
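Several of the deleted blocks above ran extra database reads purely to feed logger.info output — the simple_select_one_txn lookups in _get_backfill_events and the additional SELECTs against event_edges and event_backward_extremities in _update_backward_extremeties. This commit removes them outright; where that level of detail is still wanted occasionally, a common stdlib pattern (a generic sketch, not code from this commit or from Synapse) is to gate the expensive work behind Logger.isEnabledFor so it only runs when DEBUG output would actually be emitted:

    import logging

    logger = logging.getLogger("example.storage")

    def log_backfill_seeds(event_ids, fetch_details):
        # Skip the per-event lookups entirely unless DEBUG output will be emitted.
        if not logger.isEnabledFor(logging.DEBUG):
            return
        for event_id in event_ids:
            # `fetch_details` is a hypothetical stand-in for whatever lookup
            # supplies depth / stream_ordering information for an event.
            details = fetch_details(event_id)
            logger.debug("seeding backfill with event_id=%s details=%s", event_id, details)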
