state-db: Print warning when using large pruning window on RocksDb (paritytech#13414)

* state-db: Print warning when using large pruning window on RocksDb

This PR changes state-db to print a warning when a large pruning window is used together with a
database that doesn't support ref-counting, such as RocksDb. The warning makes the user aware of
potential out-of-memory errors: with such a database, the entire pruning window is kept in memory
(see the sketch after the changed-files summary below). Besides that, the PR introduces `LOG_TARGET`
(and `LOG_TARGET_PIN`) constants so the log targets are declared in one central place.

* Review comments
bkchr authored and nathanwhit committed Jul 19, 2023
1 parent 4e54f89 commit 8bdbcd5
Showing 3 changed files with 71 additions and 18 deletions.
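
Why the memory pressure occurs: a database without ref-counting cannot tell on its own when a value becomes unreferenced, so state-db has to keep the full queue of pending deletions for the whole pruning window in memory. The following is a minimal sketch of the two queue flavours, simplified from the `DeathRowQueue` code in `client/state-db/src/pruning.rs` shown further down; field types are abbreviated here and are not the exact definitions.

```rust
use std::collections::{HashMap, VecDeque};

// Per block in the pruning window: the keys that become deletable once
// that block is pruned (its "death row").
struct DeathRow<BlockHash, Key> {
    hash: BlockHash,
    deleted: Vec<Key>,
}

enum DeathRowQueue<BlockHash, Key, D> {
    // RocksDb and friends (no ref-counting): one entry per block in the
    // window, all of it resident in memory, hence the new warning.
    Mem {
        death_rows: VecDeque<DeathRow<BlockHash, Key>>,
        death_index: HashMap<Key, u64>,
    },
    // ParityDb (ref-counting): only a bounded cache is kept in memory;
    // the rest is re-read from the journal on demand.
    DbBacked {
        db: D,
        cache: VecDeque<DeathRow<BlockHash, Key>>,
        cache_capacity: usize,
        last: Option<u64>,
    },
}
```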
4 changes: 3 additions & 1 deletion client/state-db/src/lib.rs
@@ -56,6 +56,8 @@ use std::{
fmt,
};

+const LOG_TARGET: &str = "state-db";
+const LOG_TARGET_PIN: &str = "state-db::pin";
const PRUNING_MODE: &[u8] = b"mode";
const PRUNING_MODE_ARCHIVE: &[u8] = b"archive";
const PRUNING_MODE_ARCHIVE_CANON: &[u8] = b"archive_canonical";
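
The two new `LOG_TARGET*` constants above replace string literals that were previously repeated at every call site; note that the pin target is also renamed from `state-db-pin` to `state-db::pin`. A self-contained illustration of the pattern (the `example` function is made up for this note):

```rust
use log::trace;

// One central definition instead of a string literal at every call site;
// renaming a target is now a one-line change.
const LOG_TARGET: &str = "state-db";
const LOG_TARGET_PIN: &str = "state-db::pin";

fn example() {
    trace!(target: LOG_TARGET, "StateDb settings: ...");
    trace!(target: LOG_TARGET_PIN, "Pinned non-canon block: ...");
}
```

Log filtering keys off these targets, so after this change operators who filtered on `state-db-pin` need to filter on `state-db::pin` instead.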
@@ -309,7 +311,7 @@ impl<BlockHash: Hash, Key: Hash, D: MetaDb> StateDbSync<BlockHash, Key, D> {
ref_counting: bool,
db: D,
) -> Result<StateDbSync<BlockHash, Key, D>, Error<D::Error>> {
trace!(target: "state-db", "StateDb settings: {:?}. Ref-counting: {}", mode, ref_counting);
trace!(target: LOG_TARGET, "StateDb settings: {:?}. Ref-counting: {}", mode, ref_counting);

let non_canonical: NonCanonicalOverlay<BlockHash, Key> = NonCanonicalOverlay::new(&db)?;
let pruning: Option<RefWindow<BlockHash, Key, D>> = match mode {
47 changes: 36 additions & 11 deletions client/state-db/src/noncanonical.rs
@@ -20,6 +20,8 @@
//! Maintains trees of block overlays and allows discarding trees/roots
//! The overlays are added in `insert` and removed in `canonicalize`.

+use crate::{LOG_TARGET, LOG_TARGET_PIN};
+
use super::{to_meta_key, ChangeSet, CommitSet, DBValue, Error, Hash, MetaDb, StateDbError};
use codec::{Decode, Encode};
use log::trace;
@@ -178,7 +180,12 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
let mut values = HashMap::new();
if let Some((ref hash, mut block)) = last_canonicalized {
// read the journal
trace!(target: "state-db", "Reading uncanonicalized journal. Last canonicalized #{} ({:?})", block, hash);
trace!(
target: LOG_TARGET,
"Reading uncanonicalized journal. Last canonicalized #{} ({:?})",
block,
hash
);
let mut total: u64 = 0;
block += 1;
loop {
@@ -198,7 +205,7 @@ };
};
insert_values(&mut values, record.inserted);
trace!(
target: "state-db",
target: LOG_TARGET,
"Uncanonicalized journal entry {}.{} ({:?}) ({} inserted, {} deleted)",
block,
index,
@@ -217,7 +224,11 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
levels.push_back(level);
block += 1;
}
trace!(target: "state-db", "Finished reading uncanonicalized journal, {} entries", total);
trace!(
target: LOG_TARGET,
"Finished reading uncanonicalized journal, {} entries",
total
);
}
Ok(NonCanonicalOverlay {
last_canonicalized,
@@ -252,7 +263,9 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
} else if self.last_canonicalized.is_some() {
if number < front_block_number || number > front_block_number + self.levels.len() as u64
{
trace!(target: "state-db", "Failed to insert block {}, current is {} .. {})",
trace!(
target: LOG_TARGET,
"Failed to insert block {}, current is {} .. {})",
number,
front_block_number,
front_block_number + self.levels.len() as u64,
@@ -284,7 +297,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {

if level.blocks.len() >= MAX_BLOCKS_PER_LEVEL as usize {
trace!(
target: "state-db",
target: LOG_TARGET,
"Too many sibling blocks at #{number}: {:?}",
level.blocks.iter().map(|b| &b.hash).collect::<Vec<_>>()
);
@@ -314,7 +327,15 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
deleted: changeset.deleted,
};
commit.meta.inserted.push((journal_key, journal_record.encode()));
trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} {:?} ({} inserted, {} deleted)", number, index, hash, journal_record.inserted.len(), journal_record.deleted.len());
trace!(
target: LOG_TARGET,
"Inserted uncanonicalized changeset {}.{} {:?} ({} inserted, {} deleted)",
number,
index,
hash,
journal_record.inserted.len(),
journal_record.deleted.len()
);
insert_values(&mut self.values, journal_record.inserted);
Ok(commit)
}
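
Each uncanonicalized changeset is journalled under a per-(block, index) meta key as a SCALE-encoded record. A rough sketch of the record shape, inferred from the `hash`, `inserted` and `deleted` fields used above; the real definition in `noncanonical.rs` may differ, and the `parent_hash` field here is an assumption based on the `self.parents` bookkeeping:

```rust
use codec::{Decode, Encode};

type DBValue = Vec<u8>;

// Hedged sketch of the journal record; field order and types are illustrative.
#[derive(Encode, Decode)]
struct JournalRecord<BlockHash, Key> {
    hash: BlockHash,
    // Assumption: the parent link that feeds `self.parents`.
    parent_hash: BlockHash,
    inserted: Vec<(Key, DBValue)>,
    deleted: Vec<Key>,
}
```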
@@ -368,7 +389,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
hash: &BlockHash,
commit: &mut CommitSet<Key>,
) -> Result<u64, StateDbError> {
trace!(target: "state-db", "Canonicalizing {:?}", hash);
trace!(target: LOG_TARGET, "Canonicalizing {:?}", hash);
let level = match self.levels.pop_front() {
Some(level) => level,
None => return Err(StateDbError::InvalidBlock),
@@ -432,7 +453,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
.meta
.inserted
.push((to_meta_key(LAST_CANONICAL, &()), canonicalized.encode()));
trace!(target: "state-db", "Discarding {} records", commit.meta.deleted.len());
trace!(target: LOG_TARGET, "Discarding {} records", commit.meta.deleted.len());

let num = canonicalized.1;
self.last_canonicalized = Some(canonicalized);
@@ -479,7 +500,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
};
// Check that it does not have any children
if (level_index != level_count - 1) && self.parents.values().any(|h| h == hash) {
log::debug!(target: "state-db", "Trying to remove block {:?} with children", hash);
log::debug!(target: LOG_TARGET, "Trying to remove block {:?} with children", hash);
return None
}
let overlay = level.remove(index);
@@ -502,7 +523,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
pub fn pin(&mut self, hash: &BlockHash) {
let refs = self.pinned.entry(hash.clone()).or_default();
if *refs == 0 {
trace!(target: "state-db-pin", "Pinned non-canon block: {:?}", hash);
trace!(target: LOG_TARGET_PIN, "Pinned non-canon block: {:?}", hash);
}
*refs += 1;
}
@@ -531,7 +552,11 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
entry.get_mut().1 -= 1;
if entry.get().1 == 0 {
let (inserted, _) = entry.remove();
trace!(target: "state-db-pin", "Discarding unpinned non-canon block: {:?}", hash);
trace!(
target: LOG_TARGET_PIN,
"Discarding unpinned non-canon block: {:?}",
hash
);
discard_values(&mut self.values, inserted);
self.parents.remove(&hash);
}
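
The pin/unpin pair above is plain reference counting over a `HashMap`. A stripped-down, standalone sketch of the mechanism (not the actual `NonCanonicalOverlay` methods, which additionally retain the block's inserted values while it is pinned):

```rust
use std::collections::HashMap;
use std::hash::Hash;

struct Pins<H: Hash + Eq + Clone> {
    pinned: HashMap<H, u32>,
}

impl<H: Hash + Eq + Clone> Pins<H> {
    fn pin(&mut self, hash: &H) {
        // First pin inserts a zero count (this is where the
        // "Pinned non-canon block" trace fires); later pins just bump it.
        let refs = self.pinned.entry(hash.clone()).or_default();
        *refs += 1;
    }

    fn unpin(&mut self, hash: &H) {
        if let Some(refs) = self.pinned.get_mut(hash) {
            *refs -= 1;
            if *refs == 0 {
                // Last unpin: forget the block and release anything held.
                self.pinned.remove(hash);
            }
        }
    }
}
```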
38 changes: 32 additions & 6 deletions client/state-db/src/pruning.rs
@@ -26,7 +26,7 @@

use crate::{
noncanonical::LAST_CANONICAL, to_meta_key, CommitSet, Error, Hash, MetaDb, StateDbError,
-DEFAULT_MAX_BLOCK_CONSTRAINT,
+DEFAULT_MAX_BLOCK_CONSTRAINT, LOG_TARGET,
};
use codec::{Decode, Encode};
use log::trace;
@@ -79,14 +79,24 @@ impl<BlockHash: Hash, Key: Hash, D: MetaDb> DeathRowQueue<BlockHash, Key, D> {
death_index: HashMap::new(),
};
// read the journal
trace!(target: "state-db", "Reading pruning journal for the memory queue. Pending #{}", base);
trace!(
target: LOG_TARGET,
"Reading pruning journal for the memory queue. Pending #{}",
base,
);
loop {
let journal_key = to_journal_key(block);
match db.get_meta(&journal_key).map_err(Error::Db)? {
Some(record) => {
let record: JournalRecord<BlockHash, Key> =
Decode::decode(&mut record.as_slice())?;
trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len());
trace!(
target: LOG_TARGET,
"Pruning journal entry {} ({} inserted, {} deleted)",
block,
record.inserted.len(),
record.deleted.len(),
);
queue.import(base, block, record);
},
None => break,
@@ -107,21 +117,25 @@ impl<BlockHash: Hash, Key: Hash, D: MetaDb> DeathRowQueue<BlockHash, Key, D> {
// limit the cache capacity from 1 to `DEFAULT_MAX_BLOCK_CONSTRAINT`
let cache_capacity = window_size.clamp(1, DEFAULT_MAX_BLOCK_CONSTRAINT) as usize;
let mut cache = VecDeque::with_capacity(cache_capacity);
trace!(target: "state-db", "Reading pruning journal for the database-backed queue. Pending #{}", base);
trace!(
target: LOG_TARGET,
"Reading pruning journal for the database-backed queue. Pending #{}",
base
);
DeathRowQueue::load_batch_from_db(&db, &mut cache, base, cache_capacity)?;
Ok(DeathRowQueue::DbBacked { db, cache, cache_capacity, last })
}

/// import a new block to the back of the queue
fn import(&mut self, base: u64, num: u64, journal_record: JournalRecord<BlockHash, Key>) {
let JournalRecord { hash, inserted, deleted } = journal_record;
trace!(target: "state-db", "Importing {}, base={}", num, base);
trace!(target: LOG_TARGET, "Importing {}, base={}", num, base);
match self {
DeathRowQueue::DbBacked { cache, cache_capacity, last, .. } => {
// If the new block continues cached range and there is space, load it directly into
// cache.
if num == base + cache.len() as u64 && cache.len() < *cache_capacity {
trace!(target: "state-db", "Adding to DB backed cache {:?} (#{})", hash, num);
trace!(target: LOG_TARGET, "Adding to DB backed cache {:?} (#{})", hash, num);
cache.push_back(DeathRow { hash, deleted: deleted.into_iter().collect() });
}
*last = Some(num);
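
In the db-backed flavour, at most `cache_capacity` blocks are resident at a time; when the cache runs dry, the next batch is decoded straight from the journal. A hedged sketch of such a batch load; `load_batch_from_db`'s real signature and logic in `pruning.rs` may differ:

```rust
use std::collections::VecDeque;

// Decode up to `cache_capacity` journal entries, starting right after the
// blocks already cached, and append them to the in-memory cache. Mirrors
// the loop in `new_mem` above, but stops at the cache capacity instead of
// draining the whole pruning window.
fn load_batch<E>(
    get_journal_record: impl Fn(u64) -> Result<Option<Vec<u8>>, E>,
    cache: &mut VecDeque<Vec<u8>>,
    base: u64,
    cache_capacity: usize,
) -> Result<(), E> {
    let mut block = base + cache.len() as u64;
    while cache.len() < cache_capacity {
        match get_journal_record(block)? {
            // In state-db the raw bytes would be SCALE-decoded into a
            // `JournalRecord`; this sketch stores them as-is.
            Some(record) => {
                cache.push_back(record);
                block += 1;
            },
            None => break,
        }
    }
    Ok(())
}
```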
@@ -306,6 +320,18 @@ impl<BlockHash: Hash, Key: Hash, D: MetaDb> RefWindow<BlockHash, Key, D> {
};

let queue = if count_insertions {
+// Highly scientific crafted number for deciding when to print the warning!
+//
+// Rocksdb doesn't support refcounting and requires that we load the entire pruning
+// window into the memory.
+if window_size > 1000 {
+log::warn!(
+target: LOG_TARGET,
+"Large pruning window of {window_size} detected! THIS CAN LEAD TO HIGH MEMORY USAGE AND CRASHES. \
+Reduce the pruning window or switch your database to paritydb."
+);
+}
+
DeathRowQueue::new_mem(&db, base)?
} else {
let last = match last_canonicalized_number {
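
The guard only fires when `count_insertions` is true, i.e. exactly when the backend does not ref-count and the fully in-memory queue will be used. A standalone reproduction of the check (the 1000 threshold is the commit's admittedly unscientific cut-off, comfortably above the `DEFAULT_MAX_BLOCK_CONSTRAINT` default of 256 assumed here):

```rust
// Standalone reproduction of the guard added in this commit.
fn warn_if_large_window(window_size: u64, count_insertions: bool) {
    // `count_insertions == true` means the backend does NOT ref-count
    // (e.g. RocksDb), so the whole pruning window will be held in memory.
    if count_insertions && window_size > 1000 {
        log::warn!(
            target: "state-db",
            "Large pruning window of {window_size} detected! THIS CAN LEAD TO HIGH \
            MEMORY USAGE AND CRASHES. Reduce the pruning window or switch your \
            database to paritydb."
        );
    }
}
```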
