This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Remark storage
arkpar committed Jan 19, 2022
1 parent bea8f32 commit 2accec8
Showing 4 changed files with 177 additions and 35 deletions.
120 changes: 110 additions & 10 deletions client/db/src/lib.rs
@@ -113,8 +113,8 @@ pub type DbHash = sp_core::H256;
/// This is used as block body when storage-chain mode is enabled.
#[derive(Debug, Encode, Decode)]
struct ExtrinsicHeader {
/// Hash of the indexed part
indexed_hash: DbHash, // Zero hash if there's no indexed data
/// Hash of the indexed part, if any
indexed_hash: Option<DbHash>,
/// The rest of the data.
data: Vec<u8>,
}
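The change above replaces the zero-hash sentinel with an explicit `Option<DbHash>`, so "no indexed data" can no longer collide with a payload whose hash happens to be all zeroes, and the intent is visible in the type. A minimal standalone sketch of the encoding difference, assuming only the `parity-scale-codec` crate with its `derive` feature (Substrate renames it to `codec`) and approximating `DbHash` with a 32-byte array:

```rust
// Sketch only: `DbHash` is approximated by [u8; 32] instead of sp_core::H256.
use parity_scale_codec::{Decode, Encode};

type DbHash = [u8; 32];

#[derive(Debug, PartialEq, Encode, Decode)]
struct ExtrinsicHeader {
    /// Hash of the indexed part, if any.
    indexed_hash: Option<DbHash>,
    /// The rest of the data.
    data: Vec<u8>,
}

fn main() {
    let indexed = ExtrinsicHeader { indexed_hash: Some([0u8; 32]), data: vec![1, 2, 3] };
    let plain = ExtrinsicHeader { indexed_hash: None, data: vec![1, 2, 3] };

    // SCALE prefixes an Option with one byte (0x01 = Some, 0x00 = None), so an
    // all-zero hash and "no indexed data" remain distinguishable on disk.
    let a = indexed.encode();
    let b = plain.encode();
    assert_eq!(a[0], 0x01);
    assert_eq!(b[0], 0x00);
    assert_eq!(ExtrinsicHeader::decode(&mut &a[..]).unwrap(), indexed);
    assert_eq!(ExtrinsicHeader::decode(&mut &b[..]).unwrap(), plain);
}
```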
@@ -576,7 +576,7 @@ impl<Block: BlockT> sc_client_api::blockchain::Backend<Block> for BlockchainDb<B
let extrinsics: ClientResult<Vec<Block::Extrinsic>> = index
.into_iter()
.map(|ExtrinsicHeader { indexed_hash, data }| {
let decode_result = if indexed_hash != Default::default() {
let decode_result = if let Some(indexed_hash) = indexed_hash {
match self.db.get(columns::TRANSACTION, indexed_hash.as_ref()) {
Some(t) => {
let mut input =
@@ -658,7 +658,7 @@ impl<Block: BlockT> sc_client_api::blockchain::Backend<Block> for BlockchainDb<B
Ok(index) => {
let mut transactions = Vec::new();
for ExtrinsicHeader { indexed_hash, .. } in index.into_iter() {
if indexed_hash != Default::default() {
if let Some(indexed_hash) = indexed_hash {
match self.db.get(columns::TRANSACTION, indexed_hash.as_ref()) {
Some(t) => transactions.push(t),
None =>
@@ -1319,6 +1319,37 @@ impl<Block: BlockT> Backend<Block> {
match self.transaction_storage {
TransactionStorageMode::BlockBody => {
transaction.set_from_vec(columns::BODY, &lookup_key, body.encode());
// Store indexed data.
let mut hashes = Vec::new();
for op in operation.index_ops {
match op {
IndexOperation::Insert { extrinsic, hash, size } => {
let hash = DbHash::from_slice(hash.as_ref());
let extrinsic = body[extrinsic as usize].encode();
if size as usize <= extrinsic.len() {
let offset = extrinsic.len() - size as usize;
transaction.store(
columns::TRANSACTION,
hash,
extrinsic[offset..].to_vec(),
);
hashes.push(hash);
} else {
debug!(target: "db", "Commit: ignored incorrect index.");
}
},
IndexOperation::Renew { .. } => {
debug!(target: "db", "Commit: ignored indexed data renew.");
},
}
}
if !hashes.is_empty() {
transaction.set_from_vec(
columns::TRANSACTION,
&lookup_key,
hashes.encode(),
);
}
},
TransactionStorageMode::StorageChain => {
let body =
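In `BlockBody` mode the full body is still written as before; the new code additionally copies the indexed portion of each extrinsic into `columns::TRANSACTION` under its content hash, and records the list of hashes under the block's lookup key so they can be released when the block is pruned. The indexed payload is taken to be the trailing `size` bytes of the encoded extrinsic, and out-of-range sizes are ignored. A small standalone sketch of that slicing rule (a hypothetical helper, not part of the client-db API):

```rust
/// Returns the trailing `size` bytes of an encoded extrinsic, i.e. the part
/// that gets stored separately under its content hash, or `None` when the
/// index operation is inconsistent (mirrors the "ignored incorrect index"
/// branch above).
fn indexed_tail(encoded_extrinsic: &[u8], size: u32) -> Option<&[u8]> {
    let size = size as usize;
    if size <= encoded_extrinsic.len() {
        let offset = encoded_extrinsic.len() - size;
        Some(&encoded_extrinsic[offset..])
    } else {
        None
    }
}

fn main() {
    // 4 leading bytes of call/signature data, 4 bytes of indexed payload.
    let xt = vec![0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4];
    assert_eq!(indexed_tail(&xt, 4), Some(&xt[4..]));
    // A `size` larger than the extrinsic itself is rejected.
    assert_eq!(indexed_tail(&xt, 64), None);
}
```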
@@ -1672,7 +1703,7 @@ impl<Block: BlockT> Backend<Block> {
let mut hash = h.clone();
// Follow displaced chains back until we reach a finalized block.
// Since leaves are discarded due to finality, they can't have parents
// that are canonical, but not yet finalized. So we stop deletig as soon as
// that are canonical, but not yet finalized. So we stop deleting as soon as
// we reach canonical chain.
while self.blockchain.hash(number)? != Some(hash.clone()) {
let id = BlockId::<Block>::hash(hash.clone());
@@ -1706,12 +1737,38 @@ impl<Block: BlockT> Backend<Block> {
id,
)?;
match self.transaction_storage {
TransactionStorageMode::BlockBody => {},
TransactionStorageMode::BlockBody => {
if let Some(hashes) = read_db(
&*self.storage.db,
columns::KEY_LOOKUP,
columns::TRANSACTION,
id,
)? {
match Vec::<DbHash>::decode(&mut &hashes[..]) {
Ok(hashes) =>
for hash in hashes {
transaction.release(columns::TRANSACTION, hash);
},
Err(err) =>
return Err(sp_blockchain::Error::Backend(format!(
"Error decoding indexed hash list: {}",
err
))),
}
utils::remove_from_db(
transaction,
&*self.storage.db,
columns::KEY_LOOKUP,
columns::TRANSACTION,
id,
)?;
}
},
TransactionStorageMode::StorageChain => {
match Vec::<ExtrinsicHeader>::decode(&mut &body[..]) {
Ok(body) =>
for ExtrinsicHeader { indexed_hash, .. } in body {
if indexed_hash != Default::default() {
if let Some(indexed_hash) = indexed_hash {
transaction.release(columns::TRANSACTION, indexed_hash);
}
},
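Pruning now walks the stored hash list (BlockBody mode) or the extrinsic headers (StorageChain mode) and calls `transaction.release` for every indexed hash. The TRANSACTION column is reference counted, which is what lets `IndexOperation::Renew` (handled in `apply_index_ops` below) keep a payload alive after the block that first indexed it is pruned. A toy model of that counting scheme, purely for illustration; the real behaviour lives in the sc-client-db database transaction layer:

```rust
use std::collections::HashMap;

/// Toy stand-in for the reference-counted TRANSACTION column.
#[derive(Default)]
struct RefCountedStore {
    entries: HashMap<Vec<u8>, (Vec<u8>, u32)>, // key -> (value, refcount)
}

impl RefCountedStore {
    /// `store`: insert the value, or bump the count if the key already exists.
    fn store(&mut self, key: Vec<u8>, value: Vec<u8>) {
        self.entries
            .entry(key)
            .and_modify(|(_, rc)| *rc += 1)
            .or_insert((value, 1));
    }
    /// `reference`: what an `IndexOperation::Renew` amounts to.
    fn reference(&mut self, key: &[u8]) {
        if let Some((_, rc)) = self.entries.get_mut(key) {
            *rc += 1;
        }
    }
    /// `release`: called for every indexed hash of a pruned block.
    fn release(&mut self, key: &[u8]) {
        if let Some((_, rc)) = self.entries.get_mut(key) {
            *rc -= 1;
            if *rc == 0 {
                self.entries.remove(key);
            }
        }
    }
    fn get(&self, key: &[u8]) -> Option<&[u8]> {
        self.entries.get(key).map(|(v, _)| v.as_slice())
    }
}

fn main() {
    let mut txs = RefCountedStore::default();
    txs.store(b"content-hash".to_vec(), b"payload".to_vec()); // block N indexes it
    txs.reference(b"content-hash");                           // block N+1 renews it
    txs.release(b"content-hash");                             // block N is pruned
    assert!(txs.get(b"content-hash").is_some());              // still retrievable
    txs.release(b"content-hash");                             // block N+1 is pruned
    assert!(txs.get(b"content-hash").is_none());              // now gone
}
```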
@@ -1784,7 +1841,7 @@ fn apply_index_ops<Block: BlockT>(
let extrinsic_header = if let Some(hash) = renewed_map.get(&(index as u32)) {
// Bump ref counter
transaction.reference(columns::TRANSACTION, DbHash::from_slice(hash.as_ref()));
ExtrinsicHeader { indexed_hash: hash.clone(), data: extrinsic }
ExtrinsicHeader { indexed_hash: Some(hash.clone()), data: extrinsic }
} else {
match index_map.get(&(index as u32)) {
Some((hash, size)) if *size as usize <= extrinsic.len() => {
@@ -1795,11 +1852,11 @@
extrinsic[offset..].to_vec(),
);
ExtrinsicHeader {
indexed_hash: DbHash::from_slice(hash.as_ref()),
indexed_hash: Some(DbHash::from_slice(hash.as_ref())),
data: extrinsic[..offset].to_vec(),
}
},
_ => ExtrinsicHeader { indexed_hash: Default::default(), data: extrinsic },
_ => ExtrinsicHeader { indexed_hash: None, data: extrinsic },
}
};
extrinsic_headers.push(extrinsic_header);
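For `StorageChain` mode, `apply_index_ops` keeps only `extrinsic[..offset]` in the `ExtrinsicHeader` and stores the indexed tail under its hash; `body()` later fetches the tail and decodes the concatenation of the two pieces. A round-trip sketch of that split (standalone illustration, not the client-db API):

```rust
/// Split an encoded extrinsic the way `apply_index_ops` does in StorageChain
/// mode: the leading bytes stay in the ExtrinsicHeader, the trailing
/// `indexed_size` bytes go to columns::TRANSACTION under their content hash.
fn split_for_storage_chain(extrinsic: &[u8], indexed_size: usize) -> (Vec<u8>, Vec<u8>) {
    let offset = extrinsic.len() - indexed_size;
    (extrinsic[..offset].to_vec(), extrinsic[offset..].to_vec())
}

/// Re-join the two pieces before decoding, as a reader of the block body must.
fn rejoin(header_data: &[u8], indexed_payload: &[u8]) -> Vec<u8> {
    let mut full = header_data.to_vec();
    full.extend_from_slice(indexed_payload);
    full
}

fn main() {
    let extrinsic = b"call-prefix|large-indexed-blob".to_vec();
    let blob_len = "large-indexed-blob".len();
    let (kept, indexed) = split_for_storage_chain(&extrinsic, blob_len);
    assert_eq!(kept, b"call-prefix|".to_vec());
    assert_eq!(rejoin(&kept, &indexed), extrinsic);
}
```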
@@ -3143,6 +3200,49 @@ pub(crate) mod tests {
assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap());
}

#[test]
fn indexed_data_block_body() {
let backend =
Backend::<Block>::new_test_with_tx_storage(1, 10, TransactionStorageMode::BlockBody);

let x0 = ExtrinsicWrapper::from(0u64).encode();
let x1 = ExtrinsicWrapper::from(1u64).encode();
let x0_hash = <HashFor<Block> as sp_core::Hasher>::hash(&x0[1..]);
let x1_hash = <HashFor<Block> as sp_core::Hasher>::hash(&x1[1..]);
let index = vec![
IndexOperation::Insert {
extrinsic: 0,
hash: x0_hash.as_ref().to_vec(),
size: (x0.len() - 1) as u32,
},
IndexOperation::Insert {
extrinsic: 1,
hash: x1_hash.as_ref().to_vec(),
size: (x1.len() - 1) as u32,
},
];
let hash = insert_block(
&backend,
0,
Default::default(),
None,
Default::default(),
vec![0u64.into(), 1u64.into()],
Some(index),
)
.unwrap();
let bc = backend.blockchain();
assert_eq!(bc.indexed_transaction(&x0_hash).unwrap().unwrap(), &x0[1..]);
assert_eq!(bc.indexed_transaction(&x1_hash).unwrap().unwrap(), &x1[1..]);

// Push one more block and make sure the earlier block is pruned and its transaction index is cleared.
insert_block(&backend, 1, hash, None, Default::default(), vec![], None).unwrap();
backend.finalize_block(BlockId::Number(1), None).unwrap();
assert_eq!(bc.body(BlockId::Number(0)).unwrap(), None);
assert_eq!(bc.indexed_transaction(&x0_hash).unwrap(), None);
assert_eq!(bc.indexed_transaction(&x1_hash).unwrap(), None);
}

#[test]
fn renew_transaction_storage() {
let backend =
6 changes: 6 additions & 0 deletions frame/system/benchmarking/src/lib.rs
@@ -45,6 +45,12 @@ benchmarks! {
let caller = whitelisted_caller();
}: _(RawOrigin::Signed(caller), remark_message)

remark_with_index {
let b in 0 .. *T::BlockLength::get().max.get(DispatchClass::Normal) as u32;
let remark_message = vec![1; b as usize];
let caller = whitelisted_caller();
}: _(RawOrigin::Signed(caller), remark_message)

set_heap_pages {
}: _(RawOrigin::Root, Default::default())

22 changes: 22 additions & 0 deletions frame/system/src/lib.rs
@@ -493,6 +493,26 @@ pub mod pallet {
Self::deposit_event(Event::Remarked { sender: who, hash });
Ok(().into())
}

/// Make some on-chain remark and index its content for later retrieval.
///
/// # <weight>
/// - `O(b)` where b is the length of the remark.
/// - 1 transaction index entry.
/// # </weight>
#[pallet::weight(T::SystemWeightInfo::remark_with_event(remark.len() as u32))]
//#[pallet::weight(T::SystemWeightInfo::remark_with_index(remark.len() as u32))]
pub fn remark_with_index(
origin: OriginFor<T>,
remark: Vec<u8>,
) -> DispatchResultWithPostInfo {
ensure_signed(origin)?;
let content_hash = sp_io::hashing::blake2_256(&remark);
let extrinsic_index = <frame_system::Pallet<T>>::extrinsic_index()
.ok_or_else(|| Error::<T>::BadContext)?;
sp_io::transaction_index::index(extrinsic_index, remark.len() as u32, content_hash);
Ok(().into())
}
}

/// Event for the System pallet.
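The new `remark_with_index` dispatchable does not write the remark to state; it only asks the host, via `sp_io::transaction_index::index`, to index the remark's content hash and length against the current extrinsic index, and fails with `BadContext` when no extrinsic index is available (i.e. outside block execution). A hypothetical standalone sketch of that control flow, with the extrinsic index and the host function injected as parameters so it runs without a runtime environment; the hash here is a dummy stand-in for `blake2_256`:

```rust
#[derive(Debug, PartialEq)]
enum Error {
    BadContext,
}

/// Sketch of the dispatch logic above: `extrinsic_index` stands in for
/// `frame_system::Pallet::<T>::extrinsic_index()` and `index_host_fn` for
/// `sp_io::transaction_index::index`.
fn remark_with_index(
    remark: &[u8],
    extrinsic_index: Option<u32>,
    mut index_host_fn: impl FnMut(u32, u32, [u8; 32]),
) -> Result<(), Error> {
    // Dummy stand-in for blake2_256(&remark); only the shape matters here.
    let mut content_hash = [0u8; 32];
    for (i, b) in remark.iter().enumerate() {
        content_hash[i % 32] ^= *b;
    }
    // Outside block execution there is no extrinsic index -> BadContext.
    let extrinsic_index = extrinsic_index.ok_or(Error::BadContext)?;
    index_host_fn(extrinsic_index, remark.len() as u32, content_hash);
    Ok(())
}

fn main() {
    let mut indexed = Vec::new();
    let ok = remark_with_index(b"hello world", Some(2), |idx, len, hash| {
        indexed.push((idx, len, hash));
    });
    assert_eq!(ok, Ok(()));
    assert_eq!(indexed.len(), 1);

    let err = remark_with_index(b"hello world", None, |_, _, _| {});
    assert_eq!(err, Err(Error::BadContext));
}
```

Note that the weight attribute still points at `remark_with_event`; the commented-out attribute and the new `remark_with_index` benchmark mark the intended switch once weights are regenerated.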
@@ -535,6 +555,8 @@
NonZeroRefCount,
/// The origin filter prevent the call to be dispatched.
CallFiltered,
/// Attempted to call `remark_with_index` outside of block execution.
BadContext,
}

/// Exposed trait-generic origin type.
64 changes: 39 additions & 25 deletions frame/system/src/weights.rs
@@ -1,6 +1,6 @@
// This file is part of Substrate.

// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd.
// Copyright (C) 2022 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,7 +18,7 @@
//! Autogenerated weights for frame_system
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! DATE: 2022-01-15, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128

// Executed Command:
@@ -35,7 +35,6 @@
// --output=./frame/system/src/weights.rs
// --template=./.maintain/frame-weight-template.hbs


#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -47,6 +46,7 @@ use sp_std::marker::PhantomData;
pub trait WeightInfo {
fn remark(b: u32, ) -> Weight;
fn remark_with_event(b: u32, ) -> Weight;
fn remark_with_index(b: u32, ) -> Weight;
fn set_heap_pages() -> Weight;
fn set_storage(i: u32, ) -> Weight;
fn kill_storage(i: u32, ) -> Weight;
@@ -56,80 +56,94 @@ pub trait WeightInfo {
/// Weights for frame_system using the Substrate node and recommended hardware.
pub struct SubstrateWeight<T>(PhantomData<T>);
impl<T: crate::Config> WeightInfo for SubstrateWeight<T> {
fn remark(b: u32, ) -> Weight {
(574_000 as Weight)
fn remark(_b: u32, ) -> Weight {
(0 as Weight)
}
fn remark_with_event(b: u32, ) -> Weight {
(0 as Weight)
// Standard Error: 0
.saturating_add((1_000 as Weight).saturating_mul(b as Weight))
}
fn remark_with_event(b: u32, ) -> Weight {
// Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0)
fn remark_with_index(b: u32, ) -> Weight {
(0 as Weight)
// Standard Error: 0
.saturating_add((2_000 as Weight).saturating_mul(b as Weight))
.saturating_add((1_000 as Weight).saturating_mul(b as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
}
// Storage: System Digest (r:1 w:1)
// Storage: unknown [0x3a686561707061676573] (r:0 w:1)
fn set_heap_pages() -> Weight {
(1_891_000 as Weight)
.saturating_add(T::DbWeight::get().writes(1 as Weight))
(3_000_000 as Weight)
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
// Storage: Skipped Metadata (r:0 w:0)
fn set_storage(i: u32, ) -> Weight {
(0 as Weight)
// Standard Error: 0
.saturating_add((848_000 as Weight).saturating_mul(i as Weight))
.saturating_add((385_000 as Weight).saturating_mul(i as Weight))
.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
}
// Storage: Skipped Metadata (r:0 w:0)
fn kill_storage(i: u32, ) -> Weight {
(308_000 as Weight)
(0 as Weight)
// Standard Error: 0
.saturating_add((559_000 as Weight).saturating_mul(i as Weight))
.saturating_add((281_000 as Weight).saturating_mul(i as Weight))
.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
}
// Storage: Skipped Metadata (r:0 w:0)
fn kill_prefix(p: u32, ) -> Weight {
(7_616_000 as Weight)
(0 as Weight)
// Standard Error: 1_000
.saturating_add((783_000 as Weight).saturating_mul(p as Weight))
.saturating_add((759_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight)))
}
}

// For backwards compatibility and tests
impl WeightInfo for () {
fn remark(b: u32, ) -> Weight {
(574_000 as Weight)
fn remark(_b: u32, ) -> Weight {
(0 as Weight)
}
fn remark_with_event(b: u32, ) -> Weight {
(0 as Weight)
// Standard Error: 0
.saturating_add((1_000 as Weight).saturating_mul(b as Weight))
}
fn remark_with_event(b: u32, ) -> Weight {
// Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0)
fn remark_with_index(b: u32, ) -> Weight {
(0 as Weight)
// Standard Error: 0
.saturating_add((2_000 as Weight).saturating_mul(b as Weight))
.saturating_add((1_000 as Weight).saturating_mul(b as Weight))
.saturating_add(RocksDbWeight::get().reads(1 as Weight))
}
// Storage: System Digest (r:1 w:1)
// Storage: unknown [0x3a686561707061676573] (r:0 w:1)
fn set_heap_pages() -> Weight {
(1_891_000 as Weight)
.saturating_add(RocksDbWeight::get().writes(1 as Weight))
(3_000_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(1 as Weight))
.saturating_add(RocksDbWeight::get().writes(2 as Weight))
}
// Storage: Skipped Metadata (r:0 w:0)
fn set_storage(i: u32, ) -> Weight {
(0 as Weight)
// Standard Error: 0
.saturating_add((848_000 as Weight).saturating_mul(i as Weight))
.saturating_add((385_000 as Weight).saturating_mul(i as Weight))
.saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
}
// Storage: Skipped Metadata (r:0 w:0)
fn kill_storage(i: u32, ) -> Weight {
(308_000 as Weight)
(0 as Weight)
// Standard Error: 0
.saturating_add((559_000 as Weight).saturating_mul(i as Weight))
.saturating_add((281_000 as Weight).saturating_mul(i as Weight))
.saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
}
// Storage: Skipped Metadata (r:0 w:0)
fn kill_prefix(p: u32, ) -> Weight {
(7_616_000 as Weight)
(0 as Weight)
// Standard Error: 1_000
.saturating_add((783_000 as Weight).saturating_mul(p as Weight))
.saturating_add((759_000 as Weight).saturating_mul(p as Weight))
.saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight)))
}
}
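The regenerated table gives `remark_with_index` a weight that is linear in the remark length plus one database read: the storage comment `unknown [0x3a65787472696e7369635f696e646578]` is the well-known `:extrinsic_index` key probed by `extrinsic_index()`. A worked example of how the pieces combine; the per-byte slope (1_000) and the single read come from the table above, while the read cost of 25_000_000 weight units is an assumed RocksDB constant used only for illustration:

```rust
/// Worked example of the generated formula:
///   weight(b) = 0 + 1_000 * b + one DB read
fn remark_with_index_weight(b: u32, db_read_weight: u64) -> u64 {
    0u64
        .saturating_add(1_000u64.saturating_mul(b as u64))
        .saturating_add(db_read_weight)
}

fn main() {
    // A 10 KiB remark: 10_240 bytes * 1_000 + 25_000_000 = 35_240_000.
    assert_eq!(remark_with_index_weight(10 * 1024, 25_000_000), 35_240_000);
}
```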
