Bump Substrate (#871)
* Bump Substrate

* Change usage of "Module" to "Pallet"

Related Substrate PR: paritytech/substrate#8372
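
As an illustration, here is the rename as it shows up in this repo's chain spec (a condensed sketch; the full change is in the `chain_spec.rs` diff below, and upstream kept `Module` as a deprecated alias, so both forms compile during the transition):

```rust
// Before: the pallet was referred to through the generated `Module` type.
let relayer_fund_account = pallet_bridge_messages::Module::<
	millau_runtime::Runtime,
	pallet_bridge_messages::DefaultInstance,
>::relayer_fund_account_id();

// After: the same type is exported as `Pallet`.
let relayer_fund_account = pallet_bridge_messages::Pallet::<
	millau_runtime::Runtime,
	pallet_bridge_messages::DefaultInstance,
>::relayer_fund_account_id();
```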

* Add `OnSetCode` config param

Related Substrate PR: paritytech/substrate#8496
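
Concretely, every `frame_system::Config` implementation (runtimes and test mocks alike) now needs an `OnSetCode` associated type. A minimal sketch, not a complete impl, for a standalone (non-parachain) runtime, where the unit type (which simply writes the new code into storage) is the right choice; parachains instead plug in a handler that notifies the relay chain:

```rust
impl frame_system::Config for Runtime {
	// ... all pre-existing associated types stay unchanged ...

	/// What to do when the runtime code is changed via `set_code`.
	/// `()` just stores the new code; parachains use their own handler.
	type OnSetCode = ();
}
```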

* Update the Aura slot duration type

Related Substrate PR: paritytech/substrate#8386
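
The type change itself lives in the Substrate PR; what is visible in this repo (see `service.rs` below) is that the slot duration, together with the other Aura inputs, now travels through a dedicated params struct instead of a long positional argument list. Excerpted and elided from the diff:

```rust
// Before: the slot duration was the first of eight positional arguments.
let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>(
	sc_consensus_aura::slot_duration(&*client)?,
	// ... seven more positional arguments ...
)?;

// After: every input is a named field of `ImportQueueParams`.
let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _>(ImportQueueParams {
	slot_duration: sc_consensus_aura::slot_duration(&*client)?,
	// ... remaining fields as in the diff below ...
})?;
```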

* Add `OnSetCode` to mock runtimes

* Add support for multiple justifications

Related Substrate PR: paritytech/substrate#7640
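
With this change a block no longer carries a single opaque justification blob; it carries a set of justifications, each tagged with the `ConsensusEngineId` of the engine that produced it. A minimal sketch of the new `sp_runtime` types (the `b"FRNK"` engine id is GRANDPA's; `into_justification` is the accessor introduced by that PR):

```rust
use sp_runtime::{ConsensusEngineId, Justification, Justifications};

const GRANDPA_ENGINE_ID: ConsensusEngineId = *b"FRNK";

fn split_out_grandpa_proof(encoded_proof: Vec<u8>) -> Option<Vec<u8>> {
	// A justification is now an (engine id, encoded proof) pair...
	let justification: Justification = (GRANDPA_ENGINE_ID, encoded_proof);

	// ...and a block stores a collection of them, at most one per engine.
	let justifications: Justifications = justification.into();

	// Consumers pick out the proof for the engine they understand.
	justifications.into_justification(GRANDPA_ENGINE_ID)
}
```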

* Use updated justification type in more places

* Make `GenesisConfig` fields non-optional

Related Substrate PR: paritytech/substrate#8275
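
In the chain spec this removes a layer of `Option` wrapping around every pallet's genesis section. Excerpted from the `chain_spec.rs` diff below:

```rust
// Before: each section was wrapped in `Some(...)`.
frame_system: Some(SystemConfig {
	code: WASM_BINARY.to_vec(),
	changes_trie_config: Default::default(),
}),

// After: the section is the config struct itself.
frame_system: SystemConfig {
	code: WASM_BINARY.to_vec(),
	changes_trie_config: Default::default(),
},
```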

* Update the node service to the reworked telemetry API

Related Substrate PR: paritytech/substrate#8143
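
The telemetry rework makes the node build and own its telemetry explicitly: construct a `TelemetryWorker`, pass its handle into the service parts, spawn the worker on the task manager, and hand a per-component handle to everything that reports (Aura, GRANDPA, block import, spawned tasks). Condensed from the `service.rs` changes below:

```rust
// Build the worker only if telemetry endpoints are configured.
let telemetry = config
	.telemetry_endpoints
	.clone()
	.filter(|endpoints| !endpoints.is_empty())
	.map(|endpoints| -> Result<_, sc_telemetry::Error> {
		let worker = TelemetryWorker::new(16)?;
		let telemetry = worker.handle().new_telemetry(endpoints);
		Ok((worker, telemetry))
	})
	.transpose()?;

// The worker runs as a background task owned by the task manager...
let telemetry = telemetry.map(|(worker, telemetry)| {
	task_manager.spawn_handle().spawn("telemetry", worker.run());
	telemetry
});

// ...and each consumer receives a lightweight handle.
let telemetry_handle = telemetry.as_ref().map(|t| t.handle());
```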

* Appease Clippy
HCastano authored Apr 7, 2021
1 parent 7723866 commit 08c7ea4
Showing 33 changed files with 978 additions and 667 deletions.
749 changes: 477 additions & 272 deletions Cargo.lock

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions bin/millau/node/Cargo.toml
@@ -39,6 +39,7 @@ sc-finality-grandpa-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" }
28 changes: 14 additions & 14 deletions bin/millau/node/src/chain_spec.rs
@@ -122,7 +122,7 @@ impl Alternative {
 				get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
 				get_account_id_from_seed::<sr25519::Public>("George//stash"),
 				get_account_id_from_seed::<sr25519::Public>("Harry//stash"),
-				pallet_bridge_messages::Module::<
+				pallet_bridge_messages::Pallet::<
 					millau_runtime::Runtime,
 					pallet_bridge_messages::DefaultInstance,
 				>::relayer_fund_account_id(),
@@ -154,33 +154,33 @@ fn testnet_genesis(
 	_enable_println: bool,
 ) -> GenesisConfig {
 	GenesisConfig {
-		frame_system: Some(SystemConfig {
+		frame_system: SystemConfig {
 			code: WASM_BINARY.to_vec(),
 			changes_trie_config: Default::default(),
-		}),
-		pallet_balances: Some(BalancesConfig {
+		},
+		pallet_balances: BalancesConfig {
 			balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(),
-		}),
-		pallet_aura: Some(AuraConfig {
+		},
+		pallet_aura: AuraConfig {
 			authorities: Vec::new(),
-		}),
-		pallet_grandpa: Some(GrandpaConfig {
+		},
+		pallet_grandpa: GrandpaConfig {
 			authorities: Vec::new(),
-		}),
-		pallet_sudo: Some(SudoConfig { key: root_key }),
-		pallet_session: Some(SessionConfig {
+		},
+		pallet_sudo: SudoConfig { key: root_key },
+		pallet_session: SessionConfig {
 			keys: initial_authorities
 				.iter()
 				.map(|x| (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone())))
 				.collect::<Vec<_>>(),
-		}),
-		pallet_bridge_grandpa_Instance1: Some(BridgeWestendGrandpaConfig {
+		},
+		pallet_bridge_grandpa_Instance1: BridgeWestendGrandpaConfig {
 			// for our deployments to avoid multiple same-nonces transactions:
 			// //Alice is already used to initialize Rialto<->Millau bridge
 			// => let's use //George to initialize Westend->Millau bridge
 			owner: Some(get_account_id_from_seed::<sr25519::Public>("George")),
 			..Default::default()
-		}),
+		},
 	}
 }

156 changes: 100 additions & 56 deletions bin/millau/node/src/service.rs
@@ -30,11 +30,13 @@
 
 use millau_runtime::{self, opaque::Block, RuntimeApi};
 use sc_client_api::{ExecutorProvider, RemoteBackend};
+use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
 use sc_executor::native_executor_instance;
 pub use sc_executor::NativeExecutor;
 use sc_finality_grandpa::SharedVoterState;
 use sc_keystore::LocalKeystore;
 use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
+use sc_telemetry::{Telemetry, TelemetryWorker};
 use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
 use sp_inherents::InherentDataProviders;
 use std::sync::Arc;
@@ -70,19 +72,38 @@ pub fn new_partial(
 				AuraPair,
 			>,
 			sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
+			Option<Telemetry>,
 		),
 	>,
 	ServiceError,
 > {
 	if config.keystore_remote.is_some() {
 		return Err(ServiceError::Other("Remote Keystores are not supported.".to_string()));
 	}
-	let inherent_data_providers = sp_inherents::InherentDataProviders::new();
+	let inherent_data_providers = InherentDataProviders::new();
 
+	let telemetry = config
+		.telemetry_endpoints
+		.clone()
+		.filter(|x| !x.is_empty())
+		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
+			let worker = TelemetryWorker::new(16)?;
+			let telemetry = worker.handle().new_telemetry(endpoints);
+			Ok((worker, telemetry))
+		})
+		.transpose()?;
+
-	let (client, backend, keystore_container, task_manager) =
-		sc_service::new_full_parts::<Block, RuntimeApi, Executor>(&config)?;
+	let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::<Block, RuntimeApi, Executor>(
+		&config,
+		telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+	)?;
 	let client = Arc::new(client);
 
+	let telemetry = telemetry.map(|(worker, telemetry)| {
+		task_manager.spawn_handle().spawn("telemetry", worker.run());
+		telemetry
+	});
+
 	let select_chain = sc_consensus::LongestChain::new(backend.clone());
 
 	let transaction_pool = sc_transaction_pool::BasicPool::new_full(
@@ -93,22 +114,28 @@ pub fn new_partial(
 		client.clone(),
 	);
 
-	let (grandpa_block_import, grandpa_link) =
-		sc_finality_grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain.clone())?;
+	let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import(
+		client.clone(),
+		&(client.clone() as Arc<_>),
+		select_chain.clone(),
+		telemetry.as_ref().map(|x| x.handle()),
+	)?;
 
 	let aura_block_import =
 		sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone());
 
-	let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>(
-		sc_consensus_aura::slot_duration(&*client)?,
-		aura_block_import.clone(),
-		Some(Box::new(grandpa_block_import)),
-		client.clone(),
-		inherent_data_providers.clone(),
-		&task_manager.spawn_essential_handle(),
-		config.prometheus_registry(),
-		sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()),
-	)?;
+	let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _>(ImportQueueParams {
+		block_import: aura_block_import.clone(),
+		justification_import: Some(Box::new(grandpa_block_import)),
+		client: client.clone(),
+		inherent_data_providers: inherent_data_providers.clone(),
+		spawner: &task_manager.spawn_essential_handle(),
+		can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()),
+		slot_duration: sc_consensus_aura::slot_duration(&*client)?,
+		registry: config.prometheus_registry(),
+		check_for_equivocation: Default::default(),
+		telemetry: telemetry.as_ref().map(|x| x.handle()),
+	})?;
 
 	Ok(sc_service::PartialComponents {
 		client,
@@ -119,7 +146,7 @@ pub fn new_partial(
 		select_chain,
 		transaction_pool,
 		inherent_data_providers,
-		other: (aura_block_import, grandpa_link),
+		other: (aura_block_import, grandpa_link, telemetry),
 	})
 }

@@ -141,7 +168,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 		select_chain,
 		transaction_pool,
 		inherent_data_providers,
-		other: (block_import, grandpa_link),
+		other: (block_import, grandpa_link, mut telemetry),
 	} = new_partial(&config)?;
 
 	if let Some(url) = &config.keystore_remote {
@@ -173,13 +200,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 	})?;
 
 	if config.offchain_worker.enabled {
-		sc_service::build_offchain_workers(
-			&config,
-			backend.clone(),
-			task_manager.spawn_handle(),
-			client.clone(),
-			network.clone(),
-		);
+		sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone());
 	}
 
 	let role = config.role.clone();
@@ -265,7 +286,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 		})
 	};
 
-	let (_rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
+	let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
 		network: network.clone(),
 		client: client.clone(),
 		keystore: keystore_container.sync_keystore(),
@@ -278,32 +299,35 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 		network_status_sinks,
 		system_rpc_tx,
 		config,
-		telemetry_span: None,
+		telemetry: telemetry.as_mut(),
 	})?;
 
 	if role.is_authority() {
-		let proposer = sc_basic_authorship::ProposerFactory::new(
+		let proposer_factory = sc_basic_authorship::ProposerFactory::new(
 			task_manager.spawn_handle(),
 			client.clone(),
 			transaction_pool,
 			prometheus_registry.as_ref(),
+			telemetry.as_ref().map(|x| x.handle()),
 		);
 
 		let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone());
 
-		let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _, _>(
-			sc_consensus_aura::slot_duration(&*client)?,
-			client.clone(),
+		let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _>(StartAuraParams {
+			slot_duration: sc_consensus_aura::slot_duration(&*client)?,
+			client: client.clone(),
 			select_chain,
 			block_import,
-			proposer,
-			network.clone(),
+			proposer_factory,
 			inherent_data_providers,
 			force_authoring,
 			backoff_authoring_blocks,
-			keystore_container.sync_keystore(),
+			keystore: keystore_container.sync_keystore(),
 			can_author_with,
-		)?;
+			sync_oracle: network.clone(),
+			block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
+			telemetry: telemetry.as_ref().map(|x| x.handle()),
+		})?;
 
 		// the AURA authoring task is considered essential, i.e. if it
 		// fails we take down the service with it.
@@ -326,6 +350,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 		observer_enabled: false,
 		keystore,
 		is_authority: role.is_authority(),
+		telemetry: telemetry.as_ref().map(|x| x.handle()),
 	};
 
 	if enable_grandpa {
@@ -339,10 +364,10 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 			config: grandpa_config,
 			link: grandpa_link,
 			network,
-			telemetry_on_connect: telemetry_connection_notifier.map(|x| x.on_connect_stream()),
 			voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(),
 			prometheus_registry,
 			shared_voter_state: SharedVoterState::empty(),
+			telemetry: telemetry.as_ref().map(|x| x.handle()),
 		};
 
 		// the GRANDPA voter task is considered infallible, i.e.
@@ -358,8 +383,27 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 
 /// Builds a new service for a light client.
 pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError> {
+	let telemetry = config
+		.telemetry_endpoints
+		.clone()
+		.filter(|x| !x.is_empty())
+		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
+			let worker = TelemetryWorker::new(16)?;
+			let telemetry = worker.handle().new_telemetry(endpoints);
+			Ok((worker, telemetry))
+		})
+		.transpose()?;
+
 	let (client, backend, keystore_container, mut task_manager, on_demand) =
-		sc_service::new_light_parts::<Block, RuntimeApi, Executor>(&config)?;
+		sc_service::new_light_parts::<Block, RuntimeApi, Executor>(
+			&config,
+			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+		)?;
+
+	let mut telemetry = telemetry.map(|(worker, telemetry)| {
+		task_manager.spawn_handle().spawn("telemetry", worker.run());
+		telemetry
+	});
 
 	config
 		.network
@@ -376,22 +420,28 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
 		on_demand.clone(),
 	));
 
-	let (grandpa_block_import, _) =
-		sc_finality_grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain)?;
+	let (grandpa_block_import, _) = sc_finality_grandpa::block_import(
+		client.clone(),
+		&(client.clone() as Arc<_>),
+		select_chain,
+		telemetry.as_ref().map(|x| x.handle()),
+	)?;
 
 	let aura_block_import =
 		sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone());
 
-	let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>(
-		sc_consensus_aura::slot_duration(&*client)?,
-		aura_block_import,
-		Some(Box::new(grandpa_block_import)),
-		client.clone(),
-		InherentDataProviders::new(),
-		&task_manager.spawn_essential_handle(),
-		config.prometheus_registry(),
-		sp_consensus::NeverCanAuthor,
-	)?;
+	let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _>(ImportQueueParams {
+		block_import: aura_block_import,
+		justification_import: Some(Box::new(grandpa_block_import)),
+		client: client.clone(),
+		inherent_data_providers: InherentDataProviders::new(),
+		spawner: &task_manager.spawn_essential_handle(),
+		can_author_with: sp_consensus::NeverCanAuthor,
+		slot_duration: sc_consensus_aura::slot_duration(&*client)?,
+		registry: config.prometheus_registry(),
+		check_for_equivocation: Default::default(),
+		telemetry: telemetry.as_ref().map(|x| x.handle()),
+	})?;
 
 	let (network, network_status_sinks, system_rpc_tx, network_starter) =
 		sc_service::build_network(sc_service::BuildNetworkParams {
@@ -405,13 +455,7 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
 	})?;
 
 	if config.offchain_worker.enabled {
-		sc_service::build_offchain_workers(
-			&config,
-			backend.clone(),
-			task_manager.spawn_handle(),
-			client.clone(),
-			network.clone(),
-		);
+		sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone());
 	}
 
 	sc_service::spawn_tasks(sc_service::SpawnTasksParams {
@@ -427,7 +471,7 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
 		network,
 		network_status_sinks,
 		system_rpc_tx,
-		telemetry_span: None,
+		telemetry: telemetry.as_mut(),
 	})?;
 
 	network_starter.start_network();
