use crate::{
error::Error, DEFAULT_PROTOCOL_ID, MallocSizeOfWasm,
TelemetryConnectionSinks, RpcHandlers, NetworkStatusSinks,
start_rpc_servers, build_network_future, TransactionPoolAdapter, TaskManager, SpawnTaskHandle,
metrics::MetricsService,
client::{light, Client, ClientConfig},
config::{Configuration, KeystoreConfig, PrometheusConfig},
};
use sc_client_api::{
light::RemoteBlockchain, ForkBlocks, BadBlocks, UsageProvider, ExecutorProvider,
};
use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender};
use sc_chain_spec::get_extension;
use sp_consensus::{
block_validation::{BlockAnnounceValidator, DefaultBlockAnnounceValidator, Chain},
import_queue::ImportQueue,
};
use futures::{FutureExt, StreamExt, future::ready, channel::oneshot};
use jsonrpc_pubsub::manager::SubscriptionManager;
use sc_keystore::Store as Keystore;
use log::{info, warn};
use sc_network::config::{Role, FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder};
use sc_network::NetworkService;
use parking_lot::RwLock;
use sp_runtime::generic::BlockId;
use sp_runtime::traits::{
Block as BlockT, SaturatedConversion, HashFor, Zero, BlockIdTo,
};
use sp_api::{ProvideRuntimeApi, CallApiAt};
use sc_executor::{NativeExecutor, NativeExecutionDispatch, RuntimeInfo};
use std::sync::Arc;
use wasm_timer::SystemTime;
use sc_telemetry::{telemetry, SUBSTRATE_INFO};
use sp_transaction_pool::MaintainedTransactionPool;
use prometheus_endpoint::Registry;
use sc_client_db::{Backend, DatabaseSettings};
use sp_core::traits::{CodeExecutor, SpawnNamed};
use sp_runtime::BuildStorage;
use sc_client_api::{
BlockBackend, BlockchainEvents,
backend::StorageProvider,
proof_provider::ProofProvider,
execution_extensions::ExecutionExtensions
};
use sp_blockchain::{HeaderMetadata, HeaderBackend};
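/// A utility trait for building an RPC extension given a `DenyUnsafe` instance and a
/// subscription task executor. This is useful because, at service definition time, it is not
/// yet known on which interface (safe or unsafe) the RPC extension will be exposed, so the
/// extension is built lazily for each interface.
///
/// Any `Fn(DenyUnsafe, SubscriptionTaskExecutor) -> R` closure satisfies this trait via the
/// blanket implementation below. A minimal sketch; `my_rpc_extensions` is a hypothetical,
/// node-specific constructor:
///
/// ```ignore
/// let rpc_extensions_builder =
///     |deny: sc_rpc::DenyUnsafe, _executor: sc_rpc::SubscriptionTaskExecutor| {
///         my_rpc_extensions(deny)
///     };
/// ```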
pub trait RpcExtensionBuilder {
type Output: sc_rpc::RpcExtension<sc_rpc::Metadata>;
fn build(
&self,
deny: sc_rpc::DenyUnsafe,
subscription_executor: sc_rpc::SubscriptionTaskExecutor,
) -> Self::Output;
}
impl<F, R> RpcExtensionBuilder for F where
F: Fn(sc_rpc::DenyUnsafe, sc_rpc::SubscriptionTaskExecutor) -> R,
R: sc_rpc::RpcExtension<sc_rpc::Metadata>,
{
type Output = R;
fn build(
&self,
deny: sc_rpc::DenyUnsafe,
subscription_executor: sc_rpc::SubscriptionTaskExecutor,
) -> Self::Output {
(*self)(deny, subscription_executor)
}
}
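/// An `RpcExtensionBuilder` that returns a clone of the wrapped RPC extension and ignores the
/// `DenyUnsafe` and subscription executor arguments.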
pub struct NoopRpcExtensionBuilder<R>(pub R);
impl<R> RpcExtensionBuilder for NoopRpcExtensionBuilder<R> where
R: Clone + sc_rpc::RpcExtension<sc_rpc::Metadata>,
{
type Output = R;
fn build(
&self,
_deny: sc_rpc::DenyUnsafe,
_subscription_executor: sc_rpc::SubscriptionTaskExecutor,
) -> Self::Output {
self.0.clone()
}
}
impl<R> From<R> for NoopRpcExtensionBuilder<R> where
R: sc_rpc::RpcExtension<sc_rpc::Metadata>,
{
fn from(e: R) -> NoopRpcExtensionBuilder<R> {
NoopRpcExtensionBuilder(e)
}
}
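/// Full client type: a `Client` backed by the on-disk database backend and the native call
/// executor.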
pub type TFullClient<TBl, TRtApi, TExecDisp> = Client<
TFullBackend<TBl>,
TFullCallExecutor<TBl, TExecDisp>,
TBl,
TRtApi,
>;
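/// Full client backend type (the on-disk database backend).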
pub type TFullBackend<TBl> = sc_client_db::Backend<TBl>;
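/// Full client call executor type (local execution with the native executor).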
pub type TFullCallExecutor<TBl, TExecDisp> = crate::client::LocalCallExecutor<
sc_client_db::Backend<TBl>,
NativeExecutor<TExecDisp>,
>;
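/// Light client type, using the default light backend.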
pub type TLightClient<TBl, TRtApi, TExecDisp> = TLightClientWithBackend<
TBl, TRtApi, TExecDisp, TLightBackend<TBl>
>;
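/// Light client backend type, backed by the on-disk light storage.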
pub type TLightBackend<TBl> = sc_light::Backend<
sc_client_db::light::LightStorage<TBl>,
HashFor<TBl>,
>;
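/// Light client call executor type.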
pub type TLightCallExecutor<TBl, TExecDisp> = sc_light::GenesisCallExecutor<
sc_light::Backend<
sc_client_db::light::LightStorage<TBl>,
HashFor<TBl>
>,
crate::client::LocalCallExecutor<
sc_light::Backend<
sc_client_db::light::LightStorage<TBl>,
HashFor<TBl>
>,
NativeExecutor<TExecDisp>
>,
>;
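/// Parts returned by `new_full_parts`: client, backend, keystore and task manager.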
type TFullParts<TBl, TRtApi, TExecDisp> = (
TFullClient<TBl, TRtApi, TExecDisp>,
Arc<TFullBackend<TBl>>,
Arc<RwLock<sc_keystore::Store>>,
TaskManager,
);
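/// Parts returned by `new_light_parts`: client, backend, keystore, task manager and on-demand
/// fetcher.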
type TLightParts<TBl, TRtApi, TExecDisp> = (
Arc<TLightClient<TBl, TRtApi, TExecDisp>>,
Arc<TLightBackend<TBl>>,
Arc<RwLock<sc_keystore::Store>>,
TaskManager,
Arc<OnDemand<TBl>>,
);
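/// Light client backend type generic over the hashing algorithm.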
pub type TLightBackendWithHash<TBl, THash> = sc_light::Backend<
sc_client_db::light::LightStorage<TBl>,
THash,
>;
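/// Light client type generic over the backend.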
pub type TLightClientWithBackend<TBl, TRtApi, TExecDisp, TBackend> = Client<
TBackend,
sc_light::GenesisCallExecutor<
TBackend,
crate::client::LocalCallExecutor<TBackend, NativeExecutor<TExecDisp>>,
>,
TBl,
TRtApi,
>;
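/// Creates a new full client for the given configuration, discarding the other parts returned
/// by `new_full_parts`.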
pub fn new_full_client<TBl, TRtApi, TExecDisp>(
config: &Configuration,
) -> Result<TFullClient<TBl, TRtApi, TExecDisp>, Error> where
TBl: BlockT,
TExecDisp: NativeExecutionDispatch + 'static,
{
new_full_parts(config).map(|parts| parts.0)
}
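/// Creates the keystore, task manager, native executor, client and backend for a full node
/// from the given configuration.
///
/// A minimal usage sketch; `Block`, `RuntimeApi` and `Executor` stand in for the node's own
/// runtime types (e.g. an executor declared with `sc_executor::native_executor_instance!`) and
/// are assumptions, not part of this module:
///
/// ```ignore
/// let (client, backend, keystore, mut task_manager) =
///     new_full_parts::<Block, RuntimeApi, Executor>(&config)?;
/// let client = Arc::new(client);
/// ```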
pub fn new_full_parts<TBl, TRtApi, TExecDisp>(
config: &Configuration,
) -> Result<TFullParts<TBl, TRtApi, TExecDisp>, Error> where
TBl: BlockT,
TExecDisp: NativeExecutionDispatch + 'static,
{
let keystore = match &config.keystore {
KeystoreConfig::Path { path, password } => Keystore::open(
path.clone(),
password.clone()
)?,
KeystoreConfig::InMemory => Keystore::new_in_memory(),
};
let task_manager = {
let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry);
TaskManager::new(config.task_executor.clone(), registry)?
};
let executor = NativeExecutor::<TExecDisp>::new(
config.wasm_method,
config.default_heap_pages,
config.max_runtime_instances,
);
let chain_spec = &config.chain_spec;
let fork_blocks = get_extension::<ForkBlocks<TBl>>(chain_spec.extensions())
.cloned()
.unwrap_or_default();
let bad_blocks = get_extension::<BadBlocks<TBl>>(chain_spec.extensions())
.cloned()
.unwrap_or_default();
let (client, backend) = {
let db_config = sc_client_db::DatabaseSettings {
state_cache_size: config.state_cache_size,
state_cache_child_ratio:
config.state_cache_child_ratio.map(|v| (v, 100)),
pruning: config.pruning.clone(),
source: config.database.clone(),
};
let extensions = sc_client_api::execution_extensions::ExecutionExtensions::new(
config.execution_strategies.clone(),
Some(keystore.clone()),
);
new_client(
db_config,
executor,
chain_spec.as_storage_builder(),
fork_blocks,
bad_blocks,
extensions,
Box::new(task_manager.spawn_handle()),
config.prometheus_config.as_ref().map(|config| config.registry.clone()),
ClientConfig {
offchain_worker_enabled: config.offchain_worker.enabled,
offchain_indexing_api: config.offchain_worker.indexing_enabled,
},
)?
};
Ok((client, backend, keystore, task_manager))
}
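/// Creates the client, backend, keystore, task manager and on-demand fetcher for a light node
/// from the given configuration. The returned parts mirror those of `new_full_parts`, with an
/// additional `OnDemand` handle used to fetch data from remote full nodes.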
pub fn new_light_parts<TBl, TRtApi, TExecDisp>(
config: &Configuration
) -> Result<TLightParts<TBl, TRtApi, TExecDisp>, Error> where
TBl: BlockT,
TExecDisp: NativeExecutionDispatch + 'static,
{
let task_manager = {
let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry);
TaskManager::new(config.task_executor.clone(), registry)?
};
let keystore = match &config.keystore {
KeystoreConfig::Path { path, password } => Keystore::open(
path.clone(),
password.clone()
)?,
KeystoreConfig::InMemory => Keystore::new_in_memory(),
};
let executor = NativeExecutor::<TExecDisp>::new(
config.wasm_method,
config.default_heap_pages,
config.max_runtime_instances,
);
let db_storage = {
let db_settings = sc_client_db::DatabaseSettings {
state_cache_size: config.state_cache_size,
state_cache_child_ratio:
config.state_cache_child_ratio.map(|v| (v, 100)),
pruning: config.pruning.clone(),
source: config.database.clone(),
};
sc_client_db::light::LightStorage::new(db_settings)?
};
let light_blockchain = sc_light::new_light_blockchain(db_storage);
let fetch_checker = Arc::new(
sc_light::new_fetch_checker::<_, TBl, _>(
light_blockchain.clone(),
executor.clone(),
Box::new(task_manager.spawn_handle()),
),
);
let on_demand = Arc::new(sc_network::config::OnDemand::new(fetch_checker));
let backend = sc_light::new_light_backend(light_blockchain);
let client = Arc::new(light::new_light(
backend.clone(),
config.chain_spec.as_storage_builder(),
executor,
Box::new(task_manager.spawn_handle()),
config.prometheus_config.as_ref().map(|config| config.registry.clone()),
)?);
Ok((client, backend, keystore, task_manager, on_demand))
}
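/// Creates a database-backed client and its backend from the given database settings,
/// executor, genesis storage and execution extensions. This is the lower-level constructor
/// used internally by `new_full_parts`.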
pub fn new_client<E, Block, RA>(
settings: DatabaseSettings,
executor: E,
genesis_storage: &dyn BuildStorage,
fork_blocks: ForkBlocks<Block>,
bad_blocks: BadBlocks<Block>,
execution_extensions: ExecutionExtensions<Block>,
spawn_handle: Box<dyn SpawnNamed>,
prometheus_registry: Option<Registry>,
config: ClientConfig,
) -> Result<(
crate::client::Client<
Backend<Block>,
crate::client::LocalCallExecutor<Backend<Block>, E>,
Block,
RA,
>,
Arc<Backend<Block>>,
),
sp_blockchain::Error,
>
where
Block: BlockT,
E: CodeExecutor + RuntimeInfo,
{
const CANONICALIZATION_DELAY: u64 = 4096;
let backend = Arc::new(Backend::new(settings, CANONICALIZATION_DELAY)?);
let executor = crate::client::LocalCallExecutor::new(backend.clone(), executor, spawn_handle, config.clone());
Ok((
crate::client::Client::new(
backend.clone(),
executor,
genesis_storage,
fork_blocks,
bad_blocks,
execution_extensions,
prometheus_registry,
config,
)?,
backend,
))
}
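/// Parameters passed to `spawn_tasks`.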
pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> {
pub config: Configuration,
pub client: Arc<TCl>,
pub backend: Arc<Backend>,
pub task_manager: &'a mut TaskManager,
pub keystore: Arc<RwLock<Keystore>>,
pub on_demand: Option<Arc<OnDemand<TBl>>>,
pub transaction_pool: Arc<TExPool>,
pub rpc_extensions_builder: Box<dyn RpcExtensionBuilder<Output = TRpc> + Send>,
pub remote_blockchain: Option<Arc<dyn RemoteBlockchain<TBl>>>,
pub network: Arc<NetworkService<TBl, <TBl as BlockT>::Hash>>,
pub network_status_sinks: NetworkStatusSinks<TBl>,
pub system_rpc_tx: TracingUnboundedSender<sc_rpc::system::Request<TBl>>,
pub telemetry_connection_sinks: TelemetryConnectionSinks,
}
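/// Builds the off-chain workers if the backend provides off-chain storage and spawns the
/// notification future that drives them on block import; returns `None` (with a warning)
/// otherwise.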
pub fn build_offchain_workers<TBl, TBackend, TCl>(
config: &Configuration,
backend: Arc<TBackend>,
spawn_handle: SpawnTaskHandle,
client: Arc<TCl>,
network: Arc<NetworkService<TBl, <TBl as BlockT>::Hash>>,
) -> Option<Arc<sc_offchain::OffchainWorkers<TCl, TBackend::OffchainStorage, TBl>>>
where
TBl: BlockT, TBackend: sc_client_api::Backend<TBl>,
<TBackend as sc_client_api::Backend<TBl>>::OffchainStorage: 'static,
TCl: Send + Sync + ProvideRuntimeApi<TBl> + BlockchainEvents<TBl> + 'static,
<TCl as ProvideRuntimeApi<TBl>>::Api: sc_offchain::OffchainWorkerApi<TBl>,
{
let offchain_workers = match backend.offchain_storage() {
Some(db) => {
Some(Arc::new(sc_offchain::OffchainWorkers::new(client.clone(), db)))
},
None => {
warn!("Offchain workers disabled, due to lack of offchain storage support in backend.");
None
},
};
if let Some(offchain) = offchain_workers.clone() {
spawn_handle.spawn(
"offchain-notifications",
sc_offchain::notification_future(
config.role.is_authority(),
client.clone(),
offchain,
Clone::clone(&spawn_handle),
network.clone(),
)
);
}
offchain_workers
}
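/// Spawns the background tasks common to every node: transaction pool and transaction
/// propagation notifications, the metrics service, the RPC servers, telemetry and the
/// informant. Returns the in-memory `RpcHandlers` and keeps the spawned servers and telemetry
/// alive via `task_manager.keep_alive`.
///
/// A hedged sketch of how the parameters are typically wired together; every value on the
/// right-hand side is assumed to come from the other builder functions in this module and from
/// node-specific code:
///
/// ```ignore
/// let rpc_handlers = spawn_tasks(SpawnTasksParams {
///     config,
///     client: client.clone(),
///     backend: backend.clone(),
///     task_manager: &mut task_manager,
///     keystore: keystore.clone(),
///     on_demand: None,
///     transaction_pool: transaction_pool.clone(),
///     rpc_extensions_builder: Box::new(rpc_extensions_builder),
///     remote_blockchain: None,
///     network: network.clone(),
///     network_status_sinks: network_status_sinks.clone(),
///     system_rpc_tx,
///     telemetry_connection_sinks: telemetry_connection_sinks.clone(),
/// })?;
/// ```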
pub fn spawn_tasks<TBl, TBackend, TExPool, TRpc, TCl>(
params: SpawnTasksParams<TBl, TCl, TExPool, TRpc, TBackend>,
) -> Result<RpcHandlers, Error>
where
TCl: ProvideRuntimeApi<TBl> + HeaderMetadata<TBl, Error=sp_blockchain::Error> + Chain<TBl> +
BlockBackend<TBl> + BlockIdTo<TBl, Error=sp_blockchain::Error> + ProofProvider<TBl> +
HeaderBackend<TBl> + BlockchainEvents<TBl> + ExecutorProvider<TBl> + UsageProvider<TBl> +
StorageProvider<TBl, TBackend> + CallApiAt<TBl, Error=sp_blockchain::Error> +
Send + 'static,
<TCl as ProvideRuntimeApi<TBl>>::Api:
sp_api::Metadata<TBl> +
sc_offchain::OffchainWorkerApi<TBl> +
sp_transaction_pool::runtime_api::TaggedTransactionQueue<TBl> +
sp_session::SessionKeys<TBl> +
sp_api::ApiErrorExt<Error = sp_blockchain::Error> +
sp_api::ApiExt<TBl, StateBackend = TBackend::State>,
TBl: BlockT,
TBackend: 'static + sc_client_api::backend::Backend<TBl> + Send,
TExPool: MaintainedTransactionPool<Block=TBl, Hash = <TBl as BlockT>::Hash> +
MallocSizeOfWasm + 'static,
TRpc: sc_rpc::RpcExtension<sc_rpc::Metadata>
{
let SpawnTasksParams {
mut config,
task_manager,
client,
on_demand,
backend,
keystore,
transaction_pool,
rpc_extensions_builder,
remote_blockchain,
network,
network_status_sinks,
system_rpc_tx,
telemetry_connection_sinks,
} = params;
let chain_info = client.usage_info().chain;
sp_session::generate_initial_session_keys(
client.clone(),
&BlockId::Hash(chain_info.best_hash),
config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(),
)?;
info!("📦 Highest known block at #{}", chain_info.best_number);
telemetry!(
SUBSTRATE_INFO;
"node.start";
"height" => chain_info.best_number.saturated_into::<u64>(),
"best" => ?chain_info.best_hash
);
let spawn_handle = task_manager.spawn_handle();
spawn_handle.spawn(
"txpool-notifications",
sc_transaction_pool::notification_future(client.clone(), transaction_pool.clone()),
);
spawn_handle.spawn(
"on-transaction-imported",
transaction_notifications(transaction_pool.clone(), network.clone()),
);
let metrics_service = if let Some(PrometheusConfig { port, registry }) =
config.prometheus_config.clone()
{
let metrics = MetricsService::with_prometheus(&registry, &config)?;
spawn_handle.spawn(
"prometheus-endpoint",
prometheus_endpoint::init_prometheus(port, registry).map(drop)
);
metrics
} else {
MetricsService::new()
};
spawn_handle.spawn("telemetry-periodic-send",
metrics_service.run(
client.clone(),
transaction_pool.clone(),
network_status_sinks.clone()
)
);
let gen_handler = |
deny_unsafe: sc_rpc::DenyUnsafe,
rpc_middleware: sc_rpc_server::RpcMiddleware
| gen_handler(
deny_unsafe, rpc_middleware, &config, task_manager.spawn_handle(),
client.clone(), transaction_pool.clone(), keystore.clone(),
on_demand.clone(), remote_blockchain.clone(), &*rpc_extensions_builder,
backend.offchain_storage(), system_rpc_tx.clone()
);
let rpc_metrics = sc_rpc_server::RpcMetrics::new(config.prometheus_registry()).ok();
let rpc = start_rpc_servers(&config, gen_handler, rpc_metrics.as_ref())?;
let rpc_handlers = RpcHandlers(Arc::new(gen_handler(
sc_rpc::DenyUnsafe::No,
sc_rpc_server::RpcMiddleware::new(rpc_metrics.as_ref().cloned(), "inbrowser")
).into()));
let telemetry = config.telemetry_endpoints.clone().and_then(|endpoints| {
if endpoints.is_empty() {
return None;
}
let genesis_hash = match client.block_hash(Zero::zero()) {
Ok(Some(hash)) => hash,
_ => Default::default(),
};
Some(build_telemetry(
&mut config, endpoints, telemetry_connection_sinks.clone(), network.clone(),
task_manager.spawn_handle(), genesis_hash,
))
});
spawn_handle.spawn("informant", sc_informant::build(
client.clone(),
network_status_sinks.status.clone(),
transaction_pool.clone(),
config.informant_output_format,
));
task_manager.keep_alive((telemetry, config.base_path, rpc, rpc_handlers.clone()));
Ok(rpc_handlers)
}
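/// Propagates transactions imported into the pool over the network and reports the pool status
/// to telemetry on every import.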
async fn transaction_notifications<TBl, TExPool>(
transaction_pool: Arc<TExPool>,
network: Arc<NetworkService<TBl, <TBl as BlockT>::Hash>>
)
where
TBl: BlockT,
TExPool: MaintainedTransactionPool<Block=TBl, Hash = <TBl as BlockT>::Hash>,
{
transaction_pool.import_notification_stream()
.for_each(move |hash| {
network.propagate_transaction(hash);
let status = transaction_pool.status();
telemetry!(SUBSTRATE_INFO; "txpool.import";
"ready" => status.ready,
"future" => status.future
);
ready(())
})
.await;
}
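/// Initialises telemetry for the given endpoints, spawns the telemetry worker that reports
/// `system.connected` on every (re)connection, and returns the telemetry handle.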
fn build_telemetry<TBl: BlockT>(
config: &mut Configuration,
endpoints: sc_telemetry::TelemetryEndpoints,
telemetry_connection_sinks: TelemetryConnectionSinks,
network: Arc<NetworkService<TBl, <TBl as BlockT>::Hash>>,
spawn_handle: SpawnTaskHandle,
genesis_hash: <TBl as BlockT>::Hash,
) -> sc_telemetry::Telemetry {
let is_authority = config.role.is_authority();
let network_id = network.local_peer_id().to_base58();
let name = config.network.node_name.clone();
let impl_name = config.impl_name.clone();
let impl_version = config.impl_version.clone();
let chain_name = config.chain_spec.name().to_owned();
let telemetry = sc_telemetry::init_telemetry(sc_telemetry::TelemetryConfig {
endpoints,
wasm_external_transport: config.telemetry_external_transport.take(),
});
let startup_time = SystemTime::UNIX_EPOCH.elapsed()
.map(|dur| dur.as_millis())
.unwrap_or(0);
spawn_handle.spawn(
"telemetry-worker",
telemetry.clone()
.for_each(move |event| {
let sc_telemetry::TelemetryEvent::Connected = event;
telemetry!(SUBSTRATE_INFO; "system.connected";
"name" => name.clone(),
"implementation" => impl_name.clone(),
"version" => impl_version.clone(),
"config" => "",
"chain" => chain_name.clone(),
"genesis_hash" => ?genesis_hash,
"authority" => is_authority,
"startup_time" => startup_time,
"network_id" => network_id.clone()
);
telemetry_connection_sinks.0.lock().retain(|sink| {
sink.unbounded_send(()).is_ok()
});
ready(())
})
);
telemetry
}
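/// Assembles the RPC handler exposed on a single interface: system, chain, state, author,
/// optional off-chain and any custom RPC extensions, wired to either the full or the light
/// client depending on whether a remote blockchain and an on-demand fetcher are provided.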
fn gen_handler<TBl, TBackend, TExPool, TRpc, TCl>(
deny_unsafe: sc_rpc::DenyUnsafe,
rpc_middleware: sc_rpc_server::RpcMiddleware,
config: &Configuration,
spawn_handle: SpawnTaskHandle,
client: Arc<TCl>,
transaction_pool: Arc<TExPool>,
keystore: Arc<RwLock<Keystore>>,
on_demand: Option<Arc<OnDemand<TBl>>>,
remote_blockchain: Option<Arc<dyn RemoteBlockchain<TBl>>>,
rpc_extensions_builder: &(dyn RpcExtensionBuilder<Output = TRpc> + Send),
offchain_storage: Option<<TBackend as sc_client_api::backend::Backend<TBl>>::OffchainStorage>,
system_rpc_tx: TracingUnboundedSender<sc_rpc::system::Request<TBl>>
) -> sc_rpc_server::RpcHandler<sc_rpc::Metadata>
where
TBl: BlockT,
TCl: ProvideRuntimeApi<TBl> + BlockchainEvents<TBl> + HeaderBackend<TBl> +
HeaderMetadata<TBl, Error=sp_blockchain::Error> + ExecutorProvider<TBl> +
CallApiAt<TBl, Error=sp_blockchain::Error> + ProofProvider<TBl> +
StorageProvider<TBl, TBackend> + BlockBackend<TBl> + Send + Sync + 'static,
TExPool: MaintainedTransactionPool<Block=TBl, Hash = <TBl as BlockT>::Hash> + 'static,
TBackend: sc_client_api::backend::Backend<TBl> + 'static,
TRpc: sc_rpc::RpcExtension<sc_rpc::Metadata>,
<TCl as ProvideRuntimeApi<TBl>>::Api:
sp_session::SessionKeys<TBl> +
sp_api::Metadata<TBl, Error = sp_blockchain::Error>,
{
use sc_rpc::{chain, state, author, system, offchain};
let system_info = sc_rpc::system::SystemInfo {
chain_name: config.chain_spec.name().into(),
impl_name: config.impl_name.clone(),
impl_version: config.impl_version.clone(),
properties: config.chain_spec.properties(),
chain_type: config.chain_spec.chain_type(),
};
let task_executor = sc_rpc::SubscriptionTaskExecutor::new(spawn_handle);
let subscriptions = SubscriptionManager::new(Arc::new(task_executor.clone()));
let (chain, state, child_state) = if let (Some(remote_blockchain), Some(on_demand)) =
(remote_blockchain, on_demand) {
let chain = sc_rpc::chain::new_light(
client.clone(),
subscriptions.clone(),
remote_blockchain.clone(),
on_demand.clone(),
);
let (state, child_state) = sc_rpc::state::new_light(
client.clone(),
subscriptions.clone(),
remote_blockchain.clone(),
on_demand,
);
(chain, state, child_state)
} else {
let chain = sc_rpc::chain::new_full(client.clone(), subscriptions.clone());
let (state, child_state) = sc_rpc::state::new_full(client.clone(), subscriptions.clone());
(chain, state, child_state)
};
let author = sc_rpc::author::Author::new(
client,
transaction_pool,
subscriptions,
keystore,
deny_unsafe,
);
let system = system::System::new(system_info, system_rpc_tx, deny_unsafe);
let maybe_offchain_rpc = offchain_storage.map(|storage| {
let offchain = sc_rpc::offchain::Offchain::new(storage, deny_unsafe);
offchain::OffchainApi::to_delegate(offchain)
});
sc_rpc_server::rpc_handler(
(
state::StateApi::to_delegate(state),
state::ChildStateApi::to_delegate(child_state),
chain::ChainApi::to_delegate(chain),
maybe_offchain_rpc,
author::AuthorApi::to_delegate(author),
system::SystemApi::to_delegate(system),
rpc_extensions_builder.build(deny_unsafe, task_executor),
),
rpc_middleware
)
}
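/// Parameters passed to `build_network`.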
pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> {
pub config: &'a Configuration,
pub client: Arc<TCl>,
pub transaction_pool: Arc<TExPool>,
pub spawn_handle: SpawnTaskHandle,
pub import_queue: TImpQu,
pub on_demand: Option<Arc<OnDemand<TBl>>>,
pub block_announce_validator_builder: Option<Box<
dyn FnOnce(Arc<TCl>) -> Box<dyn BlockAnnounceValidator<TBl> + Send> + Send
>>,
pub finality_proof_request_builder: Option<BoxFinalityProofRequestBuilder<TBl>>,
pub finality_proof_provider: Option<Arc<dyn FinalityProofProvider<TBl>>>,
}
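/// Builds the network worker and service for the node, spawns the worker task (gated on a
/// `NetworkStarter` signal) and returns the network service handle, the status sinks, the
/// sender for system RPC requests and the `NetworkStarter`.
///
/// A minimal sketch; `config`, `client`, `transaction_pool`, `import_queue` and `task_manager`
/// are assumed to come from the other builder functions and node-specific code:
///
/// ```ignore
/// let (network, network_status_sinks, system_rpc_tx, network_starter) =
///     build_network(BuildNetworkParams {
///         config: &config,
///         client: client.clone(),
///         transaction_pool: transaction_pool.clone(),
///         spawn_handle: task_manager.spawn_handle(),
///         import_queue,
///         on_demand: None,
///         block_announce_validator_builder: None,
///         finality_proof_request_builder: None,
///         finality_proof_provider: None,
///     })?;
/// // ... spawn the remaining service tasks, then explicitly start the network:
/// network_starter.start_network();
/// ```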
pub fn build_network<TBl, TExPool, TImpQu, TCl>(
params: BuildNetworkParams<TBl, TExPool, TImpQu, TCl>
) -> Result<
(
Arc<NetworkService<TBl, <TBl as BlockT>::Hash>>,
NetworkStatusSinks<TBl>,
TracingUnboundedSender<sc_rpc::system::Request<TBl>>,
NetworkStarter,
),
Error
>
where
TBl: BlockT,
TCl: ProvideRuntimeApi<TBl> + HeaderMetadata<TBl, Error=sp_blockchain::Error> + Chain<TBl> +
BlockBackend<TBl> + BlockIdTo<TBl, Error=sp_blockchain::Error> + ProofProvider<TBl> +
HeaderBackend<TBl> + BlockchainEvents<TBl> + 'static,
TExPool: MaintainedTransactionPool<Block=TBl, Hash = <TBl as BlockT>::Hash> + 'static,
TImpQu: ImportQueue<TBl> + 'static,
{
let BuildNetworkParams {
config, client, transaction_pool, spawn_handle, import_queue, on_demand,
block_announce_validator_builder, finality_proof_request_builder, finality_proof_provider,
} = params;
let transaction_pool_adapter = Arc::new(TransactionPoolAdapter {
imports_external_transactions: !matches!(config.role, Role::Light),
pool: transaction_pool,
client: client.clone(),
});
let protocol_id = {
let protocol_id_full = match config.chain_spec.protocol_id() {
Some(pid) => pid,
None => {
warn!("Using default protocol ID {:?} because none is configured in the \
chain specs", DEFAULT_PROTOCOL_ID
);
DEFAULT_PROTOCOL_ID
}
};
sc_network::config::ProtocolId::from(protocol_id_full)
};
let block_announce_validator = if let Some(f) = block_announce_validator_builder {
f(client.clone())
} else {
Box::new(DefaultBlockAnnounceValidator)
};
let network_params = sc_network::config::Params {
role: config.role.clone(),
executor: {
let spawn_handle = Clone::clone(&spawn_handle);
Some(Box::new(move |fut| {
spawn_handle.spawn("libp2p-node", fut);
}))
},
network_config: config.network.clone(),
chain: client.clone(),
finality_proof_provider,
finality_proof_request_builder,
on_demand,
transaction_pool: transaction_pool_adapter as _,
import_queue: Box::new(import_queue),
protocol_id,
block_announce_validator,
metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone())
};
let has_bootnodes = !network_params.network_config.boot_nodes.is_empty();
let network_mut = sc_network::NetworkWorker::new(network_params)?;
let network = network_mut.service().clone();
let network_status_sinks = NetworkStatusSinks::new();
let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc");
let future = build_network_future(
config.role.clone(),
network_mut,
client,
network_status_sinks.clone(),
system_rpc_rx,
has_bootnodes,
config.announce_block,
);
let (network_start_tx, network_start_rx) = oneshot::channel();
spawn_handle.spawn_blocking("network-worker", async move {
if network_start_rx.await.is_err() {
debug_assert!(false);
log::warn!(
"The NetworkStart returned as part of `build_network` has been silently dropped"
);
return;
}
future.await
});
Ok((network, network_status_sinks, system_rpc_tx, NetworkStarter(network_start_tx)))
}
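/// A handle for explicitly starting the network once all other service tasks have been
/// spawned. Dropping it without calling `start_network` leaves the network worker idle and
/// logs a warning.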
#[must_use]
pub struct NetworkStarter(oneshot::Sender<()>);
impl NetworkStarter {
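	/// Starts the network worker.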
pub fn start_network(self) {
let _ = self.0.send(());
}
}