diff --git a/.gitignore b/.gitignore index a9ea2b9204..5b3ea229ff 100644 --- a/.gitignore +++ b/.gitignore @@ -20,4 +20,5 @@ cobertura.xml /test/integration-tests/template-provider **/template-provider stratum-message-generator -*.log \ No newline at end of file +*.log +.ra-target diff --git a/common/Cargo.lock b/common/Cargo.lock index 66519658df..e86792d397 100644 --- a/common/Cargo.lock +++ b/common/Cargo.lock @@ -562,6 +562,19 @@ version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +[[package]] +name = "handlers_sv2" +version = "0.1.0" +dependencies = [ + "binary_sv2", + "common_messages_sv2", + "job_declaration_sv2", + "mining_sv2", + "parsers_sv2", + "template_distribution_sv2", + "trait-variant", +] + [[package]] name = "hashbrown" version = "0.15.4" @@ -956,6 +969,7 @@ dependencies = [ "channels_sv2", "codec_sv2", "common_messages_sv2", + "handlers_sv2", "hex-conservative 0.3.0", "job_declaration_sv2", "mining_sv2", @@ -1013,7 +1027,7 @@ version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" dependencies = [ - "bitcoin_hashes 0.13.0", + "bitcoin_hashes 0.14.0", "secp256k1-sys 0.10.1", ] @@ -1254,6 +1268,17 @@ dependencies = [ "once_cell", ] +[[package]] +name = "trait-variant" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "typenum" version = "1.18.0" diff --git a/protocols/Cargo.toml b/protocols/Cargo.toml index 3380c8827c..2b4c8727e5 100644 --- a/protocols/Cargo.toml +++ b/protocols/Cargo.toml @@ -17,7 +17,8 @@ members = [ "v2/sv2-ffi", "v2/roles-logic-sv2", "v2/channels-sv2", - "v2/parsers-sv2", + "v2/parsers-sv2", + "v2/handlers-sv2", ] 
[profile.dev] diff --git a/protocols/v2/channels-sv2/src/client/extended.rs b/protocols/v2/channels-sv2/src/client/extended.rs index 1af2f2e03b..2928c4e7cb 100644 --- a/protocols/v2/channels-sv2/src/client/extended.rs +++ b/protocols/v2/channels-sv2/src/client/extended.rs @@ -177,7 +177,7 @@ impl<'a> ExtendedChannel<'a> { /// Called when a `NewExtendedMiningJob` message is received from upstream. pub fn on_new_extended_mining_job( &mut self, - new_extended_mining_job: NewExtendedMiningJob<'a>, + new_extended_mining_job: NewExtendedMiningJob<'static>, ) { match new_extended_mining_job.min_ntime.clone().into_inner() { Some(_min_ntime) => { @@ -208,7 +208,7 @@ impl<'a> ExtendedChannel<'a> { /// The chain tip information is not kept in the channel state. pub fn on_set_new_prev_hash( &mut self, - set_new_prev_hash: SetNewPrevHashMp<'a>, + set_new_prev_hash: SetNewPrevHashMp<'static>, ) -> Result<(), ExtendedChannelError> { match self.future_jobs.remove(&set_new_prev_hash.job_id) { Some(mut activated_job) => { diff --git a/protocols/v2/handlers-sv2/Cargo.toml b/protocols/v2/handlers-sv2/Cargo.toml new file mode 100644 index 0000000000..492f8bcc69 --- /dev/null +++ b/protocols/v2/handlers-sv2/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "handlers_sv2" +version = "0.1.0" +authors = ["The Stratum V2 Developers"] +edition = "2018" +readme = "README.md" +description = "Sv2 Message handlers" +documentation = "https://docs.rs/handlers_sv2" +license = "MIT OR Apache-2.0" +repository = "https://github.com/stratum-mining/stratum" +homepage = "https://stratumprotocol.org" +keywords = ["stratum", "mining", "bitcoin", "protocol"] + +[dependencies] +trait-variant = "0.1.2" +parsers_sv2 = { path = "../parsers-sv2", version = "^0.1.0"} +binary_sv2 = { path = "../binary-sv2", version = "^3.0.0" } +common_messages_sv2 = { path = "../subprotocols/common-messages", version = "^5.0.0" } +mining_sv2 = { path = "../subprotocols/mining", version = "^4.0.0" } +template_distribution_sv2 = { 
path = "../subprotocols/template-distribution", version = "^3.0.0" } +job_declaration_sv2 = { path = "../subprotocols/job-declaration", version = "^4.0.0" } diff --git a/protocols/v2/handlers-sv2/src/common.rs b/protocols/v2/handlers-sv2/src/common.rs new file mode 100644 index 0000000000..b8987b7d7d --- /dev/null +++ b/protocols/v2/handlers-sv2/src/common.rs @@ -0,0 +1,162 @@ +use crate::error::HandlerError as Error; +use common_messages_sv2::{ + ChannelEndpointChanged, Reconnect, SetupConnectionError, SetupConnectionSuccess, *, +}; +use core::convert::TryInto; +use parsers_sv2::CommonMessages; + +pub trait ParseCommonMessagesFromUpstreamSync { + fn handle_common_message(&mut self, message_type: u8, payload: &mut [u8]) -> Result<(), Error> { + let parsed: CommonMessages<'_> = (message_type, payload).try_into()?; + self.dispatch_common_message(parsed) + } + + fn dispatch_common_message(&mut self, message: CommonMessages<'_>) -> Result<(), Error> { + match message { + CommonMessages::SetupConnectionSuccess(msg) => { + self.handle_setup_connection_success(msg) + } + CommonMessages::SetupConnectionError(msg) => self.handle_setup_connection_error(msg), + CommonMessages::ChannelEndpointChanged(msg) => { + self.handle_channel_endpoint_changed(msg) + } + CommonMessages::Reconnect(msg) => self.handle_reconnect(msg), + + CommonMessages::SetupConnection(_) => { + Err(Error::UnexpectedMessage(MESSAGE_TYPE_SETUP_CONNECTION)) + } + } + } + + fn handle_setup_connection_success(&mut self, msg: SetupConnectionSuccess) + -> Result<(), Error>; + + fn handle_setup_connection_error(&mut self, msg: SetupConnectionError) -> Result<(), Error>; + + fn handle_channel_endpoint_changed(&mut self, msg: ChannelEndpointChanged) + -> Result<(), Error>; + + fn handle_reconnect(&mut self, msg: Reconnect) -> Result<(), Error>; +} + +#[trait_variant::make(Send)] +pub trait ParseCommonMessagesFromUpstreamAsync { + async fn handle_common_message( + &mut self, + message_type: u8, + payload: &mut [u8], 
+ ) -> Result<(), Error> { + let parsed: Result, _> = (message_type, payload).try_into(); + async move { + let parsed = parsed?; + self.dispatch_common_message(parsed).await + } + } + + async fn dispatch_common_message(&mut self, message: CommonMessages<'_>) -> Result<(), Error> { + async move { + match message { + CommonMessages::SetupConnectionSuccess(msg) => { + self.handle_setup_connection_success(msg).await + } + CommonMessages::SetupConnectionError(msg) => { + self.handle_setup_connection_error(msg).await + } + CommonMessages::ChannelEndpointChanged(msg) => { + self.handle_channel_endpoint_changed(msg).await + } + CommonMessages::Reconnect(msg) => self.handle_reconnect(msg).await, + + CommonMessages::SetupConnection(_) => { + Err(Error::UnexpectedMessage(MESSAGE_TYPE_SETUP_CONNECTION)) + } + } + } + } + + async fn handle_setup_connection_success( + &mut self, + msg: SetupConnectionSuccess, + ) -> Result<(), Error>; + + async fn handle_setup_connection_error( + &mut self, + msg: SetupConnectionError, + ) -> Result<(), Error>; + + async fn handle_channel_endpoint_changed( + &mut self, + msg: ChannelEndpointChanged, + ) -> Result<(), Error>; + + async fn handle_reconnect(&mut self, msg: Reconnect) -> Result<(), Error>; +} + +pub trait ParseCommonMessagesFromDownstreamSync +where + Self: Sized, +{ + fn handle_common_message(&mut self, message_type: u8, payload: &mut [u8]) -> Result<(), Error> { + let parsed: CommonMessages<'_> = (message_type, payload).try_into()?; + self.dispatch_common_message(parsed) + } + + fn dispatch_common_message(&mut self, message: CommonMessages<'_>) -> Result<(), Error> { + match message { + CommonMessages::SetupConnectionSuccess(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS, + )), + CommonMessages::SetupConnectionError(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_SETUP_CONNECTION_ERROR, + )), + CommonMessages::ChannelEndpointChanged(_) => Err(Error::UnexpectedMessage( + 
MESSAGE_TYPE_CHANNEL_ENDPOINT_CHANGED, + )), + CommonMessages::Reconnect(_) => Err(Error::UnexpectedMessage(MESSAGE_TYPE_RECONNECT)), + + CommonMessages::SetupConnection(msg) => self.handle_setup_connection(msg), + } + } + + fn handle_setup_connection(&mut self, msg: SetupConnection) -> Result<(), Error>; +} + +#[trait_variant::make(Send)] +pub trait ParseCommonMessagesFromDownstreamAsync +where + Self: Sized, +{ + async fn handle_common_message( + &mut self, + message_type: u8, + payload: &mut [u8], + ) -> Result<(), Error> { + let parsed: Result, _> = (message_type, payload).try_into(); + async move { + let parsed = parsed?; + self.dispatch_common_message(parsed).await + } + } + + async fn dispatch_common_message(&mut self, message: CommonMessages<'_>) -> Result<(), Error> { + async move { + match message { + CommonMessages::SetupConnectionSuccess(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS, + )), + CommonMessages::SetupConnectionError(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_SETUP_CONNECTION_ERROR, + )), + CommonMessages::ChannelEndpointChanged(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_CHANNEL_ENDPOINT_CHANGED, + )), + CommonMessages::Reconnect(_) => { + Err(Error::UnexpectedMessage(MESSAGE_TYPE_RECONNECT)) + } + CommonMessages::SetupConnection(msg) => self.handle_setup_connection(msg).await, + } + } + } + + async fn handle_setup_connection(&mut self, msg: SetupConnection) -> Result<(), Error>; +} diff --git a/protocols/v2/handlers-sv2/src/error.rs b/protocols/v2/handlers-sv2/src/error.rs new file mode 100644 index 0000000000..cae18d750d --- /dev/null +++ b/protocols/v2/handlers-sv2/src/error.rs @@ -0,0 +1,16 @@ +use parsers_sv2::ParserError; + +#[derive(Debug)] +pub enum HandlerError { + UnexpectedMessage(u8), + ParserError(ParserError), + OpenStandardMiningChannelError, + OpenExtendedMiningChannelError, + External(Box), +} + +impl From for HandlerError { + fn from(value: ParserError) -> HandlerError { + 
HandlerError::ParserError(value) + } +} diff --git a/protocols/v2/handlers-sv2/src/job_declaration.rs b/protocols/v2/handlers-sv2/src/job_declaration.rs new file mode 100644 index 0000000000..6c7d3439b8 --- /dev/null +++ b/protocols/v2/handlers-sv2/src/job_declaration.rs @@ -0,0 +1,238 @@ +use crate::error::HandlerError as Error; +use core::convert::TryInto; +use job_declaration_sv2::{ + MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN, MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN_SUCCESS, + MESSAGE_TYPE_DECLARE_MINING_JOB, MESSAGE_TYPE_DECLARE_MINING_JOB_ERROR, + MESSAGE_TYPE_DECLARE_MINING_JOB_SUCCESS, MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS, + MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS_SUCCESS, MESSAGE_TYPE_PUSH_SOLUTION, *, +}; +use parsers_sv2::JobDeclaration; + +pub trait ParseJobDeclarationMessagesFromUpstreamSync { + fn handle_job_declaration_message( + &mut self, + message_type: u8, + payload: &mut [u8], + ) -> Result<(), Error> { + let parsed: JobDeclaration<'_> = (message_type, payload).try_into()?; + self.dispatch_job_declaration(parsed) + } + + fn dispatch_job_declaration(&mut self, message: JobDeclaration<'_>) -> Result<(), Error> { + match message { + JobDeclaration::AllocateMiningJobTokenSuccess(msg) => { + self.handle_allocate_mining_job_token_success(msg) + } + JobDeclaration::DeclareMiningJobSuccess(msg) => { + self.handle_declare_mining_job_success(msg) + } + JobDeclaration::DeclareMiningJobError(msg) => self.handle_declare_mining_job_error(msg), + JobDeclaration::ProvideMissingTransactions(msg) => { + self.handle_provide_missing_transactions(msg) + } + JobDeclaration::AllocateMiningJobToken(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN, + )), + JobDeclaration::DeclareMiningJob(_) => { + Err(Error::UnexpectedMessage(MESSAGE_TYPE_DECLARE_MINING_JOB)) + } + JobDeclaration::ProvideMissingTransactionsSuccess(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS_SUCCESS, + )), + 
JobDeclaration::PushSolution(_) => { + Err(Error::UnexpectedMessage(MESSAGE_TYPE_PUSH_SOLUTION)) + } + } + } + + fn handle_allocate_mining_job_token_success( + &mut self, + msg: AllocateMiningJobTokenSuccess, + ) -> Result<(), Error>; + + fn handle_declare_mining_job_success( + &mut self, + msg: DeclareMiningJobSuccess, + ) -> Result<(), Error>; + + fn handle_declare_mining_job_error(&mut self, msg: DeclareMiningJobError) -> Result<(), Error>; + + fn handle_provide_missing_transactions( + &mut self, + msg: ProvideMissingTransactions, + ) -> Result<(), Error>; +} + +#[trait_variant::make(Send)] +pub trait ParseJobDeclarationMessagesFromUpstreamAsync { + async fn handle_job_declaration_message( + &mut self, + message_type: u8, + payload: &mut [u8], + ) -> Result<(), Error> { + let parsed: Result, _> = (message_type, payload).try_into(); + async move { + let parsed = parsed?; + self.dispatch_job_declaration(parsed).await + } + } + + async fn dispatch_job_declaration(&mut self, message: JobDeclaration<'_>) -> Result<(), Error> { + async move { + match message { + JobDeclaration::AllocateMiningJobTokenSuccess(msg) => { + self.handle_allocate_mining_job_token_success(msg).await + } + JobDeclaration::DeclareMiningJobSuccess(msg) => { + self.handle_declare_mining_job_success(msg).await + } + JobDeclaration::DeclareMiningJobError(msg) => { + self.handle_declare_mining_job_error(msg).await + } + JobDeclaration::ProvideMissingTransactions(msg) => { + self.handle_provide_missing_transactions(msg).await + } + JobDeclaration::AllocateMiningJobToken(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN, + )), + JobDeclaration::DeclareMiningJob(_) => { + Err(Error::UnexpectedMessage(MESSAGE_TYPE_DECLARE_MINING_JOB)) + } + JobDeclaration::ProvideMissingTransactionsSuccess(_) => Err( + Error::UnexpectedMessage(MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS_SUCCESS), + ), + JobDeclaration::PushSolution(_) => { + 
Err(Error::UnexpectedMessage(MESSAGE_TYPE_PUSH_SOLUTION)) + } + } + } + } + + async fn handle_allocate_mining_job_token_success( + &mut self, + msg: AllocateMiningJobTokenSuccess, + ) -> Result<(), Error>; + + async fn handle_declare_mining_job_success( + &mut self, + msg: DeclareMiningJobSuccess, + ) -> Result<(), Error>; + + async fn handle_declare_mining_job_error( + &mut self, + msg: DeclareMiningJobError, + ) -> Result<(), Error>; + + async fn handle_provide_missing_transactions( + &mut self, + msg: ProvideMissingTransactions, + ) -> Result<(), Error>; +} + +pub trait ParseJobDeclarationMessagesFromDownstreamSync { + fn handle_job_declaration_message( + &mut self, + message_type: u8, + payload: &mut [u8], + ) -> Result<(), Error> { + let parsed: JobDeclaration<'_> = (message_type, payload).try_into()?; + self.dispatch_job_declaration(parsed) + } + + fn dispatch_job_declaration(&mut self, message: JobDeclaration<'_>) -> Result<(), Error> { + match message { + JobDeclaration::AllocateMiningJobToken(msg) => { + self.handle_allocate_mining_job_token(msg) + } + JobDeclaration::DeclareMiningJob(msg) => self.handle_declare_mining_job(msg), + JobDeclaration::ProvideMissingTransactionsSuccess(msg) => { + self.handle_provide_missing_transactions_success(msg) + } + JobDeclaration::PushSolution(msg) => self.handle_push_solution(msg), + + JobDeclaration::AllocateMiningJobTokenSuccess(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN_SUCCESS, + )), + JobDeclaration::DeclareMiningJobSuccess(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_DECLARE_MINING_JOB_SUCCESS, + )), + JobDeclaration::DeclareMiningJobError(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_DECLARE_MINING_JOB_ERROR, + )), + JobDeclaration::ProvideMissingTransactions(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS, + )), + } + } + + fn handle_allocate_mining_job_token( + &mut self, + msg: AllocateMiningJobToken, + ) -> Result<(), Error>; + + fn 
handle_declare_mining_job(&mut self, msg: DeclareMiningJob) -> Result<(), Error>; + + fn handle_provide_missing_transactions_success( + &mut self, + msg: ProvideMissingTransactionsSuccess, + ) -> Result<(), Error>; + + fn handle_push_solution(&mut self, msg: PushSolution) -> Result<(), Error>; +} + +#[trait_variant::make(Send)] +pub trait ParseJobDeclarationMessagesFromDownstreamAsync { + async fn handle_job_declaration_message( + &mut self, + message_type: u8, + payload: &mut [u8], + ) -> Result<(), Error> { + let parsed: Result, _> = (message_type, payload).try_into(); + async move { + let parsed = parsed?; + self.dispatch_job_declaration(parsed).await + } + } + + async fn dispatch_job_declaration(&mut self, message: JobDeclaration<'_>) -> Result<(), Error> { + async move { + match message { + JobDeclaration::AllocateMiningJobToken(msg) => { + self.handle_allocate_mining_job_token(msg).await + } + JobDeclaration::DeclareMiningJob(msg) => self.handle_declare_mining_job(msg).await, + JobDeclaration::ProvideMissingTransactionsSuccess(msg) => { + self.handle_provide_missing_transactions_success(msg).await + } + JobDeclaration::PushSolution(msg) => self.handle_push_solution(msg).await, + + JobDeclaration::AllocateMiningJobTokenSuccess(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_ALLOCATE_MINING_JOB_TOKEN_SUCCESS, + )), + JobDeclaration::DeclareMiningJobSuccess(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_DECLARE_MINING_JOB_SUCCESS, + )), + JobDeclaration::DeclareMiningJobError(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_DECLARE_MINING_JOB_ERROR, + )), + JobDeclaration::ProvideMissingTransactions(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_PROVIDE_MISSING_TRANSACTIONS, + )), + } + } + } + + async fn handle_allocate_mining_job_token( + &mut self, + msg: AllocateMiningJobToken, + ) -> Result<(), Error>; + + async fn handle_declare_mining_job(&mut self, msg: DeclareMiningJob) -> Result<(), Error>; + + async fn 
handle_provide_missing_transactions_success( + &mut self, + msg: ProvideMissingTransactionsSuccess, + ) -> Result<(), Error>; + + async fn handle_push_solution(&mut self, msg: PushSolution) -> Result<(), Error>; +} diff --git a/protocols/v2/handlers-sv2/src/lib.rs b/protocols/v2/handlers-sv2/src/lib.rs new file mode 100644 index 0000000000..61074875b6 --- /dev/null +++ b/protocols/v2/handlers-sv2/src/lib.rs @@ -0,0 +1,30 @@ +mod common; +mod error; +mod job_declaration; +mod mining; +mod template_distribution; + +pub use error::HandlerError; + +pub use common::{ + ParseCommonMessagesFromDownstreamAsync, ParseCommonMessagesFromDownstreamSync, + ParseCommonMessagesFromUpstreamAsync, ParseCommonMessagesFromUpstreamSync, +}; + +pub use mining::{ + ParseMiningMessagesFromDownstreamAsync, ParseMiningMessagesFromDownstreamSync, + ParseMiningMessagesFromUpstreamAsync, ParseMiningMessagesFromUpstreamSync, + SupportedChannelTypes, +}; + +pub use template_distribution::{ + ParseTemplateDistributionMessagesFromClientAsync, + ParseTemplateDistributionMessagesFromClientSync, + ParseTemplateDistributionMessagesFromServerAsync, + ParseTemplateDistributionMessagesFromServerSync, +}; + +pub use job_declaration::{ + ParseJobDeclarationMessagesFromDownstreamAsync, ParseJobDeclarationMessagesFromDownstreamSync, + ParseJobDeclarationMessagesFromUpstreamAsync, ParseJobDeclarationMessagesFromUpstreamSync, +}; diff --git a/protocols/v2/handlers-sv2/src/mining.rs b/protocols/v2/handlers-sv2/src/mining.rs new file mode 100644 index 0000000000..83851ff475 --- /dev/null +++ b/protocols/v2/handlers-sv2/src/mining.rs @@ -0,0 +1,547 @@ +use crate::error::HandlerError as Error; +use binary_sv2::Str0255; +use mining_sv2::{ + CloseChannel, NewExtendedMiningJob, NewMiningJob, OpenExtendedMiningChannel, + OpenExtendedMiningChannelSuccess, OpenMiningChannelError, OpenStandardMiningChannel, + OpenStandardMiningChannelSuccess, SetCustomMiningJob, SetCustomMiningJobError, + SetCustomMiningJobSuccess, 
SetExtranoncePrefix, SetGroupChannel, SetNewPrevHash, SetTarget, + SubmitSharesError, SubmitSharesExtended, SubmitSharesStandard, SubmitSharesSuccess, + UpdateChannel, UpdateChannelError, +}; +use parsers_sv2::Mining; +use std::convert::TryInto; + +use mining_sv2::*; +use std::fmt::Debug as D; + +#[derive(PartialEq, Eq)] +pub enum SupportedChannelTypes { + Standard, + Extended, + Group, + GroupAndExtended, +} + +pub trait ParseMiningMessagesFromDownstreamSync +where + Self: Sized + D, +{ + fn get_channel_type(&self) -> SupportedChannelTypes; + fn is_work_selection_enabled(&self) -> bool; + + fn is_downstream_authorized(&self, user_identity: &Str0255) -> Result; + + fn handle_mining_message(&mut self, message_type: u8, payload: &mut [u8]) -> Result<(), Error> { + let parsed: Mining = (message_type, payload).try_into()?; + self.dispatch_mining_message(parsed) + } + + fn dispatch_mining_message(&mut self, message: Mining) -> Result<(), Error> { + let (channel_type, work_selection) = + (self.get_channel_type(), self.is_work_selection_enabled()); + + use Mining::*; + match message { + OpenStandardMiningChannel(m) => { + if !self.is_downstream_authorized(&m.user_identity)? { + // Add correct error type + return Err(Error::OpenStandardMiningChannelError); + } + + match channel_type { + SupportedChannelTypes::Standard + | SupportedChannelTypes::Group + | SupportedChannelTypes::GroupAndExtended => { + self.handle_open_standard_mining_channel(m) + } + SupportedChannelTypes::Extended => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL, + )), + } + } + OpenExtendedMiningChannel(m) => { + if !self.is_downstream_authorized(&m.user_identity)? 
{ + // Add correct Error type + return Err(Error::OpenExtendedMiningChannelError); + } + + match channel_type { + SupportedChannelTypes::Extended | SupportedChannelTypes::GroupAndExtended => { + self.handle_open_extended_mining_channel(m) + } + _ => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL, + )), + } + } + UpdateChannel(m) => self.handle_update_channel(m), + + SubmitSharesStandard(m) => match channel_type { + SupportedChannelTypes::Standard + | SupportedChannelTypes::Group + | SupportedChannelTypes::GroupAndExtended => self.handle_submit_shares_standard(m), + SupportedChannelTypes::Extended => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_SUBMIT_SHARES_STANDARD, + )), + }, + + SubmitSharesExtended(m) => match channel_type { + SupportedChannelTypes::Extended | SupportedChannelTypes::GroupAndExtended => { + self.handle_submit_shares_extended(m) + } + _ => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_SUBMIT_SHARES_EXTENDED, + )), + }, + + SetCustomMiningJob(m) => match (channel_type, work_selection) { + (SupportedChannelTypes::Extended, true) + | (SupportedChannelTypes::GroupAndExtended, true) => { + self.handle_set_custom_mining_job(m) + } + _ => Err(Error::UnexpectedMessage(MESSAGE_TYPE_SET_CUSTOM_MINING_JOB)), + }, + + _ => Err(Error::UnexpectedMessage(0)), + } + } + + fn handle_open_standard_mining_channel( + &mut self, + msg: OpenStandardMiningChannel, + ) -> Result<(), Error>; + + fn handle_open_extended_mining_channel( + &mut self, + msg: OpenExtendedMiningChannel, + ) -> Result<(), Error>; + + fn handle_update_channel(&mut self, msg: UpdateChannel) -> Result<(), Error>; + + fn handle_submit_shares_standard(&mut self, msg: SubmitSharesStandard) -> Result<(), Error>; + + fn handle_submit_shares_extended(&mut self, msg: SubmitSharesExtended) -> Result<(), Error>; + + fn handle_set_custom_mining_job(&mut self, msg: SetCustomMiningJob) -> Result<(), Error>; +} + +#[trait_variant::make(Send)] +pub trait 
ParseMiningMessagesFromDownstreamAsync +where + Self: Sized + D, +{ + fn get_channel_type(&self) -> SupportedChannelTypes; + fn is_work_selection_enabled(&self) -> bool; + + fn is_downstream_authorized(&self, user_identity: &Str0255) -> Result; + + async fn handle_mining_message( + &mut self, + message_type: u8, + payload: &mut [u8], + ) -> Result<(), Error> { + let parsed: Result = (message_type, payload).try_into(); + async move { + let parsed = parsed?; + self.dispatch_mining_message(parsed).await + } + } + + async fn dispatch_mining_message(&mut self, message: Mining) -> Result<(), Error> { + let (channel_type, work_selection) = + (self.get_channel_type(), self.is_work_selection_enabled()); + + async move { + use Mining::*; + match message { + OpenStandardMiningChannel(m) => { + if !self.is_downstream_authorized(&m.user_identity)? { + // Add correct error type + return Err(Error::OpenStandardMiningChannelError); + } + + match channel_type { + SupportedChannelTypes::Standard + | SupportedChannelTypes::Group + | SupportedChannelTypes::GroupAndExtended => { + self.handle_open_standard_mining_channel(m).await + } + SupportedChannelTypes::Extended => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL, + )), + } + } + OpenExtendedMiningChannel(m) => { + if !self.is_downstream_authorized(&m.user_identity)? 
{ + // Add correct Error type + return Err(Error::OpenExtendedMiningChannelError); + } + + match channel_type { + SupportedChannelTypes::Extended + | SupportedChannelTypes::GroupAndExtended => { + self.handle_open_extended_mining_channel(m).await + } + _ => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL, + )), + } + } + UpdateChannel(m) => self.handle_update_channel(m).await, + + SubmitSharesStandard(m) => match channel_type { + SupportedChannelTypes::Standard + | SupportedChannelTypes::Group + | SupportedChannelTypes::GroupAndExtended => { + self.handle_submit_shares_standard(m).await + } + SupportedChannelTypes::Extended => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_SUBMIT_SHARES_STANDARD, + )), + }, + + SubmitSharesExtended(m) => match channel_type { + SupportedChannelTypes::Extended | SupportedChannelTypes::GroupAndExtended => { + self.handle_submit_shares_extended(m).await + } + _ => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_SUBMIT_SHARES_EXTENDED, + )), + }, + + SetCustomMiningJob(m) => match (channel_type, work_selection) { + (SupportedChannelTypes::Extended, true) + | (SupportedChannelTypes::GroupAndExtended, true) => { + self.handle_set_custom_mining_job(m).await + } + _ => Err(Error::UnexpectedMessage(MESSAGE_TYPE_SET_CUSTOM_MINING_JOB)), + }, + + _ => Err(Error::UnexpectedMessage(0)), + } + } + } + + async fn handle_open_standard_mining_channel( + &mut self, + msg: OpenStandardMiningChannel, + ) -> Result<(), Error>; + + async fn handle_open_extended_mining_channel( + &mut self, + msg: OpenExtendedMiningChannel, + ) -> Result<(), Error>; + + async fn handle_update_channel(&mut self, msg: UpdateChannel) -> Result<(), Error>; + + async fn handle_submit_shares_standard( + &mut self, + msg: SubmitSharesStandard, + ) -> Result<(), Error>; + + async fn handle_submit_shares_extended( + &mut self, + msg: SubmitSharesExtended, + ) -> Result<(), Error>; + + async fn handle_set_custom_mining_job(&mut self, msg: SetCustomMiningJob) -> 
Result<(), Error>; +} + +pub trait ParseMiningMessagesFromUpstreamSync +where + Self: Sized + D, +{ + fn get_channel_type(&self) -> SupportedChannelTypes; + fn is_work_selection_enabled(&self) -> bool; + + fn handle_mining_message(&mut self, message_type: u8, payload: &mut [u8]) -> Result<(), Error> { + let parsed: Mining = (message_type, payload).try_into()?; + self.dispatch_mining_message(parsed) + } + + fn dispatch_mining_message(&mut self, message: Mining) -> Result<(), Error> { + let (channel_type, work_selection) = + (self.get_channel_type(), self.is_work_selection_enabled()); + + use Mining::*; + match message { + OpenStandardMiningChannelSuccess(m) => match channel_type { + SupportedChannelTypes::Standard + | SupportedChannelTypes::Group + | SupportedChannelTypes::GroupAndExtended => { + self.handle_open_standard_mining_channel_success(m) + } + _ => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL_SUCCESS, + )), + }, + + OpenExtendedMiningChannelSuccess(m) => match channel_type { + SupportedChannelTypes::Extended | SupportedChannelTypes::GroupAndExtended => { + self.handle_open_extended_mining_channel_success(m) + } + _ => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL_SUCCESS, + )), + }, + + OpenMiningChannelError(m) => self.handle_open_mining_channel_error(m), + UpdateChannelError(m) => self.handle_update_channel_error(m), + CloseChannel(m) => self.handle_close_channel(m), + SetExtranoncePrefix(m) => self.handle_set_extranonce_prefix(m), + SubmitSharesSuccess(m) => self.handle_submit_shares_success(m), + SubmitSharesError(m) => self.handle_submit_shares_error(m), + + NewMiningJob(m) => match channel_type { + SupportedChannelTypes::Standard => self.handle_new_mining_job(m), + _ => Err(Error::UnexpectedMessage(MESSAGE_TYPE_NEW_MINING_JOB)), + }, + + NewExtendedMiningJob(m) => match channel_type { + SupportedChannelTypes::Extended + | SupportedChannelTypes::Group + | SupportedChannelTypes::GroupAndExtended 
=> self.handle_new_extended_mining_job(m), + _ => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_NEW_EXTENDED_MINING_JOB, + )), + }, + + SetNewPrevHash(m) => self.handle_set_new_prev_hash(m), + + SetCustomMiningJobSuccess(m) => match (channel_type, work_selection) { + (SupportedChannelTypes::Extended, true) + | (SupportedChannelTypes::GroupAndExtended, true) => { + self.handle_set_custom_mining_job_success(m) + } + _ => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_SET_CUSTOM_MINING_JOB_SUCCESS, + )), + }, + + SetCustomMiningJobError(m) => match (channel_type, work_selection) { + (SupportedChannelTypes::Extended, true) + | (SupportedChannelTypes::Group, true) + | (SupportedChannelTypes::GroupAndExtended, true) => { + self.handle_set_custom_mining_job_error(m) + } + _ => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_SET_CUSTOM_MINING_JOB_ERROR, + )), + }, + + SetTarget(m) => self.handle_set_target(m), + + SetGroupChannel(m) => match channel_type { + SupportedChannelTypes::Group | SupportedChannelTypes::GroupAndExtended => { + self.handle_set_group_channel(m) + } + _ => Err(Error::UnexpectedMessage(MESSAGE_TYPE_SET_GROUP_CHANNEL)), + }, + + _ => Err(Error::UnexpectedMessage(0)), + } + } + + fn handle_open_standard_mining_channel_success( + &mut self, + msg: OpenStandardMiningChannelSuccess, + ) -> Result<(), Error>; + + fn handle_open_extended_mining_channel_success( + &mut self, + msg: OpenExtendedMiningChannelSuccess, + ) -> Result<(), Error>; + + fn handle_open_mining_channel_error( + &mut self, + msg: OpenMiningChannelError, + ) -> Result<(), Error>; + + fn handle_update_channel_error(&mut self, msg: UpdateChannelError) -> Result<(), Error>; + + fn handle_close_channel(&mut self, msg: CloseChannel) -> Result<(), Error>; + + fn handle_set_extranonce_prefix(&mut self, msg: SetExtranoncePrefix) -> Result<(), Error>; + + fn handle_submit_shares_success(&mut self, msg: SubmitSharesSuccess) -> Result<(), Error>; + + fn handle_submit_shares_error(&mut self, msg: 
SubmitSharesError) -> Result<(), Error>; + + fn handle_new_mining_job(&mut self, msg: NewMiningJob) -> Result<(), Error>; + + fn handle_new_extended_mining_job(&mut self, msg: NewExtendedMiningJob) -> Result<(), Error>; + + fn handle_set_new_prev_hash(&mut self, msg: SetNewPrevHash) -> Result<(), Error>; + + fn handle_set_custom_mining_job_success( + &mut self, + msg: SetCustomMiningJobSuccess, + ) -> Result<(), Error>; + + fn handle_set_custom_mining_job_error( + &mut self, + msg: SetCustomMiningJobError, + ) -> Result<(), Error>; + + fn handle_set_target(&mut self, msg: SetTarget) -> Result<(), Error>; + + fn handle_set_group_channel(&mut self, msg: SetGroupChannel) -> Result<(), Error>; +} + +#[trait_variant::make(Send)] +pub trait ParseMiningMessagesFromUpstreamAsync +where + Self: Sized + D, +{ + fn get_channel_type(&self) -> SupportedChannelTypes; + fn is_work_selection_enabled(&self) -> bool; + + async fn handle_mining_message( + &mut self, + message_type: u8, + payload: &mut [u8], + ) -> Result<(), Error> { + let parsed: Result = (message_type, payload).try_into(); + async move { + let parsed = parsed?; + self.dispatch_mining_message(parsed).await + } + } + + async fn dispatch_mining_message(&mut self, message: Mining) -> Result<(), Error> { + let (channel_type, work_selection) = + (self.get_channel_type(), self.is_work_selection_enabled()); + + async move { + use Mining::*; + match message { + OpenStandardMiningChannelSuccess(m) => match channel_type { + SupportedChannelTypes::Standard + | SupportedChannelTypes::Group + | SupportedChannelTypes::GroupAndExtended => { + self.handle_open_standard_mining_channel_success(m).await + } + _ => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_OPEN_STANDARD_MINING_CHANNEL_SUCCESS, + )), + }, + + OpenExtendedMiningChannelSuccess(m) => match channel_type { + SupportedChannelTypes::Extended | SupportedChannelTypes::GroupAndExtended => { + self.handle_open_extended_mining_channel_success(m).await + } + _ => 
Err(Error::UnexpectedMessage( + MESSAGE_TYPE_OPEN_EXTENDED_MINING_CHANNEL_SUCCESS, + )), + }, + + OpenMiningChannelError(m) => self.handle_open_mining_channel_error(m).await, + UpdateChannelError(m) => self.handle_update_channel_error(m).await, + CloseChannel(m) => self.handle_close_channel(m).await, + SetExtranoncePrefix(m) => self.handle_set_extranonce_prefix(m).await, + SubmitSharesSuccess(m) => self.handle_submit_shares_success(m).await, + SubmitSharesError(m) => self.handle_submit_shares_error(m).await, + + NewMiningJob(m) => match channel_type { + SupportedChannelTypes::Standard => self.handle_new_mining_job(m).await, + _ => Err(Error::UnexpectedMessage(MESSAGE_TYPE_NEW_MINING_JOB)), + }, + + NewExtendedMiningJob(m) => match channel_type { + SupportedChannelTypes::Extended + | SupportedChannelTypes::Group + | SupportedChannelTypes::GroupAndExtended => { + self.handle_new_extended_mining_job(m).await + } + _ => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_NEW_EXTENDED_MINING_JOB, + )), + }, + + SetNewPrevHash(m) => self.handle_set_new_prev_hash(m).await, + + SetCustomMiningJobSuccess(m) => match (channel_type, work_selection) { + (SupportedChannelTypes::Extended, true) + | (SupportedChannelTypes::GroupAndExtended, true) => { + self.handle_set_custom_mining_job_success(m).await + } + _ => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_SET_CUSTOM_MINING_JOB_SUCCESS, + )), + }, + + SetCustomMiningJobError(m) => match (channel_type, work_selection) { + (SupportedChannelTypes::Extended, true) + | (SupportedChannelTypes::Group, true) + | (SupportedChannelTypes::GroupAndExtended, true) => { + self.handle_set_custom_mining_job_error(m).await + } + _ => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_SET_CUSTOM_MINING_JOB_ERROR, + )), + }, + + SetTarget(m) => self.handle_set_target(m).await, + + SetGroupChannel(m) => match channel_type { + SupportedChannelTypes::Group | SupportedChannelTypes::GroupAndExtended => { + self.handle_set_group_channel(m).await + } + _ => 
Err(Error::UnexpectedMessage(MESSAGE_TYPE_SET_GROUP_CHANNEL)), + }, + + _ => Err(Error::UnexpectedMessage(0)), + } + } + } + + async fn handle_open_standard_mining_channel_success( + &mut self, + msg: OpenStandardMiningChannelSuccess, + ) -> Result<(), Error>; + + async fn handle_open_extended_mining_channel_success( + &mut self, + msg: OpenExtendedMiningChannelSuccess, + ) -> Result<(), Error>; + + async fn handle_open_mining_channel_error( + &mut self, + msg: OpenMiningChannelError, + ) -> Result<(), Error>; + + async fn handle_update_channel_error(&mut self, msg: UpdateChannelError) -> Result<(), Error>; + + async fn handle_close_channel(&mut self, msg: CloseChannel) -> Result<(), Error>; + + async fn handle_set_extranonce_prefix(&mut self, msg: SetExtranoncePrefix) + -> Result<(), Error>; + + async fn handle_submit_shares_success(&mut self, msg: SubmitSharesSuccess) + -> Result<(), Error>; + + async fn handle_submit_shares_error(&mut self, msg: SubmitSharesError) -> Result<(), Error>; + + async fn handle_new_mining_job(&mut self, msg: NewMiningJob) -> Result<(), Error>; + + async fn handle_new_extended_mining_job( + &mut self, + msg: NewExtendedMiningJob, + ) -> Result<(), Error>; + + async fn handle_set_new_prev_hash(&mut self, msg: SetNewPrevHash) -> Result<(), Error>; + + async fn handle_set_custom_mining_job_success( + &mut self, + msg: SetCustomMiningJobSuccess, + ) -> Result<(), Error>; + + async fn handle_set_custom_mining_job_error( + &mut self, + msg: SetCustomMiningJobError, + ) -> Result<(), Error>; + + async fn handle_set_target(&mut self, msg: SetTarget) -> Result<(), Error>; + + async fn handle_set_group_channel(&mut self, msg: SetGroupChannel) -> Result<(), Error>; +} diff --git a/protocols/v2/handlers-sv2/src/template_distribution.rs b/protocols/v2/handlers-sv2/src/template_distribution.rs new file mode 100644 index 0000000000..f81e807297 --- /dev/null +++ b/protocols/v2/handlers-sv2/src/template_distribution.rs @@ -0,0 +1,213 @@ +use 
crate::error::HandlerError as Error; +use parsers_sv2::TemplateDistribution; +use template_distribution_sv2::{ + CoinbaseOutputConstraints, NewTemplate, RequestTransactionData, RequestTransactionDataError, + RequestTransactionDataSuccess, SetNewPrevHash, SubmitSolution, +}; + +use core::convert::TryInto; +use template_distribution_sv2::*; + +pub trait ParseTemplateDistributionMessagesFromServerSync { + fn handle_template_distribution_message( + &mut self, + message_type: u8, + payload: &mut [u8], + ) -> Result<(), Error> { + let parsed: TemplateDistribution<'_> = (message_type, payload).try_into()?; + self.dispatch_template_distribution(parsed) + } + + fn dispatch_template_distribution( + &mut self, + message: TemplateDistribution<'_>, + ) -> Result<(), Error> { + match message { + TemplateDistribution::NewTemplate(m) => self.handle_new_template(m), + TemplateDistribution::SetNewPrevHash(m) => self.handle_set_new_prev_hash(m), + TemplateDistribution::RequestTransactionDataSuccess(m) => { + self.handle_request_tx_data_success(m) + } + TemplateDistribution::RequestTransactionDataError(m) => { + self.handle_request_tx_data_error(m) + } + + TemplateDistribution::CoinbaseOutputConstraints(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_COINBASE_OUTPUT_CONSTRAINTS, + )), + TemplateDistribution::RequestTransactionData(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_REQUEST_TRANSACTION_DATA, + )), + TemplateDistribution::SubmitSolution(_) => { + Err(Error::UnexpectedMessage(MESSAGE_TYPE_SUBMIT_SOLUTION)) + } + } + } + fn handle_new_template(&mut self, msg: NewTemplate) -> Result<(), Error>; + + fn handle_set_new_prev_hash(&mut self, msg: SetNewPrevHash) -> Result<(), Error>; + + fn handle_request_tx_data_success( + &mut self, + msg: RequestTransactionDataSuccess, + ) -> Result<(), Error>; + + fn handle_request_tx_data_error( + &mut self, + msg: RequestTransactionDataError, + ) -> Result<(), Error>; +} + +#[trait_variant::make(Send)] +pub trait 
ParseTemplateDistributionMessagesFromServerAsync { + async fn handle_template_distribution_message( + &mut self, + message_type: u8, + payload: &mut [u8], + ) -> Result<(), Error> { + let parsed: Result<TemplateDistribution<'_>, _> = (message_type, payload).try_into(); + async move { + let parsed = parsed?; + self.dispatch_template_distribution(parsed).await + } + } + + async fn dispatch_template_distribution( + &mut self, + message: TemplateDistribution<'_>, + ) -> Result<(), Error> { + async move { + match message { + TemplateDistribution::NewTemplate(m) => self.handle_new_template(m).await, + TemplateDistribution::SetNewPrevHash(m) => self.handle_set_new_prev_hash(m).await, + TemplateDistribution::RequestTransactionDataSuccess(m) => { + self.handle_request_tx_data_success(m).await + } + TemplateDistribution::RequestTransactionDataError(m) => { + self.handle_request_tx_data_error(m).await + } + + TemplateDistribution::CoinbaseOutputConstraints(_) => Err( + Error::UnexpectedMessage(MESSAGE_TYPE_COINBASE_OUTPUT_CONSTRAINTS), + ), + TemplateDistribution::RequestTransactionData(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_REQUEST_TRANSACTION_DATA, + )), + TemplateDistribution::SubmitSolution(_) => { + Err(Error::UnexpectedMessage(MESSAGE_TYPE_SUBMIT_SOLUTION)) + } + } + } + } + async fn handle_new_template(&mut self, msg: NewTemplate) -> Result<(), Error>; + + async fn handle_set_new_prev_hash(&mut self, msg: SetNewPrevHash) -> Result<(), Error>; + + async fn handle_request_tx_data_success( + &mut self, + msg: RequestTransactionDataSuccess, + ) -> Result<(), Error>; + + async fn handle_request_tx_data_error( + &mut self, + msg: RequestTransactionDataError, + ) -> Result<(), Error>; +} + +pub trait ParseTemplateDistributionMessagesFromClientSync { + fn handle_template_distribution_message( + &mut self, + message_type: u8, + payload: &mut [u8], + ) -> Result<(), Error> { + let parsed: TemplateDistribution<'_> = (message_type, payload).try_into()?; + 
self.dispatch_template_distribution(parsed) + } + + fn dispatch_template_distribution( + &mut self, + message: TemplateDistribution<'_>, + ) -> Result<(), Error> { + match message { + TemplateDistribution::CoinbaseOutputConstraints(m) => { + self.handle_coinbase_output_constraints(m) + } + TemplateDistribution::RequestTransactionData(m) => self.handle_request_tx_data(m), + TemplateDistribution::SubmitSolution(m) => self.handle_submit_solution(m), + + TemplateDistribution::NewTemplate(_) => { + Err(Error::UnexpectedMessage(MESSAGE_TYPE_NEW_TEMPLATE)) + } + TemplateDistribution::SetNewPrevHash(_) => { + Err(Error::UnexpectedMessage(MESSAGE_TYPE_SET_NEW_PREV_HASH)) + } + TemplateDistribution::RequestTransactionDataSuccess(_) => Err( + Error::UnexpectedMessage(MESSAGE_TYPE_REQUEST_TRANSACTION_DATA_SUCCESS), + ), + TemplateDistribution::RequestTransactionDataError(_) => Err(Error::UnexpectedMessage( + MESSAGE_TYPE_REQUEST_TRANSACTION_DATA_ERROR, + )), + } + } + + fn handle_coinbase_output_constraints( + &mut self, + msg: CoinbaseOutputConstraints, + ) -> Result<(), Error>; + + fn handle_request_tx_data(&mut self, msg: RequestTransactionData) -> Result<(), Error>; + fn handle_submit_solution(&mut self, msg: SubmitSolution) -> Result<(), Error>; +} + +#[trait_variant::make(Send)] +pub trait ParseTemplateDistributionMessagesFromClientAsync { + async fn handle_template_distribution_message( + &mut self, + message_type: u8, + payload: &mut [u8], + ) -> Result<(), Error> { + let parsed: Result<TemplateDistribution<'_>, _> = (message_type, payload).try_into(); + async move { + let parsed = parsed?; + self.dispatch_template_distribution(parsed).await + } + } + + async fn dispatch_template_distribution( + &mut self, + message: TemplateDistribution<'_>, + ) -> Result<(), Error> { + async move { + match message { + TemplateDistribution::CoinbaseOutputConstraints(m) => { + self.handle_coinbase_output_constraints(m).await + } + TemplateDistribution::RequestTransactionData(m) => {
self.handle_request_tx_data(m).await + } + TemplateDistribution::SubmitSolution(m) => self.handle_submit_solution(m).await, + + TemplateDistribution::NewTemplate(_) => { + Err(Error::UnexpectedMessage(MESSAGE_TYPE_NEW_TEMPLATE)) + } + TemplateDistribution::SetNewPrevHash(_) => { + Err(Error::UnexpectedMessage(MESSAGE_TYPE_SET_NEW_PREV_HASH)) + } + TemplateDistribution::RequestTransactionDataSuccess(_) => Err( + Error::UnexpectedMessage(MESSAGE_TYPE_REQUEST_TRANSACTION_DATA_SUCCESS), + ), + TemplateDistribution::RequestTransactionDataError(_) => Err( + Error::UnexpectedMessage(MESSAGE_TYPE_REQUEST_TRANSACTION_DATA_ERROR), + ), + } + } + } + + async fn handle_coinbase_output_constraints( + &mut self, + msg: CoinbaseOutputConstraints, + ) -> Result<(), Error>; + + async fn handle_request_tx_data(&mut self, msg: RequestTransactionData) -> Result<(), Error>; + async fn handle_submit_solution(&mut self, msg: SubmitSolution) -> Result<(), Error>; +} diff --git a/protocols/v2/roles-logic-sv2/Cargo.toml b/protocols/v2/roles-logic-sv2/Cargo.toml index 15826acc37..b03ecc2276 100644 --- a/protocols/v2/roles-logic-sv2/Cargo.toml +++ b/protocols/v2/roles-logic-sv2/Cargo.toml @@ -16,6 +16,7 @@ keywords = ["stratum", "mining", "bitcoin", "protocol"] bitcoin = { version = "0.32.5" } channels_sv2 = { path = "../channels-sv2", version = "^0.1.0" } parsers_sv2 = { path = "../parsers-sv2", version = "^0.1.0" } +handlers_sv2 = { path = "../handlers-sv2", version = "^0.1.0" } common_messages_sv2 = { path = "../../../protocols/v2/subprotocols/common-messages", version = "^5.0.0" } mining_sv2 = { path = "../../../protocols/v2/subprotocols/mining", version = "^4.0.0" } template_distribution_sv2 = { path = "../../../protocols/v2/subprotocols/template-distribution", version = "^3.0.0" } diff --git a/protocols/v2/roles-logic-sv2/src/lib.rs b/protocols/v2/roles-logic-sv2/src/lib.rs index 9d12f3975d..99968d6edb 100644 --- a/protocols/v2/roles-logic-sv2/src/lib.rs +++ 
b/protocols/v2/roles-logic-sv2/src/lib.rs @@ -28,6 +28,7 @@ pub use channels_sv2; pub use codec_sv2; pub use common_messages_sv2; pub use errors::Error; +pub use handlers_sv2; pub use job_declaration_sv2; pub use mining_sv2; pub use parsers_sv2; diff --git a/roles/Cargo.lock b/roles/Cargo.lock index 1503264be7..8f8a089d80 100644 --- a/roles/Cargo.lock +++ b/roles/Cargo.lock @@ -1187,6 +1187,19 @@ dependencies = [ "tracing", ] +[[package]] +name = "handlers_sv2" +version = "0.1.0" +dependencies = [ + "binary_sv2", + "common_messages_sv2", + "job_declaration_sv2", + "mining_sv2", + "parsers_sv2", + "template_distribution_sv2", + "trait-variant", +] + [[package]] name = "hashbrown" version = "0.11.2" @@ -2210,6 +2223,7 @@ dependencies = [ "channels_sv2", "codec_sv2", "common_messages_sv2", + "handlers_sv2", "hex-conservative 0.3.0", "job_declaration_sv2", "mining_sv2", @@ -2764,22 +2778,38 @@ dependencies = [ "tracing-log", ] +[[package]] +name = "trait-variant" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "translator_sv2" -version = "1.0.0" +version = "2.0.0" dependencies = [ "async-channel 1.9.0", "async-recursion 0.3.2", + "binary_sv2", "buffer_sv2", "clap", + "codec_sv2", "config", "config-helpers", "error_handling", + "framing_sv2", "futures", "key-utils", + "network_helpers_sv2", "once_cell", "primitive-types", "rand 0.8.5", + "roles_logic_sv2", "serde", "serde_json", "sha2 0.10.8", diff --git a/roles/translator/Cargo.toml b/roles/translator/Cargo.toml index a1a0094f7c..1118ce7ba6 100644 --- a/roles/translator/Cargo.toml +++ b/roles/translator/Cargo.toml @@ -1,15 +1,15 @@ [package] name = "translator_sv2" -version = "1.0.0" +version = "2.0.0" authors = ["The Stratum V2 Developers"] edition = "2021" -description = "Server used to bridge SV1 miners to SV2 pools" 
+description = "SV1 to SV2 translation proxy with improved architecture" documentation = "https://docs.rs/translator_sv2" readme = "README.md" homepage = "https://stratumprotocol.org" repository = "https://github.com/stratum-mining/stratum" license = "MIT OR Apache-2.0" -keywords = ["stratum", "mining", "bitcoin", "protocol"] +keywords = ["stratum", "mining", "bitcoin", "protocol", "translator", "proxy"] [lib] name = "translator_sv2" @@ -20,11 +20,16 @@ name = "translator_sv2" path = "src/main.rs" [dependencies] -stratum-common = { path = "../../common", features = ["with_network_helpers"] } +stratum-common = { path = "../../common" } async-channel = "1.5.1" async-recursion = "0.3.2" +binary_sv2 = { path = "../../protocols/v2/binary-sv2" } buffer_sv2 = { path = "../../utils/buffer" } +codec_sv2 = { path = "../../protocols/v2/codec-sv2", features = ["noise_sv2", "with_buffer_pool"] } +framing_sv2 = { path = "../../protocols/v2/framing-sv2" } +network_helpers_sv2 = { path = "../roles-utils/network-helpers", features=["with_buffer_pool", "sv1"] } once_cell = "1.12.0" +roles_logic_sv2 = { path = "../../protocols/v2/roles-logic-sv2" } serde = { version = "1.0.89", default-features = false, features = ["derive", "alloc"] } serde_json = { version = "1.0.64", default-features = false, features = ["alloc"] } futures = "0.3.25" diff --git a/roles/translator/README.md b/roles/translator/README.md index 705f605a9d..6f8e9d93b5 100644 --- a/roles/translator/README.md +++ b/roles/translator/README.md @@ -1,11 +1,10 @@ - # SV1 to SV2 Translator Proxy -This proxy is designed to sit in between a SV1 Downstream role (most typically Mining Device(s) -running SV1 firmware) and a SV2 Upstream role (most typically a SV2 Pool Server with Extended -Channel support). +A proxy that translates between Stratum V1 (SV1) and Stratum V2 (SV2) mining protocols. 
This translator enables SV1 mining devices to connect to SV2 pools and infrastructure, bridging the gap between legacy mining hardware and modern mining protocols. + +## Architecture Overview -The most typical high level configuration is: +The translator sits between SV1 downstream roles (mining devices) and SV2 upstream roles (pool servers or proxies), providing seamless protocol translation and advanced features like channel aggregation and failover. ``` <--- Most Downstream ----------------------------------------- Most Upstream ---> @@ -18,45 +17,182 @@ The most typical high level configuration is: | +-------------------+ +------------------+ | | +-----------------+ | | | | | +---------------------------------------------------+ +------------------------+ +``` + +## Configuration + +### Configuration File Structure + +The translator uses TOML configuration files with the following structure: + +```toml +# Downstream SV1 Connection (where miners connect) +downstream_address = "0.0.0.0" +downstream_port = 34255 + +# Protocol Version Support +max_supported_version = 2 +min_supported_version = 2 + +# Extranonce Configuration +min_extranonce2_size = 4 # Min: 2, Max: 16 (CGminer max: 8) + +# User Identity (appended with counter for each miner) +user_identity = "your_username_here" + +# Channel Configuration +aggregate_channels = true # true: shared channel, false: individual channels +# Downstream Difficulty Configuration +[downstream_difficulty_config] +min_individual_miner_hashrate = 10_000_000_000_000.0 # 10 TH/s +shares_per_minute = 6.0 + +# Upstream SV2 Connections (supports multiple with failover) +[[upstreams]] +address = "127.0.0.1" +port = 34254 +authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" + +[[upstreams]] +address = "backup.pool.com" +port = 34254 +authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" ``` -## Setup +### Configuration Parameters + +#### **Downstream Configuration** +- `downstream_address`: IP 
address for SV1 miners to connect to +- `downstream_port`: Port for SV1 miners to connect to -### Configuration File +#### **Protocol Configuration** +- `max_supported_version`/`min_supported_version`: SV2 protocol version support +- `min_extranonce2_size`: Minimum extranonce2 size (affects mining efficiency) -`tproxy-config-local-jdc-example.toml` and `tproxy-config-local-pool-example.toml` are examples of configuration files for the Translator Proxy. +#### **Channel Configuration** +- `aggregate_channels`: + - `true`: All miners share one upstream extended channel (more efficient) + - `false`: Each miner gets its own upstream extended channel (more isolated) +- `user_identity`: Username for pool authentication (auto-suffixed per miner) -The configuration file contains the following information: +#### **Difficulty Configuration** +- `min_individual_miner_hashrate`: Expected hashrate of weakest miner (in H/s) +- `shares_per_minute`: Target share submission rate -1. The SV2 Upstream connection information which includes the SV2 Pool authority public key - (`upstream_authority_pubkey`) and the SV2 Pool connection address (`upstream_address`) and port - (`upstream_port`). -2. The SV1 Downstream socket information which includes the listening IP address - (`downstream_address`) and port (`downstream_port`). -3. The maximum and minimum SRI versions (`max_supported_version` and `min_supported_version`) that - the Translator Proxy implementer wants to support. Currently the only available version is `2`. -4. The desired minimum `extranonce2` size that the Translator Proxy implementer wants to use - (`min_extranonce2_size`). The `extranonce2` size is ultimately decided by the SV2 Upstream role, - but if the specified size meets the SV2 Upstream role's requirements, the size specified in this - configuration file should be favored. -5. 
The downstream difficulty params such as: -- the hashrate (hashes/s) of the weakest Mining Device that will be connecting to the Translator Proxy (`min_individual_miner_hashrate`) -- the number of shares per minute that Mining Devices should be sending to the Translator Proxy (`shares_per_minute`). -6. The upstream difficulty params such as: -- the interval in seconds to elapse before updating channel hashrate with the pool (`channel_diff_update_interval`) -- the estimated aggregate hashrate of all SV1 Downstream roles (`channel_nominal_hashrate`) +#### **Upstream Configuration** +- `address`/`port`: SV2 upstream server connection details +- `authority_pubkey`: Public key for SV2 connection authentication -### Run +## Usage + +### Installation & Build + +```bash +# Clone the repository +git clone https://github.com/stratum-mining/stratum.git +cd stratum -There are two files in `roles/translator/config-examples`: -- `tproxy-config-local-jdc-example.toml` which assumes the Job Declaration protocol is used and a JD Client is deployed locally -- `tproxy-config-local-pool-example.toml` which assumes Job Declaration protocol is NOT used, and a Pool is deployed locally +# Build the translator +cargo build --release -p translator_sv2 +``` + +### Running the Translator + +#### **With Local Pool** +```bash +cd roles/translator +cargo run -- -c config-examples/tproxy-config-local-pool-example.toml +``` +#### **With Job Declaration Client** ```bash -cd roles/translator/config-examples/ -cargo run -- -c tproxy-config-local-jdc-example.toml +cd roles/translator +cargo run -- -c config-examples/tproxy-config-local-jdc-example.toml +``` + +#### **With Hosted Pool** +```bash +cd roles/translator +cargo run -- -c config-examples/tproxy-config-hosted-pool-example.toml +``` + +### Command Line Options + +```bash +# Use specific config file +translator_sv2 -c /path/to/config.toml +translator_sv2 --config /path/to/config.toml + +# Show help +translator_sv2 -h +translator_sv2 --help +``` 
+ +## Configuration Examples + +### Example 1: Local Pool Setup +For connecting to a local SV2 pool server: + +```toml +downstream_address = "0.0.0.0" +downstream_port = 34255 +user_identity = "miner_farm_1" +aggregate_channels = true + +[downstream_difficulty_config] +min_individual_miner_hashrate = 10_000_000_000_000.0 +shares_per_minute = 6.0 + +[[upstreams]] +address = "127.0.0.1" +port = 34254 +authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" +``` + +### Example 2: High-Availability Setup +For production environments with failover: + +```toml +downstream_address = "0.0.0.0" +downstream_port = 34255 +user_identity = "production_farm" +aggregate_channels = true + +[downstream_difficulty_config] +min_individual_miner_hashrate = 50_000_000_000_000.0 # 50 TH/s +shares_per_minute = 10.0 + +# Primary upstream +[[upstreams]] +address = "primary.pool.com" +port = 34254 +authority_pubkey = "primary_pool_pubkey" + +# Backup upstream +[[upstreams]] +address = "backup.pool.com" +port = 34254 +authority_pubkey = "backup_pool_pubkey" +``` + +## Architecture Details + +### **Component Overview** + +1. **SV1 Server**: Handles incoming SV1 connections from mining devices +2. **SV2 Upstream**: Manages connections to SV2 pool servers with failover +3. **Channel Manager**: Orchestrates message routing and protocol translation +4. **Task Manager**: Manages async task lifecycle and coordination +5. **Status System**: Provides real-time monitoring and health reporting + +### **Channel Modes** -### Limitations +- **Aggregated Mode**: All miners share one extended channel + - More efficient for large farms + - Reduced upstream connection overhead + - Shared work distribution -The current implementation always replies to Sv1 `mining.submit` with `"result": true`, regardless of whether the share was rejected on Sv2 upstream. 
\ No newline at end of file +- **Non-Aggregated Mode**: Each miner gets individual upstream channel + - Better isolation between miners + - Individual difficulty adjustment by the upstream Pool \ No newline at end of file diff --git a/roles/translator/config-examples/tproxy-config-hosted-pool-example.toml b/roles/translator/config-examples/tproxy-config-hosted-pool-example.toml index aa616fe832..baaf7c4e6b 100644 --- a/roles/translator/config-examples/tproxy-config-hosted-pool-example.toml +++ b/roles/translator/config-examples/tproxy-config-hosted-pool-example.toml @@ -3,11 +3,6 @@ # upstream_address = "18.196.32.109" # upstream_port = 3336 -# Hosted SRI Pool Upstream Connection -upstream_address = "75.119.150.111" -upstream_port = 34254 -upstream_authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" - # Local Mining Device Downstream Connection downstream_address = "0.0.0.0" downstream_port = 34255 @@ -22,6 +17,13 @@ min_supported_version = 2 # Min value: 2 min_extranonce2_size = 4 +# User identity/username for pool connection +# This will be appended with a counter for each mining client (e.g., username.miner1, username.miner2) +user_identity = "your_username_here" + +# Aggregate channels: if true, all miners share one upstream channel; if false, each miner gets its own channel +aggregate_channels = true + # Enable this option to set a predefined log file path. # When enabled, logs will always be written to this file. # The CLI option --log-file (or -f) will override this setting if provided. 
@@ -30,12 +32,11 @@ min_extranonce2_size = 4 # Difficulty params [downstream_difficulty_config] # hashes/s of the weakest miner that will be connecting (e.g.: 10 Th/s = 10_000_000_000_000.0) -min_individual_miner_hashrate=10_000_000_000_000.0 +min_individual_miner_hashrate=10_000_000.0 # target number of shares per minute the miner should be sending shares_per_minute = 6.0 -[upstream_difficulty_config] -# interval in seconds to elapse before updating channel hashrate with the pool -channel_diff_update_interval = 60 -# estimated accumulated hashrate of all downstream miners (e.g.: 10 Th/s = 10_000_000_000_000.0) -channel_nominal_hashrate = 10_000_000_000_000.0 +[[upstreams]] +address = "75.119.150.111" +port = 34254 +authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" \ No newline at end of file diff --git a/roles/translator/config-examples/tproxy-config-local-jdc-example.toml b/roles/translator/config-examples/tproxy-config-local-jdc-example.toml index aa53dd40f3..af529aa35c 100644 --- a/roles/translator/config-examples/tproxy-config-local-jdc-example.toml +++ b/roles/translator/config-examples/tproxy-config-local-jdc-example.toml @@ -3,11 +3,6 @@ # upstream_address = "18.196.32.109" # upstream_port = 3336 -# Local SRI JDC Upstream Connection -upstream_address = "127.0.0.1" -upstream_port = 34265 -upstream_authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" - # Local Mining Device Downstream Connection downstream_address = "0.0.0.0" downstream_port = 34255 @@ -22,6 +17,13 @@ min_supported_version = 2 # Min value: 2 min_extranonce2_size = 4 +# User identity/username for pool connection +# This will be appended with a counter for each mining client (e.g., username.miner1, username.miner2) +user_identity = "your_username_here" + +# Aggregate channels: if true, all miners share one upstream channel; if false, each miner gets its own channel +aggregate_channels = true + # Enable this option to set a predefined log file path. 
# When enabled, logs will always be written to this file. # The CLI option --log-file (or -f) will override this setting if provided. @@ -34,8 +36,8 @@ min_individual_miner_hashrate=10_000_000_000_000.0 # target number of shares per minute the miner should be sending shares_per_minute = 6.0 -[upstream_difficulty_config] -# interval in seconds to elapse before updating channel hashrate with the pool -channel_diff_update_interval = 60 -# estimated accumulated hashrate of all downstream miners (e.g.: 10 Th/s = 10_000_000_000_000.0) -channel_nominal_hashrate = 10_000_000_000_000.0 + +[[upstreams]] +address = "127.0.0.1" +port = 34265 +authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" \ No newline at end of file diff --git a/roles/translator/config-examples/tproxy-config-local-pool-example.toml b/roles/translator/config-examples/tproxy-config-local-pool-example.toml index bc9e552277..6f9856f2c2 100644 --- a/roles/translator/config-examples/tproxy-config-local-pool-example.toml +++ b/roles/translator/config-examples/tproxy-config-local-pool-example.toml @@ -3,11 +3,6 @@ # upstream_address = "18.196.32.109" # upstream_port = 3336 -# Local SRI Pool Upstream Connection -upstream_address = "127.0.0.1" -upstream_port = 34254 -upstream_authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" - # Local Mining Device Downstream Connection downstream_address = "0.0.0.0" downstream_port = 34255 @@ -22,6 +17,13 @@ min_supported_version = 2 # Min value: 2 min_extranonce2_size = 4 +# User identity/username for pool connection +# This will be appended with a counter for each mining client (e.g., username.miner1, username.miner2) +user_identity = "your_username_here" + +# Aggregate channels: if true, all miners share one upstream channel; if false, each miner gets its own channel +aggregate_channels = true + # Enable this option to set a predefined log file path. # When enabled, logs will always be written to this file. 
# The CLI option --log-file (or -f) will override this setting if provided. @@ -34,8 +36,12 @@ min_individual_miner_hashrate=10_000_000_000_000.0 # target number of shares per minute the miner should be sending shares_per_minute = 6.0 -[upstream_difficulty_config] -# interval in seconds to elapse before updating channel hashrate with the pool -channel_diff_update_interval = 60 -# estimated accumulated hashrate of all downstream miners (e.g.: 10 Th/s = 10_000_000_000_000.0) -channel_nominal_hashrate = 10_000_000_000_000.0 +[[upstreams]] +address = "127.0.0.1" +port = 34254 +authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" + +[[upstreams]] +address = "75.119.150.111" +port = 34254 +authority_pubkey = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" \ No newline at end of file diff --git a/roles/translator/src/args.rs b/roles/translator/src/args.rs index 2baa9ff600..e43746ccaa 100644 --- a/roles/translator/src/args.rs +++ b/roles/translator/src/args.rs @@ -6,10 +6,7 @@ use clap::Parser; use ext_config::{Config, File, FileFormat}; use std::path::PathBuf; use tracing::error; -use translator_sv2::{ - config::TranslatorConfig, - error::{Error, ProxyResult}, -}; +use translator_sv2::{config::TranslatorConfig, error::TproxyError}; /// Holds the parsed CLI arguments. #[derive(Parser, Debug)] @@ -32,14 +29,14 @@ pub struct Args { /// Process CLI args, if any. 
#[allow(clippy::result_large_err)] -pub fn process_cli_args<'a>() -> ProxyResult<'a, TranslatorConfig> { +pub fn process_cli_args() -> Result { // Parse CLI arguments let args = Args::parse(); // Build configuration from the provided file path let config_path = args.config_path.to_str().ok_or_else(|| { error!("Invalid configuration path."); - Error::BadCliArgs + TproxyError::BadCliArgs })?; let settings = Config::builder() diff --git a/roles/translator/src/lib/config.rs b/roles/translator/src/lib/config.rs index 85f5b522d2..0611613de3 100644 --- a/roles/translator/src/lib/config.rs +++ b/roles/translator/src/lib/config.rs @@ -10,7 +10,6 @@ //! - Downstream interface address and port ([`DownstreamConfig`]) //! - Supported protocol versions //! - Downstream difficulty adjustment parameters ([`DownstreamDifficultyConfig`]) -//! - Upstream difficulty adjustment parameters ([`UpstreamDifficultyConfig`]) use std::path::{Path, PathBuf}; use key_utils::Secp256k1PublicKey; @@ -19,12 +18,7 @@ use serde::Deserialize; /// Configuration for the Translator. #[derive(Debug, Deserialize, Clone)] pub struct TranslatorConfig { - /// The address of the upstream server. - pub upstream_address: String, - /// The port of the upstream server. - pub upstream_port: u16, - /// The Secp256k1 public key used to authenticate the upstream authority. - pub upstream_authority_pubkey: Secp256k1PublicKey, + pub upstreams: Vec, /// The address for the downstream interface. pub downstream_address: String, /// The port for the downstream interface. @@ -35,50 +29,36 @@ pub struct TranslatorConfig { pub min_supported_version: u16, /// The minimum size required for the extranonce2 field in mining submissions. pub min_extranonce2_size: u16, + /// The user identity/username to use when connecting to the pool. + /// This will be appended with a counter for each mining channel (e.g., username.miner1, + /// username.miner2). 
+ pub user_identity: String, /// Configuration settings for managing difficulty on the downstream connection. pub downstream_difficulty_config: DownstreamDifficultyConfig, - /// Configuration settings for managing difficulty on the upstream connection. - pub upstream_difficulty_config: UpstreamDifficultyConfig, + /// Whether to aggregate all downstream connections into a single upstream channel. + /// If true, all miners share one channel. If false, each miner gets its own channel. + pub aggregate_channels: bool, /// The path to the log file for the Translator. log_file: Option, } -impl TranslatorConfig { - pub fn set_log_dir(&mut self, log_dir: Option) { - if let Some(dir) = log_dir { - self.log_file = Some(dir); - } - } - pub fn log_dir(&self) -> Option<&Path> { - self.log_file.as_deref() - } -} - -/// Configuration settings specific to the upstream connection. -pub struct UpstreamConfig { +#[derive(Debug, Deserialize, Clone)] +pub struct Upstream { /// The address of the upstream server. - address: String, + pub address: String, /// The port of the upstream server. - port: u16, + pub port: u16, /// The Secp256k1 public key used to authenticate the upstream authority. - authority_pubkey: Secp256k1PublicKey, - /// Configuration settings for managing difficulty on the upstream connection. - difficulty_config: UpstreamDifficultyConfig, + pub authority_pubkey: Secp256k1PublicKey, } -impl UpstreamConfig { +impl Upstream { /// Creates a new `UpstreamConfig` instance. - pub fn new( - address: String, - port: u16, - authority_pubkey: Secp256k1PublicKey, - difficulty_config: UpstreamDifficultyConfig, - ) -> Self { + pub fn new(address: String, port: u16, authority_pubkey: Secp256k1PublicKey) -> Self { Self { address, port, authority_pubkey, - difficulty_config, } } } @@ -108,26 +88,36 @@ impl TranslatorConfig { /// Creates a new `TranslatorConfig` instance by combining upstream and downstream /// configurations and specifying version and extranonce constraints. 
pub fn new( - upstream: UpstreamConfig, + upstreams: Vec, downstream: DownstreamConfig, max_supported_version: u16, min_supported_version: u16, min_extranonce2_size: u16, + user_identity: String, + aggregate_channels: bool, ) -> Self { Self { - upstream_address: upstream.address, - upstream_port: upstream.port, - upstream_authority_pubkey: upstream.authority_pubkey, + upstreams, downstream_address: downstream.address, downstream_port: downstream.port, max_supported_version, min_supported_version, min_extranonce2_size, + user_identity, downstream_difficulty_config: downstream.difficulty_config, - upstream_difficulty_config: upstream.difficulty_config, + aggregate_channels, log_file: None, } } + + pub fn set_log_dir(&mut self, log_dir: Option) { + if let Some(dir) = log_dir { + self.log_file = Some(dir); + } + } + pub fn log_dir(&self) -> Option<&Path> { + self.log_file.as_deref() + } } /// Configuration settings for managing difficulty adjustments on the downstream connection. @@ -167,35 +157,3 @@ impl PartialEq for DownstreamDifficultyConfig { == self.min_individual_miner_hashrate.round() as u32 } } - -/// Configuration settings for difficulty adjustments on the upstream connection. -#[derive(Debug, Deserialize, Clone)] -pub struct UpstreamDifficultyConfig { - /// The interval in seconds at which the channel difficulty should be updated. - pub channel_diff_update_interval: u32, - /// The nominal hashrate for the channel, used in difficulty calculations. - pub channel_nominal_hashrate: f32, - /// The timestamp of the last difficulty update for the channel. - #[serde(default = "u64::default")] - pub timestamp_of_last_update: u64, - /// Indicates whether shares from downstream should be aggregated before submitting upstream. - #[serde(default = "bool::default")] - pub should_aggregate: bool, -} - -impl UpstreamDifficultyConfig { - /// Creates a new `UpstreamDifficultyConfig` instance. 
- pub fn new( - channel_diff_update_interval: u32, - channel_nominal_hashrate: f32, - timestamp_of_last_update: u64, - should_aggregate: bool, - ) -> Self { - Self { - channel_diff_update_interval, - channel_nominal_hashrate, - timestamp_of_last_update, - should_aggregate, - } - } -} diff --git a/roles/translator/src/lib/downstream_sv1/diff_management.rs b/roles/translator/src/lib/downstream_sv1/diff_management.rs deleted file mode 100644 index e1e101a43c..0000000000 --- a/roles/translator/src/lib/downstream_sv1/diff_management.rs +++ /dev/null @@ -1,426 +0,0 @@ -//! ## Downstream SV1 Difficulty Management Module -//! -//! This module contains the logic and helper functions -//! for managing difficulty and hashrate adjustments for downstream mining clients -//! communicating via the SV1 protocol. -//! -//! It handles tasks such as: -//! - Converting SV2 targets received from upstream into SV1 difficulty values. -//! - Calculating and updating individual miner hashrates based on submitted shares. -//! - Preparing SV1 `mining.set_difficulty` messages. -//! - Potentially managing difficulty thresholds and adjustment logic for downstream miners. - -use super::{Downstream, DownstreamMessages, SetDownstreamTarget}; - -use super::super::error::{Error, ProxyResult}; -use primitive_types::U256; -use std::{ops::Div, sync::Arc}; -use stratum_common::roles_logic_sv2::{ - codec_sv2::binary_sv2, - mining_sv2::Target, - utils::{hash_rate_to_target, Mutex}, -}; -use tracing::debug; -use v1::json_rpc; - -impl Downstream { - /// Initializes the difficulty management parameters for a downstream connection. - /// - /// This function sets the initial timestamp for the last difficulty update and - /// resets the count of submitted shares. 
It also adds the miner's configured - /// minimum hashrate to the aggregated channel nominal hashrate stored in the - /// upstream difficulty configuration.Finally, it sends a `SetDownstreamTarget` message upstream - /// to the Bridge to inform it of the initial target for this new connection, derived from - /// the provided `init_target`.This should typically be called once when a downstream connection - /// is established. - pub async fn init_difficulty_management(self_: Arc>) -> ProxyResult<'static, ()> { - let (connection_id, upstream_difficulty_config, miner_hashrate, init_target) = self_ - .safe_lock(|d| { - _ = d.difficulty_mgmt.reset_counter(); - ( - d.connection_id, - d.upstream_difficulty_config.clone(), - d.hashrate, - d.target.clone(), - ) - })?; - // add new connection hashrate to channel hashrate - upstream_difficulty_config.safe_lock(|u| { - u.channel_nominal_hashrate += miner_hashrate; - })?; - // update downstream target with bridge - let init_target = binary_sv2::U256::from(init_target); - Self::send_message_upstream( - self_, - DownstreamMessages::SetDownstreamTarget(SetDownstreamTarget { - channel_id: connection_id, - new_target: init_target.into(), - }), - ) - .await?; - - Ok(()) - } - - /// Removes the disconnecting miner's hashrate from the aggregated channel nominal hashrate. - /// - /// This function is called when a downstream miner disconnects to ensure that their - /// individual hashrate is subtracted from the total nominal hashrate reported for - /// the channel to the upstream server. 
- #[allow(clippy::result_large_err)] - pub fn remove_miner_hashrate_from_channel(self_: Arc>) -> ProxyResult<'static, ()> { - self_.safe_lock(|d| { - d.upstream_difficulty_config - .safe_lock(|u| { - let hashrate_to_subtract = d.hashrate; - if u.channel_nominal_hashrate >= hashrate_to_subtract { - u.channel_nominal_hashrate -= hashrate_to_subtract; - } else { - u.channel_nominal_hashrate = 0.0; - } - }) - .map_err(|_e| Error::PoisonLock) - })??; - Ok(()) - } - - /// Attempts to update the difficulty settings for a downstream miner based on their - /// performance. - /// - /// This function is triggered periodically or based on share submissions. It calculates - /// the miner's estimated hashrate based on the number of shares submitted and the elapsed - /// time since the last update. If the estimated hashrate has changed significantly according to - /// predefined thresholds, a new target is calculated, a `mining.set_difficulty` message is - /// sent to the miner, and a `SetDownstreamTarget` message is sent upstream to the Bridge to - /// notify it of the target change for this channel. The difficulty management parameters - /// (timestamp and share count) are then reset. - pub async fn try_update_difficulty_settings( - self_: Arc>, - ) -> ProxyResult<'static, ()> { - let (timestamp_of_last_update, shares_since_last_update, channel_id, shares_per_minute) = - self_.clone().safe_lock(|d| { - ( - d.difficulty_mgmt.last_update_timestamp(), - d.difficulty_mgmt.shares_since_last_update(), - d.connection_id, - d.shares_per_minute, - ) - })?; - debug!("Time of last diff update: {:?}", timestamp_of_last_update); - debug!("Number of shares submitted: {:?}", shares_since_last_update); - - if let Some(new_hashrate) = Self::update_miner_hashrate(self_.clone())? 
{ - let new_target: Target = - hash_rate_to_target(new_hashrate.into(), shares_per_minute.into())?.into(); - debug!("New target from hashrate: {:?}", new_target); - let message = Self::get_set_difficulty(new_target.clone())?; - let target = binary_sv2::U256::from(new_target); - Downstream::send_message_downstream(self_.clone(), message).await?; - let update_target_msg = SetDownstreamTarget { - channel_id, - new_target: target.into(), - }; - // notify bridge of target update - Downstream::send_message_upstream( - self_.clone(), - DownstreamMessages::SetDownstreamTarget(update_target_msg), - ) - .await?; - } - Ok(()) - } - - /// Increments the counter for shares submitted by this downstream miner. - /// - /// This function is called each time a valid share is received from the miner. - /// The count is used in the difficulty adjustment logic to estimate the miner's - /// performance over a period. - #[allow(clippy::result_large_err)] - pub(super) fn save_share(self_: Arc>) -> ProxyResult<'static, ()> { - self_.safe_lock(|d| { - d.difficulty_mgmt.increment_shares_since_last_update(); - })?; - Ok(()) - } - - /// Converts an SV2 target received from upstream into an SV1 difficulty value - /// and formats it as a `mining.set_difficulty` JSON-RPC message. - #[allow(clippy::result_large_err)] - pub(super) fn get_set_difficulty(target: Target) -> ProxyResult<'static, json_rpc::Message> { - let value = Downstream::difficulty_from_target(target)?; - debug!("Difficulty from target: {:?}", value); - let set_target = v1::methods::server_to_client::SetDifficulty { value }; - let message: json_rpc::Message = set_target.into(); - Ok(message) - } - - /// Converts target received by the `SetTarget` SV2 message from the Upstream role into the - /// difficulty for the Downstream role sent via the SV1 `mining.set_difficulty` message. 
- #[allow(clippy::result_large_err)] - pub(super) fn difficulty_from_target(target: Target) -> ProxyResult<'static, f64> { - // reverse because target is LE and this function relies on BE - let mut target = binary_sv2::U256::from(target).to_vec(); - - target.reverse(); - - let target = target.as_slice(); - debug!("Target: {:?}", target); - - // If received target is 0, return 0 - if Downstream::is_zero(target) { - return Ok(0.0); - } - let target = U256::from_big_endian(target); - let pdiff: [u8; 32] = [ - 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - ]; - let pdiff = U256::from_big_endian(pdiff.as_ref()); - - if pdiff > target { - let diff = pdiff.div(target); - Ok(diff.low_u64() as f64) - } else { - let diff = target.div(pdiff); - let diff = diff.low_u64() as f64; - // TODO still results in a difficulty that is too low - Ok(1.0 / diff) - } - } - - /// Updates the miner's estimated hashrate and adjusts the aggregated channel nominal hashrate. - /// - /// This function calculates the miner's realized shares per minute over the period - /// since the last update and uses it, along with the current target, to estimate - /// their hashrate. It then compares this new estimate to the previous one and - /// updates the miner's stored hashrate and the channel's aggregated hashrate - /// if the change is significant based on time-dependent thresholds. 
- #[allow(clippy::result_large_err)] - pub fn update_miner_hashrate(self_: Arc>) -> ProxyResult<'static, Option> { - let update = self_.super_safe_lock(|d| { - let previous_hashrate = d.hashrate; - let previous_target = d.target.clone(); - let update = d.difficulty_mgmt.try_vardiff( - previous_hashrate, - &previous_target, - d.shares_per_minute, - ); - if let Ok(Some(new_hashrate)) = update { - // update channel hashrate and target - let new_target: Target = - hash_rate_to_target(new_hashrate.into(), d.shares_per_minute.into()) - .expect("Something went wrong while target calculation") - .into(); - d.hashrate = new_hashrate; - d.target = new_target.clone(); - let hashrate_delta = new_hashrate - previous_hashrate; - d.upstream_difficulty_config.super_safe_lock(|c| { - if c.channel_nominal_hashrate + hashrate_delta > 0.0 { - c.channel_nominal_hashrate += hashrate_delta; - } else { - c.channel_nominal_hashrate = 0.0; - } - }); - } - update - })?; - Ok(update) - } - - /// Helper function to check if target is set to zero for some reason (typically happens when - /// Downstream role first connects). 
- /// https://stackoverflow.com/questions/65367552/checking-a-vecu8-to-see-if-its-all-zero - fn is_zero(buf: &[u8]) -> bool { - let (prefix, aligned, suffix) = unsafe { buf.align_to::() }; - - prefix.iter().all(|&x| x == 0) - && suffix.iter().all(|&x| x == 0) - && aligned.iter().all(|&x| x == 0) - } -} - -#[cfg(test)] -mod test { - - use crate::config::{DownstreamDifficultyConfig, UpstreamDifficultyConfig}; - use async_channel::unbounded; - use rand::{thread_rng, Rng}; - use sha2::{Digest, Sha256}; - use std::{ - sync::Arc, - time::{Duration, Instant}, - }; - use stratum_common::roles_logic_sv2::{ - self, - codec_sv2::binary_sv2::{self, U256}, - mining_sv2::Target, - utils::Mutex, - }; - - use crate::downstream_sv1::Downstream; - - #[ignore] // as described in issue #988 - #[test] - fn test_diff_management() { - let expected_shares_per_minute = 1000.0; - let total_run_time = std::time::Duration::from_secs(60); - let initial_nominal_hashrate = measure_hashrate(5); - let target = match roles_logic_sv2::utils::hash_rate_to_target( - initial_nominal_hashrate, - expected_shares_per_minute, - ) { - Ok(target) => target, - Err(_) => panic!(), - }; - - let mut share = generate_random_80_byte_array(); - let timer = std::time::Instant::now(); - let mut elapsed = std::time::Duration::from_secs(0); - let mut count = 0; - while elapsed <= total_run_time { - // start hashing util a target is met and submit to - mock_mine(target.clone().into(), &mut share); - elapsed = timer.elapsed(); - count += 1; - } - - let calculated_share_per_min = count as f32 / (elapsed.as_secs_f32() / 60.0); - // This is the error margin for a confidence of 99.99...% given the expect number of shares - // per minute TODO the review the math under it - let error_margin = get_error(expected_shares_per_minute); - let error = (calculated_share_per_min - expected_shares_per_minute as f32).abs(); - assert!( - error <= error_margin as f32, - "Calculated shares per minute are outside the 99.99...% confidence 
interval. Error: {error:?}, Error margin: {error_margin:?}, {calculated_share_per_min:?}" - ); - } - - fn get_error(lambda: f64) -> f64 { - let z_score_99 = 6.0; - z_score_99 * lambda.sqrt() - } - - fn mock_mine(target: Target, share: &mut [u8; 80]) { - let mut hashed: Target = [255_u8; 32].into(); - while hashed > target { - hashed = hash(share); - } - } - - // returns hashrate based on how fast the device hashes over the given duration - fn measure_hashrate(duration_secs: u64) -> f64 { - let mut share = generate_random_80_byte_array(); - let start_time = Instant::now(); - let mut hashes: u64 = 0; - let duration = Duration::from_secs(duration_secs); - - while start_time.elapsed() < duration { - for _ in 0..10000 { - hash(&mut share); - hashes += 1; - } - } - - let elapsed_secs = start_time.elapsed().as_secs_f64(); - - hashes as f64 / elapsed_secs - } - - fn hash(share: &mut [u8; 80]) -> Target { - let nonce: [u8; 8] = share[0..8].try_into().unwrap(); - let mut nonce = u64::from_le_bytes(nonce); - nonce += 1; - share[0..8].copy_from_slice(&nonce.to_le_bytes()); - let hash = Sha256::digest(&share).to_vec(); - let hash: U256<'static> = hash.try_into().unwrap(); - hash.into() - } - - fn generate_random_80_byte_array() -> [u8; 80] { - let mut rng = thread_rng(); - let mut arr = [0u8; 80]; - rng.fill(&mut arr[..]); - arr - } - - #[tokio::test] - async fn test_converge_to_spm_from_low() { - test_converge_to_spm(1.0).await - } - //TODO - //#[tokio::test] - //async fn test_converge_to_spm_from_high() { - // test_converge_to_spm(1_000_000_000_000).await - //} - - async fn test_converge_to_spm(start_hashrate: f64) { - let downstream_conf = DownstreamDifficultyConfig { - min_individual_miner_hashrate: start_hashrate as f32, // updated below - shares_per_minute: 1000.0, // 1000 shares per minute - submits_since_last_update: 0, - timestamp_of_last_update: 0, // updated below - }; - let upstream_config = UpstreamDifficultyConfig { - channel_diff_update_interval: 60, - 
channel_nominal_hashrate: 0.0, - timestamp_of_last_update: 0, - should_aggregate: false, - }; - let (tx_sv1_submit, _rx_sv1_submit) = unbounded(); - let (tx_outgoing, _rx_outgoing) = unbounded(); - let downstream = Downstream::new( - 1, - vec![], - vec![], - None, - None, - tx_sv1_submit, - tx_outgoing, - false, - 0, - downstream_conf.clone(), - Arc::new(Mutex::new(upstream_config)), - ); - - let total_run_time = std::time::Duration::from_secs(75); - let config_shares_per_minute = downstream_conf.shares_per_minute; - let timer = std::time::Instant::now(); - let mut elapsed = std::time::Duration::from_secs(0); - - let expected_nominal_hashrate = measure_hashrate(5); - let expected_target = match roles_logic_sv2::utils::hash_rate_to_target( - expected_nominal_hashrate, - config_shares_per_minute.into(), - ) { - Ok(target) => target, - Err(_) => panic!(), - }; - - let mut initial_target = downstream.target.clone(); - let downstream = Arc::new(Mutex::new(downstream)); - Downstream::init_difficulty_management(downstream.clone()) - .await - .unwrap(); - let mut share = generate_random_80_byte_array(); - while elapsed <= total_run_time { - mock_mine(initial_target.clone(), &mut share); - Downstream::save_share(downstream.clone()).unwrap(); - Downstream::try_update_difficulty_settings(downstream.clone()) - .await - .unwrap(); - initial_target = downstream.safe_lock(|d| d.target.clone()).unwrap(); - elapsed = timer.elapsed(); - } - let expected_0s = trailing_0s(expected_target.inner_as_ref().to_vec()); - let actual_0s = trailing_0s(binary_sv2::U256::from(initial_target.clone()).to_vec()); - assert!(expected_0s.abs_diff(actual_0s) <= 1); - } - - fn trailing_0s(mut v: Vec) -> usize { - let mut ret = 0; - while v.pop() == Some(0) { - ret += 1; - } - ret - } -} diff --git a/roles/translator/src/lib/downstream_sv1/downstream.rs b/roles/translator/src/lib/downstream_sv1/downstream.rs deleted file mode 100644 index ac2819c893..0000000000 --- 
a/roles/translator/src/lib/downstream_sv1/downstream.rs +++ /dev/null @@ -1,728 +0,0 @@ -//! ## Downstream SV1 Module: Downstream Connection Logic -//! -//! Defines the [`Downstream`] structure, which represents and manages an -//! individual connection from a downstream SV1 mining client. -//! -//! This module is responsible for: -//! - Accepting incoming TCP connections from SV1 miners. -//! - Handling the SV1 protocol handshake (`mining.subscribe`, `mining.authorize`, -//! `mining.configure`). -//! - Receiving SV1 `mining.submit` messages from miners. -//! - Translating SV1 `mining.submit` messages into internal [`DownstreamMessages`] (specifically -//! [`SubmitShareWithChannelId`]) and sending them to the Bridge. -//! - Receiving translated SV1 `mining.notify` messages from the Bridge and sending them to the -//! connected miner. -//! - Managing the miner's extranonce1, extranonce2 size, and version rolling parameters. -//! - Implementing downstream-specific difficulty management logic, including tracking submitted -//! shares and updating the miner's difficulty target. -//! - Implementing the necessary SV1 server traits ([`IsServer`]) and SV2 roles logic traits -//! ([`IsMiningDownstream`], [`IsDownstream`]). 
- -use crate::{ - config::{DownstreamDifficultyConfig, UpstreamDifficultyConfig}, - downstream_sv1, - error::ProxyResult, - status, -}; -use async_channel::{bounded, Receiver, Sender}; -use error_handling::handle_result; -use futures::{FutureExt, StreamExt}; -use tokio::{ - io::{AsyncWriteExt, BufReader}, - net::{TcpListener, TcpStream}, - sync::broadcast, - task::AbortHandle, -}; - -use super::{kill, DownstreamMessages, SubmitShareWithChannelId, SUBSCRIBE_TIMEOUT_SECS}; - -use stratum_common::roles_logic_sv2::{ - mining_sv2::Target, - utils::{hash_rate_to_target, Mutex}, - vardiff::Vardiff, - VardiffState, -}; - -use crate::error::Error; -use futures::select; -use tokio_util::codec::{FramedRead, LinesCodec}; - -use std::{net::SocketAddr, sync::Arc}; -use tracing::{debug, info, warn}; -use v1::{ - client_to_server::{self, Submit}, - json_rpc, server_to_client, - utils::{Extranonce, HexU32Be}, - IsServer, -}; - -/// The maximum allowed length for a single line (JSON-RPC message) received from an SV1 client. -const MAX_LINE_LENGTH: usize = 2_usize.pow(16); - -/// Handles the sending and receiving of messages to and from an SV2 Upstream role (most typically -/// a SV2 Pool server). -#[derive(Debug)] -pub struct Downstream { - /// The unique identifier assigned to this downstream connection/channel. - pub(super) connection_id: u32, - /// List of authorized Downstream Mining Devices. - authorized_names: Vec, - /// The extranonce1 value assigned to this downstream miner. - extranonce1: Vec, - /// `extranonce1` to be sent to the Downstream in the SV1 `mining.subscribe` message response. - //extranonce1: Vec, - //extranonce2_size: usize, - /// Version rolling mask bits - version_rolling_mask: Option, - /// Minimum version rolling mask bits size - version_rolling_min_bit: Option, - /// Sends a SV1 `mining.submit` message received from the Downstream role to the `Bridge` for - /// translation into a SV2 `SubmitSharesExtended`. 
- tx_sv1_bridge: Sender, - /// Sends message to the SV1 Downstream role. - tx_outgoing: Sender, - /// True if this is the first job received from `Upstream`. - first_job_received: bool, - /// The expected size of the extranonce2 field provided by the miner. - extranonce2_len: usize, - // Current Channel target - pub target: Target, - // Current channel hashrate - pub hashrate: f32, - // Shares_per_minute - pub shares_per_minute: f32, - /// Configuration and state for managing difficulty adjustments specific - /// to this individual downstream miner. - pub(super) difficulty_mgmt: Box, - /// Configuration settings for the upstream channel's difficulty management. - pub(super) upstream_difficulty_config: Arc>, -} - -impl Downstream { - // not huge fan of test specific code in codebase. - #[cfg(test)] - pub fn new( - connection_id: u32, - authorized_names: Vec, - extranonce1: Vec, - version_rolling_mask: Option, - version_rolling_min_bit: Option, - tx_sv1_bridge: Sender, - tx_outgoing: Sender, - first_job_received: bool, - extranonce2_len: usize, - difficulty_mgmt: DownstreamDifficultyConfig, - upstream_difficulty_config: Arc>, - ) -> Self { - let hashrate = difficulty_mgmt.min_individual_miner_hashrate; - let target = hash_rate_to_target(hashrate.into(), difficulty_mgmt.shares_per_minute.into()) - .unwrap() - .into(); - let downstream_difficulty_state = VardiffState::new().unwrap(); - Downstream { - connection_id, - authorized_names, - extranonce1, - version_rolling_mask, - version_rolling_min_bit, - tx_sv1_bridge, - tx_outgoing, - first_job_received, - extranonce2_len, - hashrate, - target, - shares_per_minute: difficulty_mgmt.shares_per_minute, - difficulty_mgmt: Box::new(downstream_difficulty_state), - upstream_difficulty_config, - } - } - /// Instantiates and manages a new handler for a single downstream SV1 client connection. - /// - /// This is the primary function called for each new incoming TCP stream from a miner. 
- /// It sets up the communication channels, initializes the `Downstream` struct state, - /// and spawns the necessary tasks to handle: - /// 1. Reading incoming messages from the miner's socket. - /// 2. Writing outgoing messages to the miner's socket. - /// 3. Sending job notifications to the miner (handling initial job and subsequent updates). - /// - /// It uses shutdown channels to coordinate graceful termination of the spawned tasks. - #[allow(clippy::too_many_arguments)] - pub async fn new_downstream( - stream: TcpStream, - connection_id: u32, - tx_sv1_bridge: Sender, - mut rx_sv1_notify: broadcast::Receiver>, - tx_status: status::Sender, - extranonce1: Vec, - last_notify: Option>, - extranonce2_len: usize, - host: String, - difficulty_config: DownstreamDifficultyConfig, - upstream_difficulty_config: Arc>, - task_collector: Arc>>, - ) { - let hashrate = difficulty_config.min_individual_miner_hashrate; - let target = - hash_rate_to_target(hashrate.into(), difficulty_config.shares_per_minute.into()) - .expect("Couldn't convert hashrate to target") - .into(); - - let downstream_difficulty_state = - VardiffState::new().expect("Couldn't initialize vardiff module"); - // Reads and writes from Downstream SV1 Mining Device Client - let (socket_reader, mut socket_writer) = stream.into_split(); - let (tx_outgoing, receiver_outgoing) = bounded(10); - - let downstream = Arc::new(Mutex::new(Downstream { - connection_id, - authorized_names: vec![], - extranonce1, - //extranonce1: extranonce1.to_vec(), - version_rolling_mask: None, - version_rolling_min_bit: None, - tx_sv1_bridge, - tx_outgoing, - first_job_received: false, - extranonce2_len, - hashrate, - target, - shares_per_minute: difficulty_config.shares_per_minute, - difficulty_mgmt: Box::new(downstream_difficulty_state), - upstream_difficulty_config, - })); - let self_ = downstream.clone(); - - let host_ = host.clone(); - // The shutdown channel is used local to the `Downstream::new_downstream()` function. 
- // Each task is set broadcast a shutdown message at the end of their lifecycle with - // `kill()`, and each task has a receiver to listen for the shutdown message. When a - // shutdown message is received the task should `break` its loop. For any errors that should - // shut a task down, we should `break` out of the loop, so that the `kill` function - // can send the shutdown broadcast. EXTRA: The since all downstream tasks rely on - // receiving messages with a future (either TCP recv or Receiver<_>) we use the - // futures::select! macro to merge the receiving end of a task channels into a single loop - // within the task - let (tx_shutdown, rx_shutdown): (Sender, Receiver) = async_channel::bounded(3); - - let rx_shutdown_clone = rx_shutdown.clone(); - let tx_shutdown_clone = tx_shutdown.clone(); - let tx_status_reader = tx_status.clone(); - let task_collector_mining_device = task_collector.clone(); - // Task to read from SV1 Mining Device Client socket via `socket_reader`. Depending on the - // SV1 message received, a message response is sent directly back to the SV1 Downstream - // role, or the message is sent upwards to the Bridge for translation into a SV2 message - // and then sent to the SV2 Upstream role. - let socket_reader_task = tokio::task::spawn(async move { - let reader = BufReader::new(socket_reader); - let mut messages = - FramedRead::new(reader, LinesCodec::new_with_max_length(MAX_LINE_LENGTH)); - loop { - // Read message from SV1 Mining Device Client socket - // On message receive, parse to `json_rpc:Message` and send to Upstream - // `Translator.receive_downstream` via `sender_upstream` done in - // `send_message_upstream`. - select! 
{ - res = messages.next().fuse() => { - match res { - Some(Ok(incoming)) => { - debug!("Receiving from Mining Device {}: {:?}", &host_, &incoming); - let incoming: json_rpc::Message = handle_result!(tx_status_reader, serde_json::from_str(&incoming)); - // Handle what to do with message - // if let json_rpc::Message - - // if message is Submit Shares update difficulty management - if let v1::Message::StandardRequest(standard_req) = incoming.clone() { - if let Ok(Submit{..}) = standard_req.try_into() { - handle_result!(tx_status_reader, Self::save_share(self_.clone())); - } - } - - let res = Self::handle_incoming_sv1(self_.clone(), incoming).await; - handle_result!(tx_status_reader, res); - } - Some(Err(_)) => { - handle_result!(tx_status_reader, Err(Error::Sv1MessageTooLong)); - } - None => { - handle_result!(tx_status_reader, Err( - std::io::Error::new( - std::io::ErrorKind::ConnectionAborted, - "Connection closed by client" - ) - )); - } - } - }, - _ = rx_shutdown_clone.recv().fuse() => { - break; - } - }; - } - kill(&tx_shutdown_clone).await; - warn!("Downstream: Shutting down sv1 downstream reader"); - }); - let _ = task_collector_mining_device.safe_lock(|a| { - a.push(( - socket_reader_task.abort_handle(), - "socket_reader_task".to_string(), - )) - }); - - let rx_shutdown_clone = rx_shutdown.clone(); - let tx_shutdown_clone = tx_shutdown.clone(); - let tx_status_writer = tx_status.clone(); - let host_ = host.clone(); - - let task_collector_new_sv1_message_no_transl = task_collector.clone(); - // Task to receive SV1 message responses to SV1 messages that do NOT need translation. - // These response messages are sent directly to the SV1 Downstream role. - let socket_writer_task = tokio::task::spawn(async move { - loop { - select! 
{ - res = receiver_outgoing.recv().fuse() => { - let to_send = handle_result!(tx_status_writer, res); - let to_send = match serde_json::to_string(&to_send) { - Ok(string) => format!("{string}\n"), - Err(_e) => { - debug!("\nDownstream: Bad SV1 server message\n"); - break; - } - }; - debug!("Sending to Mining Device: {} - {:?}", &host_, &to_send); - let res = socket_writer - .write_all(to_send.as_bytes()) - .await; - handle_result!(tx_status_writer, res); - }, - _ = rx_shutdown_clone.recv().fuse() => { - break; - } - }; - } - kill(&tx_shutdown_clone).await; - warn!( - "Downstream: Shutting down sv1 downstream writer: {}", - &host_ - ); - }); - let _ = task_collector_new_sv1_message_no_transl.safe_lock(|a| { - a.push(( - socket_writer_task.abort_handle(), - "socket_writer_task".to_string(), - )) - }); - - let tx_status_notify = tx_status; - let self_ = downstream.clone(); - - let task_collector_notify_task = task_collector.clone(); - let notify_task = tokio::task::spawn(async move { - let timeout_timer = std::time::Instant::now(); - let mut first_sent = false; - loop { - let is_a = match downstream.safe_lock(|d| !d.authorized_names.is_empty()) { - Ok(is_a) => is_a, - Err(_e) => { - debug!("\nDownstream: Poison Lock - authorized_names\n"); - break; - } - }; - if is_a && !first_sent && last_notify.is_some() { - let target = downstream - .safe_lock(|d| d.target.clone()) - .expect("downstream target couldn't be computed"); - // make sure the mining start time is initialized and reset number of shares - // submitted - handle_result!( - tx_status_notify, - Self::init_difficulty_management(downstream.clone()).await - ); - let message = - handle_result!(tx_status_notify, Self::get_set_difficulty(target)); - handle_result!( - tx_status_notify, - Downstream::send_message_downstream(downstream.clone(), message).await - ); - - let sv1_mining_notify_msg = last_notify.clone().unwrap(); - - let message: json_rpc::Message = sv1_mining_notify_msg.into(); - handle_result!( - 
tx_status_notify, - Downstream::send_message_downstream(downstream.clone(), message).await - ); - if let Err(_e) = downstream.clone().safe_lock(|s| { - s.first_job_received = true; - }) { - debug!("\nDownstream: Poison Lock - first_job_received\n"); - break; - } - first_sent = true; - } else if is_a { - // if hashrate has changed, update difficulty management, and send new - // mining.set_difficulty - select! { - res = rx_sv1_notify.recv().fuse() => { - // if hashrate has changed, update difficulty management, and send new mining.set_difficulty - handle_result!(tx_status_notify, Self::try_update_difficulty_settings(downstream.clone()).await); - - let sv1_mining_notify_msg = handle_result!(tx_status_notify, res); - let message: json_rpc::Message = sv1_mining_notify_msg.clone().into(); - - handle_result!(tx_status_notify, Downstream::send_message_downstream(downstream.clone(), message).await); - }, - _ = rx_shutdown.recv().fuse() => { - break; - } - }; - } else { - // timeout connection if miner does not send the authorize message after sending - // a subscribe - if timeout_timer.elapsed().as_secs() > SUBSCRIBE_TIMEOUT_SECS { - debug!( - "Downstream: miner.subscribe/miner.authorize TIMOUT for {}", - &host - ); - break; - } - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - } - } - let _ = Self::remove_miner_hashrate_from_channel(self_); - kill(&tx_shutdown).await; - warn!( - "Downstream: Shutting down sv1 downstream job notifier for {}", - &host - ); - }); - - let _ = task_collector_notify_task - .safe_lock(|a| a.push((notify_task.abort_handle(), "notify_task".to_string()))); - } - - /// Accepts incoming TCP connections from SV1 mining clients on the configured address. - /// - /// For each new connection, it attempts to open a new SV1 downstream channel - /// via the Bridge (`bridge.on_new_sv1_connection`). 
If successful, it spawns - /// a new task using `Downstream::new_downstream` to handle - /// the communication and logic for that specific miner connection. - /// This method runs indefinitely, listening for and accepting new connections. - #[allow(clippy::too_many_arguments)] - pub fn accept_connections( - downstream_addr: SocketAddr, - tx_sv1_submit: Sender, - tx_mining_notify: broadcast::Sender>, - tx_status: status::Sender, - bridge: Arc>, - downstream_difficulty_config: DownstreamDifficultyConfig, - upstream_difficulty_config: Arc>, - task_collector: Arc>>, - ) { - let accept_connections = tokio::task::spawn({ - let task_collector = task_collector.clone(); - async move { - let listener = TcpListener::bind(downstream_addr).await.unwrap(); - - while let Ok((stream, _)) = listener.accept().await { - let expected_hash_rate = - downstream_difficulty_config.min_individual_miner_hashrate; - let open_sv1_downstream = bridge - .safe_lock(|s| s.on_new_sv1_connection(expected_hash_rate)) - .unwrap(); - - let host = stream.peer_addr().unwrap().to_string(); - - match open_sv1_downstream { - Ok(opened) => { - info!("PROXY SERVER - ACCEPTING FROM DOWNSTREAM: {}", host); - Downstream::new_downstream( - stream, - opened.channel_id, - tx_sv1_submit.clone(), - tx_mining_notify.subscribe(), - tx_status.listener_to_connection(), - opened.extranonce, - opened.last_notify, - opened.extranonce2_len as usize, - host, - downstream_difficulty_config.clone(), - upstream_difficulty_config.clone(), - task_collector.clone(), - ) - .await; - } - Err(e) => { - tracing::error!( - "Failed to create a new downstream connection: {:?}", - e - ); - } - } - } - } - }); - let _ = task_collector.safe_lock(|a| { - a.push(( - accept_connections.abort_handle(), - "accept_connections".to_string(), - )) - }); - } - - /// Handles incoming SV1 JSON-RPC messages from a downstream miner. - /// - /// This function acts as the entry point for processing messages received - /// from a miner after framing. 
It uses the `IsServer` trait implementation - /// to parse and handle standard SV1 requests (`mining.subscribe`, `mining.authorize`, - /// `mining.submit`, `mining.configure`). Depending on the message type, it may generate a - /// direct SV1 response to be sent back to the miner or indicate that the message needs to - /// be translated and sent upstream (handled elsewhere, typically by the Bridge). - async fn handle_incoming_sv1( - self_: Arc>, - message_sv1: json_rpc::Message, - ) -> Result<(), super::super::error::Error<'static>> { - // `handle_message` in `IsServer` trait + calls `handle_request` - // TODO: Map err from V1Error to Error::V1Error - let response = self_.safe_lock(|s| s.handle_message(message_sv1)).unwrap(); - match response { - Ok(res) => { - if let Some(r) = res { - // If some response is received, indicates no messages translation is needed - // and response should be sent directly to the SV1 Downstream. Otherwise, - // message will be sent to the upstream Translator to be translated to SV2 and - // forwarded to the `Upstream` - // let sender = self_.safe_lock(|s| s.connection.sender_upstream) - if let Err(e) = Self::send_message_downstream(self_, r.into()).await { - return Err(e.into()); - } - Ok(()) - } else { - // If None response is received, indicates this SV1 message received from the - // Downstream MD is passed to the `Translator` for translation into SV2 - Ok(()) - } - } - Err(e) => Err(e.into()), - } - } - - /// Sends a SV1 JSON-RPC message to the downstream miner's socket writer task. - /// - /// This method is used to send response messages or notifications (like - /// `mining.notify` or `mining.set_difficulty`) to the connected miner. - /// The message is sent over the internal `tx_outgoing` channel, which is - /// read by the socket writer task responsible for serializing and writing - /// the message to the TCP stream. 
- pub(super) async fn send_message_downstream( - self_: Arc>, - response: json_rpc::Message, - ) -> Result<(), async_channel::SendError> { - let sender = self_.safe_lock(|s| s.tx_outgoing.clone()).unwrap(); - debug!("To DOWN: {:?}", response); - sender.send(response).await - } - - /// Sends a message originating from the downstream handler to the Bridge. - /// - /// This function is used to forward messages that require translation or - /// central processing by the Bridge, such as `SubmitShares` or `SetDownstreamTarget`. - /// The message is sent over the internal `tx_sv1_bridge` channel. - pub(super) async fn send_message_upstream( - self_: Arc>, - msg: DownstreamMessages, - ) -> ProxyResult<'static, ()> { - let sender = self_.safe_lock(|s| s.tx_sv1_bridge.clone()).unwrap(); - debug!("To Bridge: {:?}", msg); - let _ = sender.send(msg).await; - Ok(()) - } -} - -/// Implements `IsServer` for `Downstream` to handle the SV1 messages. -impl IsServer<'static> for Downstream { - /// Handles the incoming SV1 `mining.configure` message. - /// - /// This message is received after `mining.subscribe` and `mining.authorize`. - /// It allows the miner to negotiate capabilities, particularly regarding - /// version rolling. This method processes the version rolling mask and - /// minimum bit count provided by the client. - /// - /// Returns a tuple containing: - /// 1. `Option`: The version rolling parameters - /// negotiated by the server (proxy). - /// 2. `Option`: A boolean indicating whether the server (proxy) supports version rolling - /// (always `Some(false)` for TProxy according to the SV1 spec when not supporting work - /// selection). 
- fn handle_configure( - &mut self, - request: &client_to_server::Configure, - ) -> (Option, Option) { - info!("Down: Configuring"); - debug!("Down: Handling mining.configure: {:?}", &request); - - // TODO 0x1FFFE000 should be configured - // = 11111111111111110000000000000 - // this is a reasonable default as it allows all 16 version bits to be used - // If the tproxy/pool needs to use some version bits this needs to be configurable - // so upstreams can negotiate with downstreams. When that happens this should consider - // the min_bit_count in the mining.configure message - self.version_rolling_mask = request - .version_rolling_mask() - .map(|mask| HexU32Be(mask & 0x1FFFE000)); - self.version_rolling_min_bit = request.version_rolling_min_bit_count(); - - debug!( - "Negotiated version_rolling_mask is {:?}", - self.version_rolling_mask - ); - ( - Some(server_to_client::VersionRollingParams::new( - self.version_rolling_mask.clone().unwrap_or(HexU32Be(0)), - self.version_rolling_min_bit.clone().unwrap_or(HexU32Be(0)), - ).expect("Version mask invalid, automatic version mask selection not supported, please change it in carte::downstream_sv1::mod.rs")), - Some(false), - ) - } - - /// Handles the incoming SV1 `mining.subscribe` message. - /// - /// This is typically the first message received from a new client. In the SV1 - /// protocol, it's used to subscribe to job notifications and receive session - /// details like extranonce1 and extranonce2 size. This method acknowledges the subscription and - /// provides the necessary details derived from the upstream SV2 connection (extranonce1 and - /// extranonce2 size). It also provides subscription IDs for the - /// `mining.set_difficulty` and `mining.notify` methods. 
- fn handle_subscribe(&self, request: &client_to_server::Subscribe) -> Vec<(String, String)> { - info!("Down: Subscribing"); - debug!("Down: Handling mining.subscribe: {:?}", &request); - - let set_difficulty_sub = ( - "mining.set_difficulty".to_string(), - downstream_sv1::new_subscription_id(), - ); - let notify_sub = ( - "mining.notify".to_string(), - "ae6812eb4cd7735a302a8a9dd95cf71f".to_string(), - ); - - vec![set_difficulty_sub, notify_sub] - } - - /// Any numbers of workers may be authorized at any time during the session. In this way, a - /// large number of independent Mining Devices can be handled with a single SV1 connection. - /// https://bitcoin.stackexchange.com/questions/29416/how-do-pool-servers-handle-multiple-workers-sharing-one-connection-with-stratum - fn handle_authorize(&self, request: &client_to_server::Authorize) -> bool { - info!("Down: Authorizing"); - debug!("Down: Handling mining.authorize: {:?}", &request); - true - } - - /// Handles the incoming SV1 `mining.submit` message. - /// - /// This message is sent by the miner when they find a share that meets - /// their current difficulty target. It contains the job ID, ntime, nonce, - /// and extranonce2. - /// - /// This method processes the submitted share, potentially validates it - /// against the downstream target (although this might happen in the Bridge - /// or difficulty management logic), translates it into a - /// [`SubmitShareWithChannelId`], and sends it to the Bridge for - /// translation to SV2 and forwarding upstream if it meets the upstream target. 
- fn handle_submit(&self, request: &client_to_server::Submit<'static>) -> bool { - info!("Down: Submitting Share {:?}", request); - debug!("Down: Handling mining.submit: {:?}", &request); - - // TODO: Check if receiving valid shares by adding diff field to Downstream - - let to_send = SubmitShareWithChannelId { - channel_id: self.connection_id, - share: request.clone(), - extranonce: self.extranonce1.clone(), - extranonce2_len: self.extranonce2_len, - version_rolling_mask: self.version_rolling_mask.clone(), - }; - - self.tx_sv1_bridge - .try_send(DownstreamMessages::SubmitShares(to_send)) - .unwrap(); - - true - } - - /// Indicates to the server that the client supports the mining.set_extranonce method. - fn handle_extranonce_subscribe(&self) {} - - /// Checks if a Downstream role is authorized. - fn is_authorized(&self, name: &str) -> bool { - self.authorized_names.contains(&name.to_string()) - } - - /// Authorizes a Downstream role. - fn authorize(&mut self, name: &str) { - self.authorized_names.push(name.to_string()); - } - - /// Sets the `extranonce1` field sent in the SV1 `mining.notify` message to the value specified - /// by the SV2 `OpenExtendedMiningChannelSuccess` message sent from the Upstream role. - fn set_extranonce1( - &mut self, - _extranonce1: Option>, - ) -> Extranonce<'static> { - self.extranonce1.clone().try_into().unwrap() - } - - /// Returns the `Downstream`'s `extranonce1` value. - fn extranonce1(&self) -> Extranonce<'static> { - self.extranonce1.clone().try_into().unwrap() - } - - /// Sets the `extranonce2_size` field sent in the SV1 `mining.notify` message to the value - /// specified by the SV2 `OpenExtendedMiningChannelSuccess` message sent from the Upstream role. - fn set_extranonce2_size(&mut self, _extra_nonce2_size: Option) -> usize { - self.extranonce2_len - } - - /// Returns the `Downstream`'s `extranonce2_size` value. - fn extranonce2_size(&self) -> usize { - self.extranonce2_len - } - - /// Returns the version rolling mask. 
- fn version_rolling_mask(&self) -> Option { - self.version_rolling_mask.clone() - } - - /// Sets the version rolling mask. - fn set_version_rolling_mask(&mut self, mask: Option) { - self.version_rolling_mask = mask; - } - - /// Sets the minimum version rolling bit. - fn set_version_rolling_min_bit(&mut self, mask: Option) { - self.version_rolling_min_bit = mask - } - - fn notify(&mut self) -> Result { - unreachable!() - } -} - -#[cfg(test)] -mod tests { - use stratum_common::roles_logic_sv2::{codec_sv2::binary_sv2::U256, mining_sv2::Target}; - - use super::*; - - #[test] - fn gets_difficulty_from_target() { - let target = vec![ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 255, 127, - 0, 0, 0, 0, 0, - ]; - let target_u256 = U256::Owned(target); - let target = Target::from(target_u256); - let actual = Downstream::difficulty_from_target(target).unwrap(); - let expect = 512.0; - assert_eq!(actual, expect); - } -} diff --git a/roles/translator/src/lib/downstream_sv1/mod.rs b/roles/translator/src/lib/downstream_sv1/mod.rs deleted file mode 100644 index a6190e911f..0000000000 --- a/roles/translator/src/lib/downstream_sv1/mod.rs +++ /dev/null @@ -1,71 +0,0 @@ -//! ## Downstream SV1 Module -//! -//! This module defines the structures, messages, and utility functions -//! used for handling the downstream connection with SV1 mining clients. -//! -//! It includes definitions for messages exchanged with a Bridge component, -//! structures for submitting shares and updating targets, and constants -//! and functions for managing client interactions. -//! -//! The module is organized into the following sub-modules: -//! - [`diff_management`]: (Declared here, likely contains downstream difficulty logic) -//! - [`downstream`]: Defines the core [`Downstream`] struct and its functionalities. 
- -use stratum_common::roles_logic_sv2::mining_sv2::Target; -use v1::{client_to_server::Submit, utils::HexU32Be}; -pub mod diff_management; -pub mod downstream; -pub use downstream::Downstream; - -/// This constant defines a timeout duration. It is used to enforce -/// that clients sending a `mining.subscribe` message must follow up -/// with a `mining.authorize` within this period. This prevents -/// resource exhaustion attacks where clients open connections -/// with only `mining.subscribe` without intending to mine. -const SUBSCRIBE_TIMEOUT_SECS: u64 = 10; - -/// The messages that are sent from the downstream handling logic -/// to a central "Bridge" component for further processing. -#[derive(Debug)] -pub enum DownstreamMessages { - /// Represents a submitted share from a downstream miner, - /// wrapped with the relevant channel ID. - SubmitShares(SubmitShareWithChannelId), - /// Represents an update to the downstream target for a specific channel. - SetDownstreamTarget(SetDownstreamTarget), -} - -/// wrapper around a `mining.submit` with extra channel informationfor the Bridge to -/// process -#[derive(Debug)] -pub struct SubmitShareWithChannelId { - pub channel_id: u32, - pub share: Submit<'static>, - pub extranonce: Vec, - pub extranonce2_len: usize, - pub version_rolling_mask: Option, -} - -/// message for notifying the bridge that a downstream target has updated -/// so the Bridge can process the update -#[derive(Debug)] -pub struct SetDownstreamTarget { - pub channel_id: u32, - pub new_target: Target, -} - -/// This is just a wrapper function to send a message on the Downstream task shutdown channel -/// it does not matter what message is sent because the receiving ends should shutdown on any -/// message -pub async fn kill(sender: &async_channel::Sender) { - // safe to unwrap since the only way this can fail is if all receiving channels are dropped - // meaning all tasks have already dropped - sender.send(true).await.unwrap(); -} - -/// Generates a new, 
hardcoded string intended to be used as a subscription ID. -/// -/// FIXME -pub fn new_subscription_id() -> String { - "ae6812eb4cd7735a302a8a9dd95cf71f".into() -} diff --git a/roles/translator/src/lib/error.rs b/roles/translator/src/lib/error.rs index ab10c3c739..0a692d0447 100644 --- a/roles/translator/src/lib/error.rs +++ b/roles/translator/src/lib/error.rs @@ -10,56 +10,23 @@ use ext_config::ConfigError; use std::{fmt, sync::PoisonError}; -use stratum_common::roles_logic_sv2::{ - self, - codec_sv2::{self, binary_sv2, framing_sv2, Frame}, - mining_sv2::{ExtendedExtranonce, NewExtendedMiningJob, SetCustomMiningJob}, - parsers_sv2::{AnyMessage, Mining, ParserError}, - vardiff::error::VardiffError, -}; -use v1::server_to_client::{Notify, SetDifficulty}; +use tokio::sync::broadcast; +use v1::server_to_client::SetDifficulty; -pub type ProxyResult<'a, T> = core::result::Result>; - -/// Represents specific errors that can occur when sending messages over various -/// channels used within the translator. -/// -/// Each variant corresponds to a failure in sending a particular type of message -/// on its designated channel. #[derive(Debug)] -pub enum ChannelSendError<'a> { - /// Failure sending an SV2 `SubmitSharesExtended` message. - SubmitSharesExtended( - async_channel::SendError>, - ), - /// Failure sending an SV2 `SetNewPrevHash` message. - SetNewPrevHash(async_channel::SendError>), - /// Failure sending an SV2 `NewExtendedMiningJob` message. - NewExtendedMiningJob(async_channel::SendError>), - /// Failure broadcasting an SV1 `Notify` message - Notify(tokio::sync::broadcast::error::SendError>), - /// Failure sending a generic SV1 message. - V1Message(async_channel::SendError), - /// Represents a generic channel send failure, described by a string. - General(String), - /// Failure sending extranonce information. - Extranonce(async_channel::SendError<(ExtendedExtranonce, u32)>), - /// Failure sending an SV2 `SetCustomMiningJob` message. 
- SetCustomMiningJob( - async_channel::SendError>, - ), - /// Failure sending new template information (prevhash and coinbase). - NewTemplate( - async_channel::SendError<( - roles_logic_sv2::template_distribution_sv2::SetNewPrevHash<'a>, - Vec, - )>, - ), -} - -#[derive(Debug)] -pub enum Error<'a> { +pub enum TproxyError { + /// Error converting a vector to a fixed-size slice VecToSlice32(Vec), + /// Generic SV1 protocol error + SV1Error, + /// Error from the network helpers library + NetworkHelpersError(network_helpers_sv2::Error), + /// Error from the roles logic library + RolesSv2LogicError(roles_logic_sv2::Error), + /// Error from roles logic parser library + ParserError(roles_logic_sv2::parsers_sv2::ParserError), + /// Error from roles logic handlers Library + RolesSv2LogicHandlerError(roles_logic_sv2::handlers_sv2::HandlerError), /// Errors on bad CLI argument input. BadCliArgs, /// Errors on bad `serde_json` serialize/deserialize. @@ -78,35 +45,47 @@ pub enum Error<'a> { InvalidExtranonce(String), /// Errors on bad `String` to `int` conversion. ParseInt(std::num::ParseIntError), - /// Errors from `roles_logic_sv2` crate. 
- RolesSv2Logic(roles_logic_sv2::errors::Error), + /// Error parsing incoming upstream messages UpstreamIncoming(roles_logic_sv2::errors::Error), - /// SV1 protocol library error - V1Protocol(v1::error::Error<'a>), + /// Mining subprotocol error #[allow(dead_code)] SubprotocolMining(String), - // Locking Errors + /// Mutex poison lock error PoisonLock, - // Channel Receiver Error + /// Channel receiver error ChannelErrorReceiver(async_channel::RecvError), + /// Channel sender error + ChannelErrorSender, + /// Broadcast channel receiver error + BroadcastChannelErrorReceiver(broadcast::error::RecvError), + /// Tokio channel receiver error TokioChannelErrorRecv(tokio::sync::broadcast::error::RecvError), - // Channel Sender Errors - ChannelErrorSender(ChannelSendError<'a>), + /// Error converting SetDifficulty to Message SetDifficultyToMessage(SetDifficulty), - Infallible(std::convert::Infallible), - // used to handle SV2 protocol error messages from pool - #[allow(clippy::enum_variant_names)] - Sv2ProtocolError(Mining<'a>), + /// Target calculation error #[allow(clippy::enum_variant_names)] TargetError(roles_logic_sv2::errors::Error), + /// SV1 message exceeds maximum length Sv1MessageTooLong, - Parser(ParserError), + /// Received an unexpected message type + UnexpectedMessage, + /// Job not found during share validation + JobNotFound, + /// Invalid merkle root during share validation + InvalidMerkleRoot, + /// Shutdown signal received + Shutdown, + /// Represents a generic channel send failure, described by a string. 
+ General(String), } -impl fmt::Display for Error<'_> { +impl std::error::Error for TproxyError {} + +impl fmt::Display for TproxyError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use Error::*; + use TproxyError::*; match self { + General(e) => write!(f, "{e}"), BadCliArgs => write!(f, "Bad CLI arg input"), BadSerdeJson(ref e) => write!(f, "Bad serde json: `{e:?}`"), BadConfigDeserialize(ref e) => write!(f, "Bad `config` TOML deserialize: `{e:?}`"), @@ -116,215 +95,127 @@ impl fmt::Display for Error<'_> { InvalidExtranonce(ref e) => write!(f, "Invalid Extranonce error: `{e:?}"), Io(ref e) => write!(f, "I/O error: `{e:?}"), ParseInt(ref e) => write!(f, "Bad convert from `String` to `int`: `{e:?}`"), - RolesSv2Logic(ref e) => write!(f, "Roles SV2 Logic Error: `{e:?}`"), - V1Protocol(ref e) => write!(f, "V1 Protocol Error: `{e:?}`"), SubprotocolMining(ref e) => write!(f, "Subprotocol Mining Error: `{e:?}`"), UpstreamIncoming(ref e) => write!(f, "Upstream parse incoming error: `{e:?}`"), PoisonLock => write!(f, "Poison Lock error"), ChannelErrorReceiver(ref e) => write!(f, "Channel receive error: `{e:?}`"), + BroadcastChannelErrorReceiver(ref e) => { + write!(f, "Broadcast channel receive error: {e:?}") + } + ChannelErrorSender => write!(f, "Sender error"), TokioChannelErrorRecv(ref e) => write!(f, "Channel receive error: `{e:?}`"), - ChannelErrorSender(ref e) => write!(f, "Channel send error: `{e:?}`"), SetDifficultyToMessage(ref e) => { write!(f, "Error converting SetDifficulty to Message: `{e:?}`") } VecToSlice32(ref e) => write!(f, "Standard Error: `{e:?}`"), - Infallible(ref e) => write!(f, "Infallible Error:`{e:?}`"), - Sv2ProtocolError(ref e) => { - write!(f, "Received Sv2 Protocol Error from upstream: `{e:?}`") - } TargetError(ref e) => { write!(f, "Impossible to get target from hashrate: `{e:?}`") } Sv1MessageTooLong => { write!(f, "Received an sv1 message that is longer than max len") } - Parser(ref e) => write!(f, "Parser error: `{e:?}`"), 
+ UnexpectedMessage => { + write!(f, "Received a message type that was not expected") + } + JobNotFound => write!(f, "Job not found during share validation"), + InvalidMerkleRoot => write!(f, "Invalid merkle root during share validation"), + Shutdown => write!(f, "Shutdown signal"), + SV1Error => write!(f, "Sv1 error"), + NetworkHelpersError(ref e) => write!(f, "Network helpers error: {e:?}"), + RolesSv2LogicError(ref e) => write!(f, "Roles logic error: {e:?}"), + ParserError(ref e) => write!(f, "Roles logic parser error: {e:?}"), + RolesSv2LogicHandlerError(ref e) => write!(f, "Roles logic handler error: {e:?}"), } } } -impl From for Error<'_> { +impl From for TproxyError { fn from(e: binary_sv2::Error) -> Self { - Error::BinarySv2(e) + TproxyError::BinarySv2(e) } } -impl From for Error<'_> { - fn from(e: ParserError) -> Self { - Error::Parser(e) +impl From for TproxyError { + fn from(value: roles_logic_sv2::handlers_sv2::HandlerError) -> Self { + TproxyError::RolesSv2LogicHandlerError(value) } } -impl From for Error<'_> { +impl From for TproxyError { fn from(e: codec_sv2::noise_sv2::Error) -> Self { - Error::CodecNoise(e) + TproxyError::CodecNoise(e) } } -impl From for Error<'_> { +impl From for TproxyError { fn from(e: framing_sv2::Error) -> Self { - Error::FramingSv2(e) + TproxyError::FramingSv2(e) } } -impl From for Error<'_> { +impl From for TproxyError { fn from(e: std::io::Error) -> Self { - Error::Io(e) + TproxyError::Io(e) } } -impl From for Error<'_> { +impl From for TproxyError { fn from(e: std::num::ParseIntError) -> Self { - Error::ParseInt(e) + TproxyError::ParseInt(e) } } -impl From for Error<'_> { - fn from(e: roles_logic_sv2::errors::Error) -> Self { - Error::RolesSv2Logic(e) - } -} - -impl From for Error<'_> { +impl From for TproxyError { fn from(e: serde_json::Error) -> Self { - Error::BadSerdeJson(e) + TproxyError::BadSerdeJson(e) } } -impl From for Error<'_> { +impl From for TproxyError { fn from(e: ConfigError) -> Self { - 
Error::BadConfigDeserialize(e) - } -} - -impl<'a> From> for Error<'a> { - fn from(e: v1::error::Error<'a>) -> Self { - Error::V1Protocol(e) + TproxyError::BadConfigDeserialize(e) } } -impl From for Error<'_> { +impl From for TproxyError { fn from(e: async_channel::RecvError) -> Self { - Error::ChannelErrorReceiver(e) + TproxyError::ChannelErrorReceiver(e) } } -impl From for Error<'_> { +impl From for TproxyError { fn from(e: tokio::sync::broadcast::error::RecvError) -> Self { - Error::TokioChannelErrorRecv(e) + TproxyError::TokioChannelErrorRecv(e) } } //*** LOCK ERRORS *** -impl From> for Error<'_> { +impl From> for TproxyError { fn from(_e: PoisonError) -> Self { - Error::PoisonLock - } -} - -// *** CHANNEL SENDER ERRORS *** -impl<'a> From>> - for Error<'a> -{ - fn from( - e: async_channel::SendError>, - ) -> Self { - Error::ChannelErrorSender(ChannelSendError::SubmitSharesExtended(e)) - } -} - -impl<'a> From>> - for Error<'a> -{ - fn from(e: async_channel::SendError>) -> Self { - Error::ChannelErrorSender(ChannelSendError::SetNewPrevHash(e)) + TproxyError::PoisonLock } } -impl<'a> From>> for Error<'a> { - fn from(e: tokio::sync::broadcast::error::SendError>) -> Self { - Error::ChannelErrorSender(ChannelSendError::Notify(e)) - } -} - -impl From> for Error<'_> { - fn from(e: async_channel::SendError) -> Self { - Error::ChannelErrorSender(ChannelSendError::V1Message(e)) - } -} - -impl From> for Error<'_> { - fn from(e: async_channel::SendError<(ExtendedExtranonce, u32)>) -> Self { - Error::ChannelErrorSender(ChannelSendError::Extranonce(e)) - } -} - -impl<'a> From>> for Error<'a> { - fn from(e: async_channel::SendError>) -> Self { - Error::ChannelErrorSender(ChannelSendError::NewExtendedMiningJob(e)) - } -} - -impl<'a> From>> for Error<'a> { - fn from(e: async_channel::SendError>) -> Self { - Error::ChannelErrorSender(ChannelSendError::SetCustomMiningJob(e)) - } -} - -impl<'a> - From< - async_channel::SendError<( - 
roles_logic_sv2::template_distribution_sv2::SetNewPrevHash<'a>, - Vec, - )>, - > for Error<'a> -{ - fn from( - e: async_channel::SendError<( - roles_logic_sv2::template_distribution_sv2::SetNewPrevHash<'a>, - Vec, - )>, - ) -> Self { - Error::ChannelErrorSender(ChannelSendError::NewTemplate(e)) - } -} - -impl From> for Error<'_> { +impl From> for TproxyError { fn from(e: Vec) -> Self { - Error::VecToSlice32(e) + TproxyError::VecToSlice32(e) } } -impl From for Error<'_> { +impl From for TproxyError { fn from(e: SetDifficulty) -> Self { - Error::SetDifficultyToMessage(e) - } -} - -impl From for Error<'_> { - fn from(e: std::convert::Infallible) -> Self { - Error::Infallible(e) - } -} - -impl<'a> From> for Error<'a> { - fn from(e: Mining<'a>) -> Self { - Error::Sv2ProtocolError(e) + TproxyError::SetDifficultyToMessage(e) } } -impl From, codec_sv2::buffer_sv2::Slice>>> - for Error<'_> -{ - fn from( - value: async_channel::SendError, codec_sv2::buffer_sv2::Slice>>, - ) -> Self { - Error::ChannelErrorSender(ChannelSendError::General(value.to_string())) +impl<'a> From> for TproxyError { + fn from(_: v1::error::Error<'a>) -> Self { + TproxyError::SV1Error } } -impl From for Error<'_> { - fn from(value: VardiffError) -> Self { - Self::RolesSv2Logic(value.into()) +impl From for TproxyError { + fn from(value: network_helpers_sv2::Error) -> Self { + TproxyError::NetworkHelpersError(value) } } diff --git a/roles/translator/src/lib/mod.rs b/roles/translator/src/lib/mod.rs index 4f4f2bba88..d1cae586ef 100644 --- a/roles/translator/src/lib/mod.rs +++ b/roles/translator/src/lib/mod.rs @@ -10,43 +10,37 @@ //! provides the `start` method as the main entry point for running the translator service. //! It relies on several sub-modules (`config`, `downstream_sv1`, `upstream_sv2`, `proxy`, `status`, //! etc.) for specialized functionalities. 
-use async_channel::{bounded, unbounded}; -use futures::FutureExt; -use rand::Rng; -use status::Status; -use std::{ - net::{IpAddr, SocketAddr}, - str::FromStr, - sync::Arc, -}; -pub use stratum_common::roles_logic_sv2::utils::Mutex; - -use tokio::{ - select, - sync::{broadcast, Notify}, - task::{self, AbortHandle}, -}; +#![allow(clippy::module_inception)] +use async_channel::unbounded; +pub use roles_logic_sv2::utils::Mutex; +use std::{net::SocketAddr, sync::Arc}; +use tokio::sync::mpsc; use tracing::{debug, error, info, warn}; + pub use v1::server_to_client; use config::TranslatorConfig; -use crate::status::State; +use crate::{ + status::{State, Status}, + sv1::sv1_server::sv1_server::Sv1Server, + sv2::{channel_manager::ChannelMode, ChannelManager, Upstream}, + task_manager::TaskManager, + utils::ShutdownMessage, +}; pub mod config; -pub mod downstream_sv1; pub mod error; -pub mod proxy; pub mod status; -pub mod upstream_sv2; +pub mod sv1; +pub mod sv2; +mod task_manager; pub mod utils; /// The main struct that manages the SV1/SV2 translator. #[derive(Clone, Debug)] pub struct TranslatorSv2 { config: TranslatorConfig, - reconnect_wait_time: u64, - shutdown: Arc, } impl TranslatorSv2 { @@ -55,13 +49,7 @@ impl TranslatorSv2 { /// Initializes the translator with the given configuration and sets up /// the reconnect wait time. pub fn new(config: TranslatorConfig) -> Self { - let mut rng = rand::thread_rng(); - let wait_time = rng.gen_range(0..=3000); - Self { - config, - reconnect_wait_time: wait_time, - shutdown: Arc::new(Notify::new()), - } + Self { config } } /// Starts the translator. @@ -69,319 +57,206 @@ impl TranslatorSv2 { /// This method starts the main event loop, which handles connections, /// protocol translation, job management, and status reporting. pub async fn start(self) { - // Status channel for components to signal errors or state changes. - let (tx_status, rx_status) = unbounded(); - - // Shared mutable state for the current mining target. 
- let target = Arc::new(Mutex::new(vec![0; 32])); + info!("TranslatorSv2 starting... setting up subsystems"); + + let (notify_shutdown, _) = tokio::sync::broadcast::channel::(1); + let (shutdown_complete_tx, mut shutdown_complete_rx) = mpsc::channel::<()>(1); + let task_manager = Arc::new(TaskManager::new()); + + let (status_sender, status_receiver) = async_channel::unbounded::(); + + let (channel_manager_to_upstream_sender, channel_manager_to_upstream_receiver) = + unbounded(); + let (upstream_to_channel_manager_sender, upstream_to_channel_manager_receiver) = + unbounded(); + let (channel_manager_to_sv1_server_sender, channel_manager_to_sv1_server_receiver) = + unbounded(); + let (sv1_server_to_channel_manager_sender, sv1_server_to_channel_manager_receiver) = + unbounded(); + + debug!("Channels initialized."); + + let upstream_addresses = self + .config + .upstreams + .iter() + .map(|upstream| { + let upstream_addr = + SocketAddr::new(upstream.address.parse().unwrap(), upstream.port); + (upstream_addr, upstream.authority_pubkey) + }) + .collect::>(); + + info!("Attempting to initialize upstream..."); + let upstream = match Upstream::new( + &upstream_addresses, + upstream_to_channel_manager_sender.clone(), + channel_manager_to_upstream_receiver.clone(), + notify_shutdown.clone(), + shutdown_complete_tx.clone(), + ) + .await + { + Ok(upstream) => { + info!("Upstream initialized successfully."); + upstream + } + Err(e) => { + error!("Failed to initialize upstream connection: {e:?}"); + return; + } + }; - // Broadcast channel to send SV1 `mining.notify` messages from the Bridge - // to all connected Downstream (SV1) clients. 
- let (tx_sv1_notify, _rx_sv1_notify): ( - broadcast::Sender, - broadcast::Receiver, - ) = broadcast::channel(10); + info!("Initializing channel manager..."); + let channel_manager = Arc::new(ChannelManager::new( + channel_manager_to_upstream_sender, + upstream_to_channel_manager_receiver, + channel_manager_to_sv1_server_sender.clone(), + sv1_server_to_channel_manager_receiver, + if self.config.aggregate_channels { + ChannelMode::Aggregated + } else { + ChannelMode::NonAggregated + }, + )); - // FIXME: Remove this task collector mechanism. - // Collector for holding handles to spawned tasks for potential abortion. - let task_collector: Arc>> = - Arc::new(Mutex::new(Vec::new())); + info!("Setting up SV1 server..."); + let downstream_addr: SocketAddr = SocketAddr::new( + self.config.downstream_address.parse().unwrap(), + self.config.downstream_port, + ); - // Delegate initial setup and connection logic to internal_start. - Self::internal_start( + let sv1_server = Arc::new(Sv1Server::new( + downstream_addr, + channel_manager_to_sv1_server_receiver, + sv1_server_to_channel_manager_sender, self.config.clone(), - tx_sv1_notify.clone(), - target.clone(), - tx_status.clone(), - task_collector.clone(), + )); + + info!("Spawning channel manager background tasks..."); + ChannelManager::run_channel_manager_tasks( + channel_manager.clone(), + notify_shutdown.clone(), + shutdown_complete_tx.clone(), + status_sender.clone(), + task_manager.clone(), ) .await; - debug!("Starting up signal listener"); - let task_collector_ = task_collector.clone(); - - debug!("Starting up status listener"); - let wait_time = self.reconnect_wait_time; - // Check all tasks if is_finished() is true, if so exit - // Spawn a task to listen for Ctrl+C signal. - tokio::spawn({ - let shutdown_signal = self.shutdown.clone(); - async move { - if tokio::signal::ctrl_c().await.is_ok() { - info!("Interrupt received"); - // Notify the main loop to begin shutdown. 
- shutdown_signal.notify_one(); - } - } - }); - - // Main status loop. - loop { - select! { - // Listen for status updates from components. - task_status = rx_status.recv().fuse() => { - if let Ok(task_status_) = task_status { - match task_status_.state { - // If any critical component shuts down due to error, shut down the whole translator. - // Logic needs to be improved, maybe respawn rather than a total shutdown. - State::DownstreamShutdown(err) | State::BridgeShutdown(err) | State::UpstreamShutdown(err) => { - error!("SHUTDOWN from: {}", err); - self.shutdown(); - } - // If the upstream signals a need to reconnect. - State::UpstreamTryReconnect(err) => { - error!("Trying to reconnect the Upstream because of: {}", err); - let task_collector1 = task_collector_.clone(); - let tx_sv1_notify1 = tx_sv1_notify.clone(); - let target = target.clone(); - let tx_status = tx_status.clone(); - let proxy_config = self.config.clone(); - // Spawn a new task to handle the reconnection process. - tokio::spawn (async move { - // Wait for the randomized delay to avoid thundering herd issues. - tokio::time::sleep(std::time::Duration::from_millis(wait_time)).await; - - // Abort all existing tasks before restarting. - let task_collector_aborting = task_collector1.clone(); - kill_tasks(task_collector_aborting.clone()); + info!("Starting upstream listener task..."); + if let Err(e) = upstream + .start( + notify_shutdown.clone(), + shutdown_complete_tx.clone(), + status_sender.clone(), + task_manager.clone(), + ) + .await + { + error!("Failed to start upstream listener: {e:?}"); + return; + } - warn!("Trying reconnecting to upstream"); - // Restart the internal components. - Self::internal_start( - proxy_config, - tx_sv1_notify1, - target.clone(), - tx_status.clone(), - task_collector1, - ) - .await; - }); - } - // Log healthy status messages. 
- State::Healthy(msg) => { - info!("HEALTHY message: {}", msg); + info!("Spawning status listener task..."); + let notify_shutdown_clone = notify_shutdown.clone(); + let shutdown_complete_tx_clone = shutdown_complete_tx.clone(); + let status_sender_clone = status_sender.clone(); + let task_manager_clone = task_manager.clone(); + task_manager.spawn(async move { + loop { + tokio::select! { + _ = tokio::signal::ctrl_c() => { + info!("Ctrl+C received — initiating graceful shutdown..."); + notify_shutdown_clone.send(ShutdownMessage::ShutdownAll).unwrap(); + break; + } + message = status_receiver.recv() => { + if let Ok(status) = message { + match status.state { + State::DownstreamShutdown{downstream_id,..} => { + warn!("Downstream {downstream_id:?} disconnected — notifying SV1 server."); + notify_shutdown_clone.send(ShutdownMessage::DownstreamShutdown(downstream_id)).unwrap(); + } + State::Sv1ServerShutdown(_) => { + warn!("SV1 Server shutdown requested — initiating full shutdown."); + notify_shutdown_clone.send(ShutdownMessage::ShutdownAll).unwrap(); + break; + } + State::ChannelManagerShutdown(_) => { + warn!("Channel Manager shutdown requested — initiating full shutdown."); + notify_shutdown_clone.send(ShutdownMessage::ShutdownAll).unwrap(); + break; + } + State::UpstreamShutdown(msg) => { + warn!("Upstream connection dropped: {msg:?} — attempting reconnection..."); + + match Upstream::new( + &upstream_addresses, + upstream_to_channel_manager_sender.clone(), + channel_manager_to_upstream_receiver.clone(), + notify_shutdown_clone.clone(), + shutdown_complete_tx_clone.clone(), + ).await { + Ok(upstream) => { + if let Err(e) = upstream + .start( + notify_shutdown_clone.clone(), + shutdown_complete_tx_clone.clone(), + status_sender_clone.clone(), + task_manager_clone.clone() + ) + .await + { + error!("Restarted upstream failed to start: {e:?}"); + notify_shutdown_clone.send(ShutdownMessage::ShutdownAll).unwrap(); + break; + } else { + info!("Upstream restarted 
successfully."); + notify_shutdown_clone.send(ShutdownMessage::DownstreamShutdownAll).unwrap(); + } + } + Err(e) => { + error!("Failed to reinitialize upstream after disconnect: {e:?}"); + notify_shutdown_clone.send(ShutdownMessage::ShutdownAll).unwrap(); + break; + } + } + } } } - } else { - info!("Channel closed"); - kill_tasks(task_collector.clone()); - break; // Channel closed } } - // Listen for the shutdown signal (from Ctrl+C or explicit call). - _ = self.shutdown.notified() => { - info!("Shutting down gracefully..."); - kill_tasks(task_collector.clone()); - break; - } } - } - } - - /// Internal helper function to initialize and start the core components. - /// - /// Sets up communication channels between the Bridge, Upstream, and Downstream. - /// Creates, connects, and starts the Upstream (SV2) handler. - /// Waits for initial data (extranonce, target) from the Upstream. - /// Creates and starts the Bridge (protocol translation logic). - /// Starts the Downstream (SV1) listener to accept miner connections. - /// Collects task handles for graceful shutdown management. - async fn internal_start( - proxy_config: TranslatorConfig, - tx_sv1_notify: broadcast::Sender>, - target: Arc>>, - tx_status: async_channel::Sender>, - task_collector: Arc>>, - ) { - // Channel: Bridge -> Upstream (SV2 SubmitSharesExtended) - let (tx_sv2_submit_shares_ext, rx_sv2_submit_shares_ext) = bounded(10); - - // Channel: Downstream -> Bridge (SV1 Messages) - let (tx_sv1_bridge, rx_sv1_downstream) = unbounded(); - - // Channel: Upstream -> Bridge (SV2 NewExtendedMiningJob) - let (tx_sv2_new_ext_mining_job, rx_sv2_new_ext_mining_job) = bounded(10); - - // Channel: Upstream -> internal_start -> Bridge (Initial Extranonce) - let (tx_sv2_extranonce, rx_sv2_extranonce) = bounded(1); - - // Channel: Upstream -> Bridge (SV2 SetNewPrevHash) - let (tx_sv2_set_new_prev_hash, rx_sv2_set_new_prev_hash) = bounded(10); - - // Prepare upstream connection address. 
- let upstream_addr = SocketAddr::new( - IpAddr::from_str(&proxy_config.upstream_address) - .expect("Failed to parse upstream address!"), - proxy_config.upstream_port, - ); + }); - // Shared difficulty configuration - let diff_config = Arc::new(Mutex::new(proxy_config.upstream_difficulty_config.clone())); - let task_collector_upstream = task_collector.clone(); - // Instantiate the Upstream (SV2) component. - let upstream = match upstream_sv2::Upstream::new( - upstream_addr, - proxy_config.upstream_authority_pubkey, - rx_sv2_submit_shares_ext, // Receives shares from Bridge - tx_sv2_set_new_prev_hash, // Sends prev hash updates to Bridge - tx_sv2_new_ext_mining_job, // Sends new jobs to Bridge - proxy_config.min_extranonce2_size, - tx_sv2_extranonce, // Sends initial extranonce - status::Sender::Upstream(tx_status.clone()), // Sends status updates - target.clone(), // Shares target state - diff_config.clone(), // Shares difficulty config - task_collector_upstream, + info!("Starting SV1 server..."); + if let Err(e) = Sv1Server::start( + sv1_server, + notify_shutdown.clone(), + shutdown_complete_tx.clone(), + status_sender.clone(), + task_manager.clone(), ) .await { - Ok(upstream) => upstream, - Err(e) => { - // FIXME: Send error to status main loop, and then exit. - error!("Failed to create upstream: {}", e); - return; - } - }; - let task_collector_init_task = task_collector.clone(); - - // Spawn the core initialization logic in a separate task. - // This allows the main `start` loop to remain responsive to shutdown signals - // even during potentially long-running connection attempts. - let task = task::spawn(async move { - // Connect to the SV2 Upstream role - match upstream_sv2::Upstream::connect( - upstream.clone(), - proxy_config.min_supported_version, - proxy_config.max_supported_version, - ) - .await - { - Ok(_) => info!("Connected to Upstream!"), - Err(e) => { - // FIXME: Send error to status main loop, and then exit. 
- error!("Failed to connect to Upstream EXITING! : {}", e); - return; - } - } - - // Start the task to parse incoming messages from the Upstream. - if let Err(e) = upstream_sv2::Upstream::parse_incoming(upstream.clone()) { - error!("failed to create sv2 parser: {}", e); - return; - } + error!("SV1 server startup failed: {e:?}"); + notify_shutdown.send(ShutdownMessage::ShutdownAll).unwrap(); + } - debug!("Finished starting upstream listener"); - // Start the task handler to process share submissions received from the Bridge. - if let Err(e) = upstream_sv2::Upstream::handle_submit(upstream.clone()) { - error!("Failed to create submit handler: {}", e); - return; + drop(shutdown_complete_tx); + info!("Waiting for shutdown completion signals from subsystems..."); + let shutdown_timeout = tokio::time::Duration::from_secs(30); + tokio::select! { + _ = shutdown_complete_rx.recv() => { + info!("All subsystems reported shutdown complete."); } - - // Wait to receive the initial extranonce information from the Upstream. - // This is needed before the Bridge can be fully initialized. - let (extended_extranonce, up_id) = rx_sv2_extranonce.recv().await.unwrap(); - loop { - let target: [u8; 32] = target.safe_lock(|t| t.clone()).unwrap().try_into().unwrap(); - if target != [0; 32] { - break; - }; - tokio::time::sleep(std::time::Duration::from_millis(100)).await; + _ = tokio::time::sleep(shutdown_timeout) => { + warn!("Graceful shutdown timed out after {shutdown_timeout:?} — forcing shutdown."); + task_manager.abort_all().await; } - - let task_collector_bridge = task_collector_init_task.clone(); - // Instantiate the Bridge component. - let b = proxy::Bridge::new( - rx_sv1_downstream, - tx_sv2_submit_shares_ext, - rx_sv2_set_new_prev_hash, - rx_sv2_new_ext_mining_job, - tx_sv1_notify.clone(), - status::Sender::Bridge(tx_status.clone()), - extended_extranonce, - target, - up_id, - task_collector_bridge, - ); - // Start the Bridge's main processing loop. 
- proxy::Bridge::start(b.clone()); - - // Prepare downstream listening address. - let downstream_addr = SocketAddr::new( - IpAddr::from_str(&proxy_config.downstream_address).unwrap(), - proxy_config.downstream_port, - ); - - let task_collector_downstream = task_collector_init_task.clone(); - // Start accepting connections from Downstream (SV1) miners. - downstream_sv1::Downstream::accept_connections( - downstream_addr, - tx_sv1_bridge, - tx_sv1_notify, - status::Sender::DownstreamListener(tx_status.clone()), - b, - proxy_config.downstream_difficulty_config, - diff_config, - task_collector_downstream, - ); - }); // End of init task - let _ = - task_collector.safe_lock(|t| t.push((task.abort_handle(), "init task".to_string()))); - } - - /// Closes Translator role and any open connection associated with it. - /// - /// Note that this method will result in a full exit of the running - /// Translator and any open connection most be re-initiated upon new - /// start. - pub fn shutdown(&self) { - self.shutdown.notify_one(); - } -} - -// Helper function to iterate through the collected task handles and abort them -fn kill_tasks(task_collector: Arc>>) { - let _ = task_collector.safe_lock(|t| { - while let Some(handle) = t.pop() { - handle.0.abort(); - warn!("Killed task: {:?}", handle.1); } - }); -} - -#[cfg(test)] -mod tests { - use super::TranslatorSv2; - use ext_config::{Config, File, FileFormat}; - - use crate::*; - - #[tokio::test] - async fn test_shutdown() { - let config_path = "config-examples/tproxy-config-hosted-pool-example.toml"; - let config: TranslatorConfig = match Config::builder() - .add_source(File::new(config_path, FileFormat::Toml)) - .build() - { - Ok(settings) => match settings.try_deserialize::() { - Ok(c) => c, - Err(e) => { - dbg!(&e); - return; - } - }, - Err(e) => { - dbg!(&e); - return; - } - }; - let translator = TranslatorSv2::new(config.clone()); - let cloned = translator.clone(); - tokio::spawn(async move { - cloned.start().await; - }); - 
translator.shutdown(); - let ip = config.downstream_address.clone(); - let port = config.downstream_port; - let translator_addr = format!("{ip}:{port}"); - assert!(std::net::TcpListener::bind(translator_addr).is_ok()); + info!("Joining remaining tasks..."); + task_manager.join_all().await; + info!("TranslatorSv2 shutdown complete."); } } diff --git a/roles/translator/src/lib/proxy/bridge.rs b/roles/translator/src/lib/proxy/bridge.rs deleted file mode 100644 index 5a9f32e4de..0000000000 --- a/roles/translator/src/lib/proxy/bridge.rs +++ /dev/null @@ -1,653 +0,0 @@ -//! ## Proxy Bridge Module -//! -//! This module defines the [`Bridge`] structure, which acts as the central component -//! responsible for translating messages and coordinating communication between -//! the upstream SV2 role and the downstream SV1 mining clients. -//! -//! The Bridge manages message queues, maintains the state required for translation -//! (such as job IDs, previous hashes, and mining jobs), handles share submissions -//! from downstream, and forwards translated jobs received from upstream to downstream miners. -//! -//! This module handles: -//! - Receiving SV1 `mining.submit` messages from [`Downstream`] connections. -//! - Translating SV1 submits into SV2 `SubmitSharesExtended`. -//! - Receiving SV2 `SetNewPrevHash` and `NewExtendedMiningJob` from the upstream. -//! - Translating SV2 job messages into SV1 `mining.notify` messages. -//! - Sending translated SV2 submits to the upstream. -//! - Broadcasting translated SV1 notifications to connected downstream miners. -//! - Managing channel state and difficulty related to job translation. -//! - Handling new downstream SV1 connections. 
-use super::super::{ - downstream_sv1::{DownstreamMessages, SetDownstreamTarget, SubmitShareWithChannelId}, - error::{ - Error::{self, PoisonLock}, - ProxyResult, - }, - status, -}; -use async_channel::{Receiver, Sender}; -use error_handling::handle_result; -use std::sync::Arc; -use stratum_common::roles_logic_sv2::{ - channel_logic::channel_factory::{ - ExtendedChannelKind, OnNewShare, ProxyExtendedChannelFactory, Share, - }, - mining_sv2::{ - ExtendedExtranonce, NewExtendedMiningJob, SetNewPrevHash, SubmitSharesExtended, Target, - }, - parsers_sv2::Mining, - utils::{GroupId, Mutex}, - Error as RolesLogicError, -}; -use tokio::{sync::broadcast, task::AbortHandle}; -use tracing::{debug, error, info, warn}; -use v1::{client_to_server::Submit, server_to_client, utils::HexU32Be}; - -/// Bridge between the SV2 `Upstream` and SV1 `Downstream` responsible for the following messaging -/// translation: -/// 1. SV1 `mining.submit` -> SV2 `SubmitSharesExtended` -/// 2. SV2 `SetNewPrevHash` + `NewExtendedMiningJob` -> SV1 `mining.notify` -#[derive(Debug)] -pub struct Bridge { - /// Receives a SV1 `mining.submit` message from the Downstream role. - rx_sv1_downstream: Receiver, - /// Sends SV2 `SubmitSharesExtended` messages translated from SV1 `mining.submit` messages to - /// the `Upstream`. - tx_sv2_submit_shares_ext: Sender>, - /// Receives a SV2 `SetNewPrevHash` message from the `Upstream` to be translated (along with a - /// SV2 `NewExtendedMiningJob` message) to a SV1 `mining.submit` for the `Downstream`. - rx_sv2_set_new_prev_hash: Receiver>, - /// Receives a SV2 `NewExtendedMiningJob` message from the `Upstream` to be translated (along - /// with a SV2 `SetNewPrevHash` message) to a SV1 `mining.submit` to be sent to the - /// `Downstream`. - rx_sv2_new_ext_mining_job: Receiver>, - /// Sends SV1 `mining.notify` message (translated from the SV2 `SetNewPrevHash` and - /// `NewExtendedMiningJob` messages stored in the `NextMiningNotify`) to the `Downstream`. 
- tx_sv1_notify: broadcast::Sender>, - /// Allows the bridge the ability to communicate back to the main thread any status updates - /// that would interest the main thread for error handling - tx_status: status::Sender, - /// Stores the most recent SV1 `mining.notify` values to be sent to the `Downstream` upon - /// receiving a new SV2 `SetNewPrevHash` and `NewExtendedMiningJob` messages **before** any - /// Downstream role connects to the proxy. - /// - /// Once the proxy establishes a connection with the SV2 Upstream role, it immediately receives - /// a SV2 `SetNewPrevHash` and `NewExtendedMiningJob` message. This happens before the - /// connection to the Downstream role(s) occur. The `last_notify` member fields allows these - /// first notify values to be relayed to the `Downstream` once a Downstream role connects. Once - /// a Downstream role connects and receives the first notify values, this member field is no - /// longer used. - last_notify: Option>, - pub(self) channel_factory: ProxyExtendedChannelFactory, - /// Stores `NewExtendedMiningJob` messages received from the upstream with the `is_future` flag - /// set. These jobs are buffered until a corresponding `SetNewPrevHash` message is - /// received. - future_jobs: Vec>, - /// Stores the last received SV2 `SetNewPrevHash` message. Used in conjunction with - /// `future_jobs` to construct `mining.notify` messages. - last_p_hash: Option>, - /// The mining target currently in use by the downstream miners connected to this bridge. - /// This target is derived from the upstream's requirements but may be adjusted locally. - target: Arc>>, - /// The job ID of the last sent `mining.notify` message. - last_job_id: u32, - task_collector: Arc>>, -} - -impl Bridge { - #[allow(clippy::too_many_arguments)] - /// Instantiates a new `Bridge` with the provided communication channels and initial - /// configurations. 
- /// - /// Sets up the core communication pathways between upstream and downstream handlers - /// and initializes the internal state, including the channel factory. - pub fn new( - rx_sv1_downstream: Receiver, - tx_sv2_submit_shares_ext: Sender>, - rx_sv2_set_new_prev_hash: Receiver>, - rx_sv2_new_ext_mining_job: Receiver>, - tx_sv1_notify: broadcast::Sender>, - tx_status: status::Sender, - extranonces: ExtendedExtranonce, - target: Arc>>, - up_id: u32, - task_collector: Arc>>, - ) -> Arc> { - let ids = Arc::new(Mutex::new(GroupId::new())); - let share_per_min = 1.0; - let upstream_target: [u8; 32] = - target.safe_lock(|t| t.clone()).unwrap().try_into().unwrap(); - let upstream_target: Target = upstream_target.into(); - Arc::new(Mutex::new(Self { - rx_sv1_downstream, - tx_sv2_submit_shares_ext, - rx_sv2_set_new_prev_hash, - rx_sv2_new_ext_mining_job, - tx_sv1_notify, - tx_status, - last_notify: None, - channel_factory: ProxyExtendedChannelFactory::new( - ids, - extranonces, - None, - share_per_min, - ExtendedChannelKind::Proxy { upstream_target }, - None, - up_id, - ), - future_jobs: vec![], - last_p_hash: None, - target, - last_job_id: 0, - task_collector, - })) - } - - /// Handles the event of a new SV1 downstream client connecting. - /// - /// Creates a new extended channel using the internal `channel_factory` for the - /// new connection. It assigns a unique channel ID, determines the initial - /// extranonce and target for the miner, and provides the last known - /// `mining.notify` message to immediately send to the new client. 
- #[allow(clippy::result_large_err)] - pub fn on_new_sv1_connection( - &mut self, - hash_rate: f32, - ) -> ProxyResult<'static, OpenSv1Downstream> { - match self.channel_factory.new_extended_channel(0, hash_rate, 0) { - Ok(messages) => { - for message in messages { - match message { - Mining::OpenExtendedMiningChannelSuccess(success) => { - let extranonce = success.extranonce_prefix.to_vec(); - let extranonce2_len = success.extranonce_size; - self.target.safe_lock(|t| *t = success.target.to_vec())?; - return Ok(OpenSv1Downstream { - channel_id: success.channel_id, - last_notify: self.last_notify.clone(), - extranonce, - target: self.target.clone(), - extranonce2_len, - }); - } - Mining::OpenMiningChannelError(_) => todo!(), - Mining::SetNewPrevHash(_) => (), - Mining::NewExtendedMiningJob(_) => (), - _ => unreachable!(), - } - } - } - Err(_) => { - return Err(Error::SubprotocolMining( - "Bridge: failed to open new extended channel".to_string(), - )) - } - }; - Err(Error::SubprotocolMining( - "Bridge: Invalid mining message when opening downstream connection".to_string(), - )) - } - - /// Starts the tasks responsible for receiving and processing - /// messages from both upstream SV2 and downstream SV1 connections. - /// - /// This function spawns three main tasks: - /// 1. `handle_new_prev_hash`: Listens for SV2 `SetNewPrevHash` messages. - /// 2. `handle_new_extended_mining_job`: Listens for SV2 `NewExtendedMiningJob` messages. - /// 3. `handle_downstream_messages`: Listens for `DownstreamMessages` (e.g., submit shares) from - /// downstream clients. - pub fn start(self_: Arc>) { - Self::handle_new_prev_hash(self_.clone()); - Self::handle_new_extended_mining_job(self_.clone()); - Self::handle_downstream_messages(self_); - } - - /// Task handler that receives `DownstreamMessages` and dispatches them. - /// - /// This loop continuously receives messages from the `rx_sv1_downstream` channel. 
- /// It matches on the `DownstreamMessages` variant and calls the appropriate - /// handler function (`handle_submit_shares` or `handle_update_downstream_target`). - fn handle_downstream_messages(self_: Arc>) { - let task_collector_handle_downstream = - self_.safe_lock(|b| b.task_collector.clone()).unwrap(); - let (rx_sv1_downstream, tx_status) = self_ - .safe_lock(|s| (s.rx_sv1_downstream.clone(), s.tx_status.clone())) - .unwrap(); - let handle_downstream = tokio::task::spawn(async move { - loop { - let msg = handle_result!(tx_status, rx_sv1_downstream.clone().recv().await); - - match msg { - DownstreamMessages::SubmitShares(share) => { - handle_result!( - tx_status, - Self::handle_submit_shares(self_.clone(), share).await - ); - } - DownstreamMessages::SetDownstreamTarget(new_target) => { - handle_result!( - tx_status, - Self::handle_update_downstream_target(self_.clone(), new_target) - ); - } - }; - } - }); - let _ = task_collector_handle_downstream.safe_lock(|a| { - a.push(( - handle_downstream.abort_handle(), - "handle_downstream_message".to_string(), - )) - }); - } - - /// Receives a `SetDownstreamTarget` message and updates the downstream target for a specific - /// channel. - /// - /// This function is called when the downstream logic determines that a miner's - /// target needs to be updated (e.g., due to difficulty adjustment). It updates - /// the target within the internal `channel_factory` for the specified channel ID. - #[allow(clippy::result_large_err)] - fn handle_update_downstream_target( - self_: Arc>, - new_target: SetDownstreamTarget, - ) -> ProxyResult<'static, ()> { - self_.safe_lock(|b| { - b.channel_factory - .update_target_for_channel(new_target.channel_id, new_target.new_target); - })?; - Ok(()) - } - /// Receives a `SubmitShareWithChannelId` message from a downstream miner, - /// validates the share, and sends it upstream if it meets the upstream target. 
- async fn handle_submit_shares( - self_: Arc>, - share: SubmitShareWithChannelId, - ) -> ProxyResult<'static, ()> { - let (tx_sv2_submit_shares_ext, target_mutex, tx_status) = self_.safe_lock(|s| { - ( - s.tx_sv2_submit_shares_ext.clone(), - s.target.clone(), - s.tx_status.clone(), - ) - })?; - let upstream_target: [u8; 32] = target_mutex.safe_lock(|t| t.clone())?.try_into()?; - let mut upstream_target: Target = upstream_target.into(); - self_.safe_lock(|s| s.channel_factory.set_target(&mut upstream_target))?; - - let sv2_submit = self_.safe_lock(|s| { - s.translate_submit(share.channel_id, share.share, share.version_rolling_mask) - })??; - let res = self_ - .safe_lock(|s| s.channel_factory.on_submit_shares_extended(sv2_submit)) - .map_err(|_| PoisonLock); - - match res { - Ok(Ok(OnNewShare::SendErrorDownstream(e))) => { - warn!( - "Submit share error {:?}", - std::str::from_utf8(&e.error_code.to_vec()[..]) - ); - } - Ok(Ok(OnNewShare::SendSubmitShareUpstream((share, _)))) => { - info!("SHARE MEETS UPSTREAM TARGET"); - match share { - Share::Extended(share) => { - tx_sv2_submit_shares_ext.send(share).await?; - } - // We are in an extended channel shares are extended - Share::Standard(_) => unreachable!(), - } - } - // We are in an extended channel this variant is group channle only - Ok(Ok(OnNewShare::RelaySubmitShareUpstream)) => unreachable!(), - Ok(Ok(OnNewShare::ShareMeetDownstreamTarget)) => { - debug!("SHARE MEETS DOWNSTREAM TARGET"); - } - // Proxy do not have JD capabilities - Ok(Ok(OnNewShare::ShareMeetBitcoinTarget(..))) => unreachable!(), - Ok(Err(e)) => error!("Error: {:?}", e), - Err(e) => { - let _ = tx_status - .send(status::Status { - state: status::State::BridgeShutdown(e), - }) - .await; - } - } - Ok(()) - } - - /// Translates a SV1 `mining.submit` message into an SV2 `SubmitSharesExtended` message. 
- /// - /// This function performs the necessary transformations to convert the data - /// format used by SV1 submissions (`job_id`, `nonce`, `time`, `extra_nonce2`, - /// `version_bits`) into the SV2 `SubmitSharesExtended` structure, - /// taking into account version rolling if a mask is provided. - #[allow(clippy::result_large_err)] - fn translate_submit( - &self, - channel_id: u32, - sv1_submit: Submit, - version_rolling_mask: Option, - ) -> ProxyResult<'static, SubmitSharesExtended<'static>> { - let last_version = self - .channel_factory - .last_valid_job_version() - .ok_or(Error::RolesSv2Logic(RolesLogicError::NoValidJob))?; - let version = match (sv1_submit.version_bits, version_rolling_mask) { - // regarding version masking see https://github.com/slushpool/stratumprotocol/blob/master/stratum-extensions.mediawiki#changes-in-request-miningsubmit - (Some(vb), Some(mask)) => (last_version & !mask.0) | (vb.0 & mask.0), - (None, None) => last_version, - _ => return Err(Error::V1Protocol(v1::error::Error::InvalidSubmission)), - }; - let mining_device_extranonce: Vec = sv1_submit.extra_nonce2.into(); - let extranonce2 = mining_device_extranonce; - Ok(SubmitSharesExtended { - channel_id, - // I put 0 below cause sequence_number is not what should be TODO - sequence_number: 0, - job_id: sv1_submit.job_id.parse::()?, - nonce: sv1_submit.nonce.0, - ntime: sv1_submit.time.0, - version, - extranonce: extranonce2.try_into()?, - }) - } - - /// Internal helper function to handle a received SV2 `SetNewPrevHash` message. - /// - /// This function processes a `SetNewPrevHash` message received from the upstream. - /// It updates the Bridge's stored last previous hash, informs the `channel_factory` - /// about the new previous hash, and then checks the `future_jobs` buffer for - /// a corresponding `NewExtendedMiningJob`. If a matching future job is found, it constructs a - /// SV1 `mining.notify` message and broadcasts it to all downstream clients. 
It also updates - /// the `last_notify` state for new connections. - async fn handle_new_prev_hash_( - self_: Arc>, - sv2_set_new_prev_hash: SetNewPrevHash<'static>, - tx_sv1_notify: broadcast::Sender>, - ) -> Result<(), Error<'static>> { - while !crate::upstream_sv2::upstream::IS_NEW_JOB_HANDLED - .load(std::sync::atomic::Ordering::SeqCst) - { - tokio::task::yield_now().await; - } - self_.safe_lock(|s| s.last_p_hash = Some(sv2_set_new_prev_hash.clone()))?; - - let on_new_prev_hash_res = self_.safe_lock(|s| { - s.channel_factory - .on_new_prev_hash(sv2_set_new_prev_hash.clone()) - })?; - on_new_prev_hash_res?; - - let mut future_jobs = self_.safe_lock(|s| { - let future_jobs = s.future_jobs.clone(); - s.future_jobs = vec![]; - future_jobs - })?; - - let mut match_a_future_job = false; - while let Some(job) = future_jobs.pop() { - if job.job_id == sv2_set_new_prev_hash.job_id { - let j_id = job.job_id; - // Create the mining.notify to be sent to the Downstream. - let notify = crate::proxy::next_mining_notify::create_notify( - sv2_set_new_prev_hash.clone(), - job, - true, - ); - - // Get the sender to send the mining.notify to the Downstream - tx_sv1_notify.send(notify.clone())?; - match_a_future_job = true; - self_.safe_lock(|s| { - s.last_notify = Some(notify); - s.last_job_id = j_id; - })?; - break; - } - } - if !match_a_future_job { - debug!("No future jobs for {:?}", sv2_set_new_prev_hash); - } - Ok(()) - } - - /// Task handler that receives SV2 `SetNewPrevHash` messages from the upstream. - /// - /// This loop continuously receives `SetNewPrevHash` messages. It calls the - /// internal `handle_new_prev_hash_` helper function to process each message. 
- fn handle_new_prev_hash(self_: Arc>) { - let task_collector_handle_new_prev_hash = - self_.safe_lock(|b| b.task_collector.clone()).unwrap(); - let (tx_sv1_notify, rx_sv2_set_new_prev_hash, tx_status) = self_ - .safe_lock(|s| { - ( - s.tx_sv1_notify.clone(), - s.rx_sv2_set_new_prev_hash.clone(), - s.tx_status.clone(), - ) - }) - .unwrap(); - debug!("Starting handle_new_prev_hash task"); - let handle_new_prev_hash = tokio::task::spawn(async move { - loop { - // Receive `SetNewPrevHash` from `Upstream` - let sv2_set_new_prev_hash: SetNewPrevHash = - handle_result!(tx_status, rx_sv2_set_new_prev_hash.clone().recv().await); - debug!( - "handle_new_prev_hash job_id: {:?}", - &sv2_set_new_prev_hash.job_id - ); - handle_result!( - tx_status.clone(), - Self::handle_new_prev_hash_( - self_.clone(), - sv2_set_new_prev_hash, - tx_sv1_notify.clone(), - ) - .await - ) - } - }); - let _ = task_collector_handle_new_prev_hash.safe_lock(|a| { - a.push(( - handle_new_prev_hash.abort_handle(), - "handle_new_prev_hash".to_string(), - )) - }); - } - - /// Internal helper function to handle a received SV2 `NewExtendedMiningJob` message. - /// - /// This function processes a `NewExtendedMiningJob` message received from the upstream. - /// It first informs the `channel_factory` about the new job. If the job's `is_future` is true, - /// the job is buffered in `future_jobs`. If `is_future` is false, it expects a - /// corresponding `SetNewPrevHash` (which should have been received prior according to the - /// protocol) and immediately constructs and broadcasts a SV1 `mining.notify` message to - /// downstream clients, updating the `last_notify` state. 
- async fn handle_new_extended_mining_job_( - self_: Arc>, - sv2_new_extended_mining_job: NewExtendedMiningJob<'static>, - tx_sv1_notify: broadcast::Sender>, - ) -> Result<(), Error<'static>> { - // convert to non segwit jobs so we dont have to depend if miner's support segwit or not - self_.safe_lock(|s| { - s.channel_factory - .on_new_extended_mining_job(sv2_new_extended_mining_job.as_static().clone()) - })??; - - // If future_job=true, this job is meant for a future SetNewPrevHash that the proxy - // has yet to receive. Insert this new job into the job_mapper . - if sv2_new_extended_mining_job.is_future() { - self_.safe_lock(|s| s.future_jobs.push(sv2_new_extended_mining_job.clone()))?; - Ok(()) - - // If future_job=false, this job is meant for the current SetNewPrevHash. - } else { - let last_p_hash_option = self_.safe_lock(|s| s.last_p_hash.clone())?; - - // last_p_hash is an Option so we need to map to the correct error type - // to be handled - let last_p_hash = last_p_hash_option.ok_or(Error::RolesSv2Logic( - RolesLogicError::JobIsNotFutureButPrevHashNotPresent, - ))?; - - let j_id = sv2_new_extended_mining_job.job_id; - // Create the mining.notify to be sent to the Downstream. - // clean_jobs must be false because it's not a NewPrevHash template - let notify = crate::proxy::next_mining_notify::create_notify( - last_p_hash, - sv2_new_extended_mining_job.clone(), - false, - ); - // Get the sender to send the mining.notify to the Downstream - tx_sv1_notify.send(notify.clone())?; - self_.safe_lock(|s| { - s.last_notify = Some(notify); - s.last_job_id = j_id; - })?; - Ok(()) - } - } - - /// Task handler that receives SV2 `NewExtendedMiningJob` messages from the upstream. - /// - /// This loop continuously receives `NewExtendedMiningJob` messages. It calls the - /// internal `handle_new_extended_mining_job_` helper function to process each message. 
- /// After processing, it signals that a new job has been handled (used for synchronization - /// with the `handle_new_prev_hash` task). - fn handle_new_extended_mining_job(self_: Arc>) { - let task_collector_new_extended_mining_job = - self_.safe_lock(|b| b.task_collector.clone()).unwrap(); - let (tx_sv1_notify, rx_sv2_new_ext_mining_job, tx_status) = self_ - .safe_lock(|s| { - ( - s.tx_sv1_notify.clone(), - s.rx_sv2_new_ext_mining_job.clone(), - s.tx_status.clone(), - ) - }) - .unwrap(); - debug!("Starting handle_new_extended_mining_job task"); - let handle_new_extended_mining_job = tokio::task::spawn(async move { - loop { - // Receive `NewExtendedMiningJob` from `Upstream` - let sv2_new_extended_mining_job: NewExtendedMiningJob = handle_result!( - tx_status.clone(), - rx_sv2_new_ext_mining_job.clone().recv().await - ); - debug!( - "handle_new_extended_mining_job job_id: {:?}", - &sv2_new_extended_mining_job.job_id - ); - handle_result!( - tx_status, - Self::handle_new_extended_mining_job_( - self_.clone(), - sv2_new_extended_mining_job, - tx_sv1_notify.clone(), - ) - .await - ); - crate::upstream_sv2::upstream::IS_NEW_JOB_HANDLED - .store(true, std::sync::atomic::Ordering::SeqCst); - } - }); - let _ = task_collector_new_extended_mining_job.safe_lock(|a| { - a.push(( - handle_new_extended_mining_job.abort_handle(), - "handle_new_extended_mining_job".to_string(), - )) - }); - } -} - -/// Represents the necessary information to initialize a new SV1 downstream connection -/// after it has been registered with the Bridge's channel factory. -/// -/// This structure is returned by `Bridge::on_new_sv1_connection` and contains the -/// channel ID assigned to the connection, the initial job notification to send, -/// and the extranonce and target specific to this channel. -pub struct OpenSv1Downstream { - /// The unique ID assigned to this downstream channel by the channel factory. 
- pub channel_id: u32, - /// The most recent `mining.notify` message to send to the new client immediately - /// upon connection to provide them with a job. - pub last_notify: Option>, - /// The extranonce prefix assigned to this channel. - pub extranonce: Vec, - /// The mining target assigned to this channel - pub target: Arc>>, - /// The size of the extranonce2 field expected from the miner for this channel. - pub extranonce2_len: u16, -} - -#[cfg(test)] -mod test { - use super::*; - use async_channel::bounded; - - pub mod test_utils { - use super::*; - - #[allow(dead_code)] - pub struct BridgeInterface { - pub tx_sv1_submit: Sender, - pub rx_sv2_submit_shares_ext: Receiver>, - pub tx_sv2_set_new_prev_hash: Sender>, - pub tx_sv2_new_ext_mining_job: Sender>, - pub rx_sv1_notify: broadcast::Receiver>, - } - - pub fn create_bridge( - extranonces: ExtendedExtranonce, - ) -> (Arc>, BridgeInterface) { - let (tx_sv1_submit, rx_sv1_submit) = bounded(1); - let (tx_sv2_submit_shares_ext, rx_sv2_submit_shares_ext) = bounded(1); - let (tx_sv2_set_new_prev_hash, rx_sv2_set_new_prev_hash) = bounded(1); - let (tx_sv2_new_ext_mining_job, rx_sv2_new_ext_mining_job) = bounded(1); - let (tx_sv1_notify, rx_sv1_notify) = broadcast::channel(1); - let (tx_status, _rx_status) = bounded(1); - let upstream_target = vec![ - 0, 0, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, - ]; - let interface = BridgeInterface { - tx_sv1_submit, - rx_sv2_submit_shares_ext, - tx_sv2_set_new_prev_hash, - tx_sv2_new_ext_mining_job, - rx_sv1_notify, - }; - - let task_collector = Arc::new(Mutex::new(vec![])); - let b = Bridge::new( - rx_sv1_submit, - tx_sv2_submit_shares_ext, - rx_sv2_set_new_prev_hash, - rx_sv2_new_ext_mining_job, - tx_sv1_notify, - status::Sender::Bridge(tx_status), - extranonces, - Arc::new(Mutex::new(upstream_target)), - 1, - task_collector, - ); - (b, interface) - } - - pub fn create_sv1_submit(job_id: u32) -> Submit<'static> { - 
Submit { - user_name: "test_user".to_string(), - job_id: job_id.to_string(), - extra_nonce2: v1::utils::Extranonce::try_from([0; 32].to_vec()).unwrap(), - time: v1::utils::HexU32Be(1), - nonce: v1::utils::HexU32Be(1), - version_bits: None, - id: 0, - } - } - } -} diff --git a/roles/translator/src/lib/proxy/mod.rs b/roles/translator/src/lib/proxy/mod.rs deleted file mode 100644 index e2231be1dd..0000000000 --- a/roles/translator/src/lib/proxy/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod bridge; -pub mod next_mining_notify; -pub use bridge::Bridge; diff --git a/roles/translator/src/lib/proxy/next_mining_notify.rs b/roles/translator/src/lib/proxy/next_mining_notify.rs deleted file mode 100644 index e9a4d08627..0000000000 --- a/roles/translator/src/lib/proxy/next_mining_notify.rs +++ /dev/null @@ -1,60 +0,0 @@ -//! Provides functionality to convert Stratum V2 job into a -//! Stratum V1 `mining.notify` message. -use stratum_common::roles_logic_sv2::{ - job_creator::extended_job_to_non_segwit, - mining_sv2::{NewExtendedMiningJob, SetNewPrevHash}, -}; -use tracing::debug; -use v1::{ - server_to_client, - utils::{HexU32Be, MerkleNode, PrevHash}, -}; - -/// Creates a new SV1 `mining.notify` message if both SV2 `SetNewPrevHash` and -/// `NewExtendedMiningJob` messages have been received. If one of these messages is still being -/// waited on, the function returns `None`. -/// If clean_jobs = false, it means a new job is created, with the same PrevHash -pub fn create_notify( - new_prev_hash: SetNewPrevHash<'static>, - new_job: NewExtendedMiningJob<'static>, - clean_jobs: bool, -) -> server_to_client::Notify<'static> { - // TODO 32 must be changed! 
- let new_job = extended_job_to_non_segwit(new_job, 32) - .expect("failed to convert extended job to non segwit"); - // Make sure that SetNewPrevHash + NewExtendedMiningJob is matching (not future) - let job_id = new_job.job_id.to_string(); - - // U256<'static> -> MerkleLeaf - let prev_hash = PrevHash(new_prev_hash.prev_hash.clone()); - - // B064K<'static'> -> HexBytes - let coin_base1 = new_job.coinbase_tx_prefix.to_vec().into(); - let coin_base2 = new_job.coinbase_tx_suffix.to_vec().into(); - - // Seq0255<'static, U56<'static>> -> Vec> - let merkle_path = new_job.merkle_path.clone().into_static().0; - let merkle_branch: Vec = merkle_path.into_iter().map(MerkleNode).collect(); - - // u32 -> HexBytes - let version = HexU32Be(new_job.version); - let bits = HexU32Be(new_prev_hash.nbits); - let time = HexU32Be(match new_job.is_future() { - true => new_prev_hash.min_ntime, - false => new_job.min_ntime.clone().into_inner().unwrap(), - }); - - let notify_response = server_to_client::Notify { - job_id, - prev_hash, - coin_base1, - coin_base2, - merkle_branch, - version, - bits, - time, - clean_jobs, - }; - debug!("\nNextMiningNotify: {:?}\n", notify_response); - notify_response -} diff --git a/roles/translator/src/lib/status.rs b/roles/translator/src/lib/status.rs index 74146ddbb4..896cff9a93 100644 --- a/roles/translator/src/lib/status.rs +++ b/roles/translator/src/lib/status.rs @@ -1,226 +1,117 @@ -//! ## Status Reporting System for Translator +//! ## Status Reporting System //! -//! This module defines how internal components of the Translator report -//! health, errors, and shutdown conditions back to the main runtime loop in `lib/mod.rs`. +//! This module provides a centralized way for components of the Translator to report +//! health updates, shutdown reasons, or fatal errors to the main runtime loop. //! -//! At the core, tasks send a [`Status`] (wrapping a [`State`]) through a channel, -//! 
which is tagged with a [`Sender`] enum to indicate the origin of the message. -//! -//! This allows for centralized, consistent error handling across the application. +//! Each task wraps its report in a [`Status`] and sends it over an async channel, +//! tagged with a [`Sender`] variant that identifies the source subsystem. -use stratum_common::roles_logic_sv2; +use tracing::{debug, error, warn}; -use crate::error::{self, Error}; +use crate::error::TproxyError; /// Identifies the component that originated a [`Status`] update. /// -/// Each sender is associated with a dedicated side of the status channel. -/// This lets the central loop distinguish between errors from different parts of the system. -#[derive(Debug)] -pub enum Sender { - /// Sender for downstream connections. - Downstream(async_channel::Sender>), - /// Sender for downstream listener. - DownstreamListener(async_channel::Sender>), - /// Sender for bridge connections. - Bridge(async_channel::Sender>), - /// Sender for upstream connections. - Upstream(async_channel::Sender>), - /// Sender for template receiver. - TemplateReceiver(async_channel::Sender>), +/// Each variant contains a channel to the main coordinator, and optionally a component ID +/// (e.g. a downstream connection ID). +#[derive(Debug, Clone)] +pub enum StatusSender { + /// A specific downstream connection. + Downstream { + downstream_id: u32, + tx: async_channel::Sender, + }, + /// The SV1 server listener. + Sv1Server(async_channel::Sender), + /// The SV2 <-> SV1 bridge manager. + ChannelManager(async_channel::Sender), + /// The upstream SV2 connection handler. + Upstream(async_channel::Sender), } -impl Sender { - /// Converts a `DownstreamListener` sender to a `Downstream` sender. - /// FIXME: Use `From` trait and remove this - pub fn listener_to_connection(&self) -> Self { +impl StatusSender { + /// Sends a [`Status`] update. 
+ pub async fn send(&self, status: Status) -> Result<(), async_channel::SendError> { match self { - Self::DownstreamListener(inner) => Self::Downstream(inner.clone()), - _ => unreachable!(), - } - } - - /// Sends a status update. - pub async fn send( - &self, - status: Status<'static>, - ) -> Result<(), async_channel::SendError>> { - match self { - Self::Downstream(inner) => inner.send(status).await, - Self::DownstreamListener(inner) => inner.send(status).await, - Self::Bridge(inner) => inner.send(status).await, - Self::Upstream(inner) => inner.send(status).await, - Self::TemplateReceiver(inner) => inner.send(status).await, - } - } -} - -impl Clone for Sender { - fn clone(&self) -> Self { - match self { - Self::Downstream(inner) => Self::Downstream(inner.clone()), - Self::DownstreamListener(inner) => Self::DownstreamListener(inner.clone()), - Self::Bridge(inner) => Self::Bridge(inner.clone()), - Self::Upstream(inner) => Self::Upstream(inner.clone()), - Self::TemplateReceiver(inner) => Self::TemplateReceiver(inner.clone()), + Self::Downstream { downstream_id, tx } => { + debug!( + "Sending status from Downstream [{}]: {:?}", + downstream_id, status.state + ); + tx.send(status).await + } + Self::Sv1Server(tx) => { + debug!("Sending status from Sv1Server: {:?}", status.state); + tx.send(status).await + } + Self::ChannelManager(tx) => { + debug!("Sending status from ChannelManager: {:?}", status.state); + tx.send(status).await + } + Self::Upstream(tx) => { + debug!("Sending status from Upstream: {:?}", status.state); + tx.send(status).await + } } } } -/// The kind of event or status being reported by a task. +/// The type of event or error being reported by a component. #[derive(Debug)] -pub enum State<'a> { - /// Downstream connection shutdown. - DownstreamShutdown(Error<'a>), - /// Bridge connection shutdown. - BridgeShutdown(Error<'a>), - /// Upstream connection shutdown. - UpstreamShutdown(Error<'a>), - /// Upstream connection trying to reconnect. 
- UpstreamTryReconnect(Error<'a>), - /// Component is healthy. - Healthy(String), +pub enum State { + /// Downstream task exited or encountered an unrecoverable error. + DownstreamShutdown { + downstream_id: u32, + reason: TproxyError, + }, + /// SV1 server listener exited unexpectedly. + Sv1ServerShutdown(TproxyError), + /// Channel manager shut down (SV2 bridge manager). + ChannelManagerShutdown(TproxyError), + /// Upstream SV2 connection closed or failed. + UpstreamShutdown(TproxyError), } -/// Wraps a status update, to be passed through a status channel. +/// A message reporting the current [`State`] of a component. #[derive(Debug)] -pub struct Status<'a> { - pub state: State<'a>, +pub struct Status { + pub state: State, } -/// Sends a [`Status`] message tagged with its [`Sender`] to the central loop. -/// -/// This is the core logic used to determine which status variant should be sent -/// based on the error type and sender context. -async fn send_status( - sender: &Sender, - e: error::Error<'static>, - outcome: error_handling::ErrorBranch, -) -> error_handling::ErrorBranch { - match sender { - Sender::Downstream(tx) => { - tx.send(Status { - state: State::Healthy(e.to_string()), - }) - .await - .unwrap_or(()); +/// Constructs and sends a [`Status`] update based on the [`Sender`] and error context. +async fn send_status(sender: &StatusSender, error: TproxyError) { + let state = match sender { + StatusSender::Downstream { downstream_id, .. 
} => { + warn!("Downstream [{downstream_id}] shutting down due to error: {error:?}"); + State::DownstreamShutdown { + downstream_id: *downstream_id, + reason: error, + } } - Sender::DownstreamListener(tx) => { - tx.send(Status { - state: State::DownstreamShutdown(e), - }) - .await - .unwrap_or(()); + StatusSender::Sv1Server(_) => { + warn!("Sv1Server shutting down due to error: {error:?}"); + State::Sv1ServerShutdown(error) } - Sender::Bridge(tx) => { - tx.send(Status { - state: State::BridgeShutdown(e), - }) - .await - .unwrap_or(()); + StatusSender::ChannelManager(_) => { + warn!("ChannelManager shutting down due to error: {error:?}"); + State::ChannelManagerShutdown(error) } - Sender::Upstream(tx) => match e { - Error::ChannelErrorReceiver(_) => { - tx.send(Status { - state: State::UpstreamTryReconnect(e), - }) - .await - .unwrap_or(()); - } - _ => { - tx.send(Status { - state: State::UpstreamShutdown(e), - }) - .await - .unwrap_or(()); - } - }, - Sender::TemplateReceiver(tx) => { - tx.send(Status { - state: State::UpstreamShutdown(e), - }) - .await - .unwrap_or(()); + StatusSender::Upstream(_) => { + warn!("Upstream shutting down due to error: {error:?}"); + State::UpstreamShutdown(error) } + }; + + if let Err(e) = sender.send(Status { state }).await { + error!("Failed to send status update from {sender:?}: {e:?}"); } - outcome } /// Centralized error dispatcher for the Translator. /// /// Used by the `handle_result!` macro across the codebase. /// Decides whether the task should `Continue` or `Break` based on the error type and source. -pub async fn handle_error( - sender: &Sender, - e: error::Error<'static>, -) -> error_handling::ErrorBranch { - tracing::error!("Error: {:?}", &e); - match e { - Error::VecToSlice32(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - // Errors on bad CLI argument input. 
- Error::BadCliArgs => send_status(sender, e, error_handling::ErrorBranch::Break).await, - // Errors on bad `serde_json` serialize/deserialize. - Error::BadSerdeJson(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - // Errors on bad `config` TOML deserialize. - Error::BadConfigDeserialize(_) => { - send_status(sender, e, error_handling::ErrorBranch::Break).await - } - // Errors from `binary_sv2` crate. - Error::BinarySv2(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - // Errors on bad noise handshake. - Error::CodecNoise(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - // Errors from `framing_sv2` crate. - Error::FramingSv2(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - //If the pool sends the tproxy an invalid extranonce - Error::InvalidExtranonce(_) => { - send_status(sender, e, error_handling::ErrorBranch::Break).await - } - // Errors on bad `TcpStream` connection. - Error::Io(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - // Errors on bad `String` to `int` conversion. - Error::ParseInt(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - // Errors from `roles_logic_sv2` crate. 
- Error::RolesSv2Logic(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - Error::UpstreamIncoming(_) => { - send_status(sender, e, error_handling::ErrorBranch::Break).await - } - // SV1 protocol library error - Error::V1Protocol(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - Error::SubprotocolMining(_) => { - send_status(sender, e, error_handling::ErrorBranch::Break).await - } - // Locking Errors - Error::PoisonLock => send_status(sender, e, error_handling::ErrorBranch::Break).await, - // Channel Receiver Error - Error::ChannelErrorReceiver(_) => { - send_status(sender, e, error_handling::ErrorBranch::Break).await - } - Error::TokioChannelErrorRecv(_) => { - send_status(sender, e, error_handling::ErrorBranch::Break).await - } - // Channel Sender Errors - Error::ChannelErrorSender(_) => { - send_status(sender, e, error_handling::ErrorBranch::Break).await - } - Error::SetDifficultyToMessage(_) => { - send_status(sender, e, error_handling::ErrorBranch::Break).await - } - Error::Infallible(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - Error::Sv2ProtocolError(ref inner) => { - match inner { - // dont notify main thread just continue - roles_logic_sv2::parsers_sv2::Mining::SubmitSharesError(_) => { - error_handling::ErrorBranch::Continue - } - _ => send_status(sender, e, error_handling::ErrorBranch::Break).await, - } - } - Error::TargetError(_) => { - send_status(sender, e, error_handling::ErrorBranch::Continue).await - } - Error::Sv1MessageTooLong => { - send_status(sender, e, error_handling::ErrorBranch::Break).await - } - Error::Parser(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - } +pub async fn handle_error(sender: &StatusSender, e: TproxyError) { + error!("Error in {:?}: {:?}", sender, e); + send_status(sender, e).await; } diff --git a/roles/translator/src/lib/sv1/downstream/channel.rs b/roles/translator/src/lib/sv1/downstream/channel.rs new file mode 100644 
index 0000000000..33aafbb84a --- /dev/null +++ b/roles/translator/src/lib/sv1/downstream/channel.rs @@ -0,0 +1,36 @@ +use super::DownstreamMessages; +use async_channel::{Receiver, Sender}; +use tokio::sync::broadcast; +use tracing::debug; +use v1::json_rpc; + +#[derive(Debug)] +pub struct DownstreamChannelState { + pub downstream_sv1_sender: Sender, + pub downstream_sv1_receiver: Receiver, + #[allow(dead_code)] // Used in message_handler.rs for share submission + pub sv1_server_sender: Sender, + pub sv1_server_receiver: broadcast::Receiver<(u32, Option, json_rpc::Message)>, /* channel_id, optional downstream_id, message */ +} + +impl DownstreamChannelState { + pub fn new( + downstream_sv1_sender: Sender, + downstream_sv1_receiver: Receiver, + sv1_server_sender: Sender, + sv1_server_receiver: broadcast::Receiver<(u32, Option, json_rpc::Message)>, + ) -> Self { + Self { + downstream_sv1_receiver, + downstream_sv1_sender, + sv1_server_receiver, + sv1_server_sender, + } + } + + pub fn drop(&self) { + debug!("Dropping downstream channel state"); + self.downstream_sv1_receiver.close(); + self.downstream_sv1_sender.close(); + } +} diff --git a/roles/translator/src/lib/sv1/downstream/data.rs b/roles/translator/src/lib/sv1/downstream/data.rs new file mode 100644 index 0000000000..c6e95b73ca --- /dev/null +++ b/roles/translator/src/lib/sv1/downstream/data.rs @@ -0,0 +1,68 @@ +use crate::sv1::downstream::DownstreamMessages; +use async_channel::Sender; +use roles_logic_sv2::mining_sv2::Target; +use tracing::debug; +use v1::{json_rpc, server_to_client, utils::HexU32Be}; + +#[derive(Debug, Clone)] +pub struct DownstreamData { + pub channel_id: Option, + pub downstream_id: u32, + pub extranonce1: Vec, + pub extranonce2_len: usize, + pub version_rolling_mask: Option, + pub version_rolling_min_bit: Option, + pub last_job_version_field: Option, + pub authorized_worker_names: Vec, + pub user_identity: String, + pub valid_jobs: Vec>, + pub target: Target, + pub hashrate: f32, + pub 
pending_set_difficulty: Option, + pub pending_target: Option, + pub pending_hashrate: Option, + pub sv1_server_sender: Sender, // just here for time being + pub first_set_difficulty_received: bool, + // this is used to store the first notify message received in case it is received before the + // first set_difficulty + pub waiting_first_notify: Option, +} + +impl DownstreamData { + pub fn new( + downstream_id: u32, + target: Target, + hashrate: f32, + sv1_server_sender: Sender, + ) -> Self { + DownstreamData { + channel_id: None, + downstream_id, + extranonce1: vec![0; 8], + extranonce2_len: 4, + version_rolling_mask: None, + version_rolling_min_bit: None, + last_job_version_field: None, + authorized_worker_names: Vec::new(), + user_identity: String::new(), + valid_jobs: Vec::new(), + target, + hashrate, + pending_set_difficulty: None, + pending_target: None, + pending_hashrate: None, + sv1_server_sender, + first_set_difficulty_received: false, + waiting_first_notify: None, + } + } + + pub fn set_pending_target_and_hashrate(&mut self, new_target: Target, new_hashrate: f32) { + self.pending_target = Some(new_target); + self.pending_hashrate = Some(new_hashrate); + debug!( + "Downstream {}: Set pending target and hashrate", + self.downstream_id + ); + } +} diff --git a/roles/translator/src/lib/sv1/downstream/downstream.rs b/roles/translator/src/lib/sv1/downstream/downstream.rs new file mode 100644 index 0000000000..66a14b5020 --- /dev/null +++ b/roles/translator/src/lib/sv1/downstream/downstream.rs @@ -0,0 +1,447 @@ +use super::DownstreamMessages; +use crate::{ + error::TproxyError, + status::{handle_error, StatusSender}, + sv1::downstream::{channel::DownstreamChannelState, data::DownstreamData}, + task_manager::TaskManager, + utils::ShutdownMessage, +}; +use async_channel::{Receiver, Sender}; +use roles_logic_sv2::{mining_sv2::Target, utils::Mutex}; +use std::sync::Arc; +use tokio::sync::{broadcast, mpsc}; +use tracing::{debug, error, info, warn}; +use v1::{ + 
json_rpc::{self, Message}, + server_to_client, IsServer, +}; + +/// Represents a downstream SV1 miner connection. +/// +/// This struct manages the state and communication for a single SV1 miner connected +/// to the translator. It handles: +/// - SV1 protocol message processing (subscribe, authorize, submit) +/// - Bidirectional message routing between miner and SV1 server +/// - Mining job tracking and share validation +/// - Difficulty adjustment coordination +/// - Connection lifecycle management +/// +/// Each downstream connection runs in its own async task that processes messages +/// from both the miner and the server, ensuring proper message ordering and +/// handling connection-specific state. +#[derive(Debug)] +pub struct Downstream { + pub downstream_data: Arc>, + downstream_channel_state: DownstreamChannelState, +} + +impl Downstream { + /// Creates a new downstream connection instance. + /// + /// # Arguments + /// * `downstream_id` - Unique identifier for this downstream connection + /// * `downstream_sv1_sender` - Channel to send messages to the miner + /// * `downstream_sv1_receiver` - Channel to receive messages from the miner + /// * `sv1_server_sender` - Channel to send messages to the SV1 server + /// * `sv1_server_receiver` - Broadcast channel to receive messages from the SV1 server + /// * `target` - Initial difficulty target for this connection + /// * `hashrate` - Initial hashrate estimate for this connection + /// + /// # Returns + /// A new Downstream instance ready to handle miner communication + pub fn new( + downstream_id: u32, + downstream_sv1_sender: Sender, + downstream_sv1_receiver: Receiver, + sv1_server_sender: Sender, + sv1_server_receiver: broadcast::Receiver<(u32, Option, json_rpc::Message)>, + target: Target, + hashrate: f32, + ) -> Self { + let downstream_data = Arc::new(Mutex::new(DownstreamData::new( + downstream_id, + target, + hashrate, + sv1_server_sender.clone(), + ))); + let downstream_channel_state = 
DownstreamChannelState::new( + downstream_sv1_sender, + downstream_sv1_receiver, + sv1_server_sender, + sv1_server_receiver, + ); + Self { + downstream_data, + downstream_channel_state, + } + } + + /// Spawns and runs the main task loop for this downstream connection. + /// + /// This method creates an async task that handles all communication for this + /// downstream connection. The task runs a select loop that processes: + /// - Shutdown signals (global, targeted, or all-downstream) + /// - Messages from the miner (subscribe, authorize, submit) + /// - Messages from the SV1 server (notify, set_difficulty, etc.) + /// + /// The task will continue running until a shutdown signal is received or + /// an unrecoverable error occurs. It ensures graceful cleanup of resources + /// and proper error reporting. + /// + /// # Arguments + /// * `notify_shutdown` - Broadcast channel for receiving shutdown signals + /// * `shutdown_complete_tx` - Channel to signal when shutdown is complete + /// * `status_sender` - Channel for sending status updates and errors + /// * `task_manager` - Manager for tracking spawned tasks + pub fn run_downstream_tasks( + self: Arc, + notify_shutdown: broadcast::Sender, + shutdown_complete_tx: mpsc::Sender<()>, + status_sender: StatusSender, + task_manager: Arc, + ) { + let mut sv1_server_receiver = self + .downstream_channel_state + .sv1_server_receiver + .resubscribe(); + let mut shutdown_rx = notify_shutdown.subscribe(); + let downstream_id = self.downstream_data.super_safe_lock(|d| d.downstream_id); + + info!("Downstream {downstream_id}: spawning unified task"); + + task_manager.spawn(async move { + loop { + tokio::select! 
{ + msg = shutdown_rx.recv() => { + match msg { + Ok(ShutdownMessage::ShutdownAll) => { + info!("Downstream {downstream_id}: received global shutdown"); + break; + } + Ok(ShutdownMessage::DownstreamShutdown(id)) if id == downstream_id => { + info!("Downstream {downstream_id}: received targeted shutdown"); + break; + } + Ok(ShutdownMessage::DownstreamShutdownAll) => { + info!("All downstream shutdown message received"); + break; + } + Ok(_) => { + // shutdown for other downstream + } + Err(e) => { + warn!("Downstream {downstream_id}: shutdown channel closed: {e}"); + break; + } + } + } + + // Handle downstream -> server message + res = Self::handle_downstream_message(self.clone()) => { + if let Err(e) = res { + error!("Downstream {downstream_id}: error in downstream message handler: {e:?}"); + handle_error(&status_sender, e).await; + break; + } + } + + // Handle server -> downstream message + res = Self::handle_sv1_server_message(self.clone(),&mut sv1_server_receiver) => { + if let Err(e) = res { + error!("Downstream {downstream_id}: error in server message handler: {e:?}"); + handle_error(&status_sender, e).await; + break; + } + } + + else => { + warn!("Downstream {downstream_id}: all channels closed; exiting task"); + break; + } + } + } + + warn!("Downstream {downstream_id}: unified task shutting down"); + self.downstream_channel_state.drop(); + drop(shutdown_complete_tx); + }); + } + + /// Handles messages received from the SV1 server. + /// + /// This method processes messages broadcast from the SV1 server to downstream + /// connections. It implements special logic to handle the timing issue where + /// `mining.notify` messages might arrive before `mining.set_difficulty` messages. 
+ /// + /// Key behaviors: + /// - Filters messages by channel ID and downstream ID + /// - For `mining.set_difficulty`: Updates target/hashrate and processes any waiting notify + /// - For `mining.notify`: Ensures set_difficulty is sent first, handles first-notify timing + /// - For other messages: Forwards directly to the miner + /// + /// The method ensures that miners always receive `set_difficulty` before `notify` + /// for the first message pair, which prevents miners from being unable to start working. + /// + /// # Arguments + /// * `sv1_server_receiver` - Broadcast receiver for messages from the SV1 server + /// + /// # Returns + /// * `Ok(())` - Message processed successfully + /// * `Err(TproxyError)` - Error processing the message + pub async fn handle_sv1_server_message( + self: Arc, + sv1_server_receiver: &mut broadcast::Receiver<(u32, Option, json_rpc::Message)>, + ) -> Result<(), TproxyError> { + match sv1_server_receiver.recv().await { + Ok((channel_id, downstream_id, message)) => { + let (my_channel_id, my_downstream_id) = self + .downstream_data + .super_safe_lock(|d| (d.channel_id, d.downstream_id)); + + let id_matches = (my_channel_id == Some(channel_id) || channel_id == 0) + && (downstream_id.is_none() || downstream_id == Some(my_downstream_id)); + + if !id_matches { + return Ok(()); // Message not intended for this downstream + } + + if let Message::Notification(notification) = &message { + match notification.method.as_str() { + "mining.set_difficulty" => { + self.downstream_data.super_safe_lock(|d| { + d.pending_set_difficulty = Some(message.clone()); + d.first_set_difficulty_received = true; + }); + + // Check if we have a waiting first notify to process + let waiting_notify = self + .downstream_data + .super_safe_lock(|d| d.waiting_first_notify.take()); + + if let Some(notify_msg) = waiting_notify { + debug!("Down: Processing waiting first notify after receiving set_difficulty"); + // Process the waiting notify message + if let 
Message::Notification(notify_notification) = ¬ify_msg { + if let Ok(notify) = server_to_client::Notify::try_from( + notify_notification.clone(), + ) { + // Send set_difficulty first + if let Some(set_difficulty_msg) = self + .downstream_data + .super_safe_lock(|d| d.pending_set_difficulty.clone()) + { + self.downstream_channel_state + .downstream_sv1_sender + .send(set_difficulty_msg) + .await + .map_err(|e| { + error!("Failed to send set_difficulty to downstream: {:?}", e); + TproxyError::ChannelErrorSender + })?; + + self.downstream_data.super_safe_lock(|d| { + if let Some(new_target) = d.pending_target.take() { + d.target = new_target; + } + if let Some(new_hashrate) = + d.pending_hashrate.take() + { + d.hashrate = new_hashrate; + } + d.pending_set_difficulty = None; + }); + } + + // Now send the notify + self.downstream_data.super_safe_lock(|d| { + d.last_job_version_field = Some(notify.version.0); + if notify.clean_jobs { + d.valid_jobs.clear(); + } + d.valid_jobs.push(notify.clone()); + }); + + self.downstream_channel_state + .downstream_sv1_sender + .send(notify.into()) + .await + .map_err(|e| { + error!( + "Failed to send notify to downstream: {:?}", + e + ); + TproxyError::ChannelErrorSender + })?; + } + } + } + return Ok(()); // set_difficulty handled + } + "mining.notify" => { + debug!("Down: Received notify notification"); + // If this is the first notify and we haven't received set_difficulty + // yet, store it and wait + let should_wait = self.downstream_data.super_safe_lock(|d| { + !d.first_set_difficulty_received && d.valid_jobs.is_empty() + }); + + if should_wait { + debug!("Down: First notify received before set_difficulty, storing and waiting..."); + self.downstream_data.super_safe_lock(|d| { + d.waiting_first_notify = Some(message.clone()); + }); + return Ok(()); // Store and wait for set_difficulty + } + + let pending_set_difficulty = self + .downstream_data + .super_safe_lock(|d| d.pending_set_difficulty.clone()); + + if let 
Some(set_difficulty_msg) = &pending_set_difficulty { + debug!("Down: Sending pending set_difficulty before notify"); + self.downstream_channel_state + .downstream_sv1_sender + .send(set_difficulty_msg.clone()) + .await + .map_err(|e| { + error!( + "Failed to send set_difficulty to downstream: {:?}", + e + ); + TproxyError::ChannelErrorSender + })?; + + self.downstream_data.super_safe_lock(|d| { + if let Some(new_target) = d.pending_target.take() { + d.target = new_target; + } + if let Some(new_hashrate) = d.pending_hashrate.take() { + d.hashrate = new_hashrate; + } + d.pending_set_difficulty = None; + debug!( + "Downstream {}: Updated target and hashrate after sending set_difficulty", + d.downstream_id + ); + }); + } + + if let Ok(mut notify) = + server_to_client::Notify::try_from(notification.clone()) + { + let original_clean_jobs = notify.clean_jobs; + + if pending_set_difficulty.is_some() { + notify.clean_jobs = true; + debug!( + "Down: Sending notify with clean_jobs=true after set_difficulty" + ); + } + + self.downstream_data.super_safe_lock(|d| { + d.last_job_version_field = Some(notify.version.0); + if original_clean_jobs { + d.valid_jobs.clear(); + } + d.valid_jobs.push(notify.clone()); + debug!("Updated valid jobs: {:?}", d.valid_jobs); + }); + + self.downstream_channel_state + .downstream_sv1_sender + .send(notify.into()) + .await + .map_err(|e| { + error!("Failed to send notify to downstream: {:?}", e); + TproxyError::ChannelErrorSender + })?; + + return Ok(()); // Notify handled, don't fall through + } + } + _ => {} // Not a special message, proceed below + } + } + + // Default path: forward all other messages + self.downstream_channel_state + .downstream_sv1_sender + .send(message.clone()) + .await + .map_err(|e| { + error!("Failed to send message to downstream: {:?}", e); + TproxyError::ChannelErrorSender + })?; + } + Err(e) => { + let downstream_id = self.downstream_data.super_safe_lock(|d| d.downstream_id); + error!( + "Sv1 message handler error for 
downstream {}: {:?}", + downstream_id, e + ); + return Err(TproxyError::BroadcastChannelErrorReceiver(e)); + } + } + + Ok(()) + } + + /// Handles messages received from the downstream SV1 miner. + /// + /// This method processes SV1 protocol messages sent by the miner, including: + /// - `mining.subscribe` - Subscription requests + /// - `mining.authorize` - Authorization requests + /// - `mining.submit` - Share submissions + /// - Other SV1 protocol messages + /// + /// The method delegates message processing to the downstream data handler, + /// which implements the SV1 protocol logic and generates appropriate responses. + /// Responses are sent back to the miner, while share submissions are forwarded + /// to the SV1 server for upstream processing. + /// + /// # Returns + /// * `Ok(())` - Message processed successfully + /// * `Err(TproxyError)` - Error receiving or processing the message + pub async fn handle_downstream_message(self: Arc) -> Result<(), TproxyError> { + let message = match self + .downstream_channel_state + .downstream_sv1_receiver + .recv() + .await + { + Ok(msg) => msg, + Err(e) => { + error!("Error receiving downstream message: {:?}", e); + return Err(TproxyError::ChannelErrorReceiver(e)); + } + }; + + let response = self + .downstream_data + .super_safe_lock(|data| data.handle_message(message)); + + match response { + Ok(Some(response_msg)) => { + if let Some(_channel_id) = self.downstream_data.super_safe_lock(|d| d.channel_id) { + self.downstream_channel_state + .downstream_sv1_sender + .send(response_msg.into()) + .await + .map_err(|e| { + error!("Failed to send message to downstream: {:?}", e); + TproxyError::ChannelErrorSender + })?; + } + } + Ok(None) => { + // Message was handled but no response needed + } + Err(e) => { + error!("Error handling downstream message: {:?}", e); + return Err(e.into()); + } + } + + Ok(()) + } +} diff --git a/roles/translator/src/lib/sv1/downstream/message_handler.rs 
b/roles/translator/src/lib/sv1/downstream/message_handler.rs new file mode 100644 index 0000000000..b22121e8ab --- /dev/null +++ b/roles/translator/src/lib/sv1/downstream/message_handler.rs @@ -0,0 +1,153 @@ +use tracing::{debug, error, info}; +use v1::{ + client_to_server, json_rpc, server_to_client, + utils::{Extranonce, HexU32Be}, + IsServer, +}; + +use crate::{ + sv1::downstream::{data::DownstreamData, DownstreamMessages, SubmitShareWithChannelId}, + utils::validate_sv1_share, +}; + +// Implements `IsServer` for `Downstream` to handle the SV1 messages. +impl IsServer<'static> for DownstreamData { + fn handle_configure( + &mut self, + request: &client_to_server::Configure, + ) -> (Option, Option) { + info!("Down: Configuring"); + debug!("Down: Handling mining.configure: {:?}", &request); + self.version_rolling_mask = request + .version_rolling_mask() + .map(|mask| HexU32Be(mask & 0x1FFFE000)); + self.version_rolling_min_bit = request.version_rolling_min_bit_count(); + + debug!( + "Negotiated version_rolling_mask is {:?}", + self.version_rolling_mask + ); + ( + Some(server_to_client::VersionRollingParams::new( + self.version_rolling_mask.clone().unwrap_or(HexU32Be(0)), + self.version_rolling_min_bit.clone().unwrap_or(HexU32Be(0)), + ).expect("Version mask invalid, automatic version mask selection not supported, please change it in carte::downstream_sv1::mod.rs")), + Some(false), + ) + } + + fn handle_subscribe(&self, request: &client_to_server::Subscribe) -> Vec<(String, String)> { + info!("Down: Subscribing"); + debug!("Down: Handling mining.subscribe: {:?}", &request); + + let set_difficulty_sub = ( + "mining.set_difficulty".to_string(), + self.downstream_id.to_string(), + ); + + let notify_sub = ( + "mining.notify".to_string(), + "ae6812eb4cd7735a302a8a9dd95cf71f".to_string(), + ); + + vec![set_difficulty_sub, notify_sub] + } + + fn handle_authorize(&self, request: &client_to_server::Authorize) -> bool { + info!("Down: Authorizing"); + debug!("Down: Handling 
mining.authorize: {:?}", &request); + true + } + + fn handle_submit(&self, request: &client_to_server::Submit<'static>) -> bool { + if let Some(channel_id) = self.channel_id { + let is_valid_share = validate_sv1_share( + request, + self.target.clone(), + self.extranonce1.clone(), + self.version_rolling_mask.clone(), + &self.valid_jobs, + ) + .unwrap_or(false); + if !is_valid_share { + return false; + } + let to_send: SubmitShareWithChannelId = SubmitShareWithChannelId { + channel_id, + downstream_id: self.downstream_id, + share: request.clone(), + extranonce: self.extranonce1.clone(), + extranonce2_len: self.extranonce2_len, + version_rolling_mask: self.version_rolling_mask.clone(), + last_job_version: self.last_job_version_field, + }; + if let Err(e) = self + .sv1_server_sender + .try_send(DownstreamMessages::SubmitShares(to_send)) + { + error!("Failed to send share to SV1 server: {:?}", e); + } + true + } else { + error!("Cannot submit share: channel_id is None (waiting for OpenExtendedMiningChannelSuccess)"); + false + } + } + + /// Indicates to the server that the client supports the mining.set_extranonce method. + fn handle_extranonce_subscribe(&self) {} + + /// Checks if a Downstream role is authorized. + fn is_authorized(&self, name: &str) -> bool { + self.authorized_worker_names.contains(&name.to_string()) + } + + /// Authorizes a Downstream role. + fn authorize(&mut self, name: &str) { + self.authorized_worker_names.push(name.to_string()); + } + + /// Sets the `extranonce1` field sent in the SV1 `mining.notify` message to the value specified + /// by the SV2 `OpenExtendedMiningChannelSuccess` message sent from the Upstream role. + fn set_extranonce1( + &mut self, + _extranonce1: Option>, + ) -> Extranonce<'static> { + self.extranonce1.clone().try_into().unwrap() + } + + /// Returns the `Downstream`'s `extranonce1` value. 
+ fn extranonce1(&self) -> Extranonce<'static> { + self.extranonce1.clone().try_into().unwrap() + } + + /// Sets the `extranonce2_size` field sent in the SV1 `mining.notify` message to the value + /// specified by the SV2 `OpenExtendedMiningChannelSuccess` message sent from the Upstream role. + fn set_extranonce2_size(&mut self, _extra_nonce2_size: Option) -> usize { + self.extranonce2_len + } + + /// Returns the `Downstream`'s `extranonce2_size` value. + fn extranonce2_size(&self) -> usize { + self.extranonce2_len + } + + /// Returns the version rolling mask. + fn version_rolling_mask(&self) -> Option { + self.version_rolling_mask.clone() + } + + /// Sets the version rolling mask. + fn set_version_rolling_mask(&mut self, mask: Option) { + self.version_rolling_mask = mask; + } + + /// Sets the minimum version rolling bit. + fn set_version_rolling_min_bit(&mut self, mask: Option) { + self.version_rolling_min_bit = mask + } + + fn notify(&'_ mut self) -> Result> { + unreachable!() + } +} diff --git a/roles/translator/src/lib/sv1/downstream/mod.rs b/roles/translator/src/lib/sv1/downstream/mod.rs new file mode 100644 index 0000000000..a731c1e5e1 --- /dev/null +++ b/roles/translator/src/lib/sv1/downstream/mod.rs @@ -0,0 +1,72 @@ +pub(super) mod channel; +pub(super) mod data; +pub mod downstream; +mod message_handler; + +use v1::{client_to_server::Submit, utils::HexU32Be}; + +/// Messages sent from downstream handling logic to the SV1 server. +/// +/// This enum defines the types of messages that downstream connections can send +/// to the central SV1 server for processing and forwarding to upstream. +#[derive(Debug)] +pub enum DownstreamMessages { + /// Represents a submitted share from a downstream miner, + /// wrapped with the relevant channel ID. + SubmitShares(SubmitShareWithChannelId), +} + +/// A wrapper around a `mining.submit` message with additional channel information. 
+/// +/// This struct contains all the necessary information to process a share submission +/// from an SV1 miner, including the share data itself and metadata needed for +/// proper routing and validation. +#[derive(Debug)] +pub struct SubmitShareWithChannelId { + /// The SV2 channel ID this share belongs to + pub channel_id: u32, + /// The downstream connection ID that submitted this share + pub downstream_id: u32, + /// The actual SV1 share submission data + pub share: Submit<'static>, + /// The complete extranonce used for this share + pub extranonce: Vec, + /// The length of the extranonce2 field + pub extranonce2_len: usize, + /// Optional version rolling mask for the share + pub version_rolling_mask: Option, + /// The version field from the last job, used for validation + pub last_job_version: Option, +} + +/// Sends a shutdown signal to a downstream task. +/// +/// This is a convenience function that sends a message on the downstream task +/// shutdown channel. The specific message content doesn't matter as receiving +/// any message triggers shutdown. +/// +/// # Arguments +/// * `sender` - The channel sender to signal shutdown on +/// +/// # Panics +/// This function will panic if the channel send fails, which only happens if +/// all receiving ends have already been dropped (meaning tasks are already shut down). +pub async fn kill(sender: &async_channel::Sender) { + // safe to unwrap since the only way this can fail is if all receiving channels are dropped + // meaning all tasks have already dropped + sender.send(true).await.unwrap(); +} + +/// Generates a subscription ID for SV1 mining connections. +/// +/// Currently returns a hardcoded string value. This should be replaced with +/// a proper ID generation mechanism in the future. 
+/// +/// # Returns +/// A string to be used as a subscription ID +/// +/// # TODO +/// Replace with proper random ID generation +pub fn new_subscription_id() -> String { + "ae6812eb4cd7735a302a8a9dd95cf71f".into() +} diff --git a/roles/translator/src/lib/sv1/mod.rs b/roles/translator/src/lib/sv1/mod.rs new file mode 100644 index 0000000000..41d5445cc8 --- /dev/null +++ b/roles/translator/src/lib/sv1/mod.rs @@ -0,0 +1,17 @@ +//! ## Downstream SV1 Module +//! +//! This module defines the structures, messages, and utility functions +//! used for handling the downstream connection with SV1 mining clients. +//! +//! It includes definitions for messages exchanged with a Bridge component, +//! structures for submitting shares and updating targets, and constants +//! and functions for managing client interactions. +//! +//! The module is organized into the following sub-modules: +//! - [`diff_management`]: (Declared here, likely contains downstream difficulty logic) +//! - [`downstream`]: Defines the core [`Downstream`] struct and its functionalities. 
+ +pub mod downstream; +pub mod sv1_server; +pub mod translation_utils; +pub use sv1_server::sv1_server::Sv1Server; diff --git a/roles/translator/src/lib/sv1/sv1_server/channel.rs b/roles/translator/src/lib/sv1/sv1_server/channel.rs new file mode 100644 index 0000000000..94fba87dd7 --- /dev/null +++ b/roles/translator/src/lib/sv1/sv1_server/channel.rs @@ -0,0 +1,42 @@ +use crate::sv1::downstream::DownstreamMessages; +use async_channel::{unbounded, Receiver, Sender}; +use roles_logic_sv2::parsers_sv2::Mining; + +use tokio::sync::broadcast; +use v1::json_rpc; + +pub struct Sv1ServerChannelState { + pub sv1_server_to_downstream_sender: broadcast::Sender<(u32, Option, json_rpc::Message)>, + pub downstream_to_sv1_server_sender: Sender, + pub downstream_to_sv1_server_receiver: Receiver, + pub channel_manager_receiver: Receiver>, + pub channel_manager_sender: Sender>, +} + +impl Sv1ServerChannelState { + pub fn new( + channel_manager_receiver: Receiver>, + channel_manager_sender: Sender>, + ) -> Self { + let (sv1_server_to_downstream_sender, _) = broadcast::channel(10); + // mpsc - sender is only clonable and receiver are not.. 
+ let (downstream_to_sv1_server_sender, downstream_to_sv1_server_receiver) = unbounded(); + + Self { + sv1_server_to_downstream_sender, + downstream_to_sv1_server_receiver, + downstream_to_sv1_server_sender, + channel_manager_receiver, + channel_manager_sender, + } + } + + pub fn drop(&self) { + self.channel_manager_receiver.close(); + self.channel_manager_sender.close(); + self.downstream_to_sv1_server_receiver.close(); + self.downstream_to_sv1_server_sender.close(); + self.channel_manager_receiver.close(); + self.channel_manager_sender.close(); + } +} diff --git a/roles/translator/src/lib/sv1/sv1_server/data.rs b/roles/translator/src/lib/sv1/sv1_server/data.rs new file mode 100644 index 0000000000..ee61827ed7 --- /dev/null +++ b/roles/translator/src/lib/sv1/sv1_server/data.rs @@ -0,0 +1,26 @@ +use crate::sv1::downstream::downstream::Downstream; +use roles_logic_sv2::{ + mining_sv2::SetNewPrevHash, utils::Id as IdFactory, vardiff::classic::VardiffState, +}; +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + +pub struct Sv1ServerData { + pub downstreams: HashMap>, + pub vardiff: HashMap>>, + pub prevhash: Option>, + pub downstream_id_factory: IdFactory, +} + +impl Sv1ServerData { + pub fn new() -> Self { + Self { + downstreams: HashMap::new(), + vardiff: HashMap::new(), + prevhash: None, + downstream_id_factory: IdFactory::new(), + } + } +} diff --git a/roles/translator/src/lib/sv1/sv1_server/mod.rs b/roles/translator/src/lib/sv1/sv1_server/mod.rs new file mode 100644 index 0000000000..a9d7b204d3 --- /dev/null +++ b/roles/translator/src/lib/sv1/sv1_server/mod.rs @@ -0,0 +1,3 @@ +pub(super) mod channel; +pub(super) mod data; +pub mod sv1_server; diff --git a/roles/translator/src/lib/sv1/sv1_server/sv1_server.rs b/roles/translator/src/lib/sv1/sv1_server/sv1_server.rs new file mode 100644 index 0000000000..6b9812fd9c --- /dev/null +++ b/roles/translator/src/lib/sv1/sv1_server/sv1_server.rs @@ -0,0 +1,606 @@ +use crate::{ + config::TranslatorConfig, + 
error::TproxyError, + status::{handle_error, Status, StatusSender}, + sv1::{ + downstream::{downstream::Downstream, DownstreamMessages}, + sv1_server::{channel::Sv1ServerChannelState, data::Sv1ServerData}, + translation_utils::{create_notify, get_set_difficulty}, + }, + task_manager::TaskManager, + utils::ShutdownMessage, +}; +use async_channel::{Receiver, Sender}; +use network_helpers_sv2::sv1_connection::ConnectionSV1; +use roles_logic_sv2::{ + mining_sv2::{SubmitSharesExtended, Target}, + parsers_sv2::Mining, + utils::{hash_rate_to_target, Mutex}, + vardiff::classic::VardiffState, + Vardiff, +}; +use std::{ + collections::HashMap, + net::SocketAddr, + sync::{ + atomic::{AtomicBool, AtomicU32, Ordering}, + Arc, RwLock, + }, + time::Duration, +}; +use tokio::{ + net::TcpListener, + sync::{broadcast, mpsc}, + time, +}; +use tracing::{debug, error, info, warn}; + +/// SV1 server that handles connections from SV1 miners. +/// +/// This struct manages the SV1 server component of the translator, which: +/// - Accepts connections from SV1 miners +/// - Manages difficulty adjustment for connected miners +/// - Coordinates with the SV2 channel manager for upstream communication +/// - Tracks mining jobs and share submissions +/// +/// The server maintains state for multiple downstream connections and implements +/// variable difficulty adjustment based on share submission rates. +pub struct Sv1Server { + sv1_server_channel_state: Sv1ServerChannelState, + sv1_server_data: Arc>, + shares_per_minute: f32, + listener_addr: SocketAddr, + config: TranslatorConfig, + clean_job: AtomicBool, + sequence_counter: AtomicU32, + miner_counter: AtomicU32, +} + +impl Sv1Server { + /// Drops the server's channel state, cleaning up resources. + pub fn drop(&self) { + self.sv1_server_channel_state.drop(); + } + + /// Creates a new SV1 server instance. 
+ /// + /// # Arguments + /// * `listener_addr` - The socket address to bind the server to + /// * `channel_manager_receiver` - Channel to receive messages from the channel manager + /// * `channel_manager_sender` - Channel to send messages to the channel manager + /// * `config` - Configuration settings for the translator + /// + /// # Returns + /// A new Sv1Server instance ready to accept connections + pub fn new( + listener_addr: SocketAddr, + channel_manager_receiver: Receiver>, + channel_manager_sender: Sender>, + config: TranslatorConfig, + ) -> Self { + let shares_per_minute = config.downstream_difficulty_config.shares_per_minute; + let sv1_server_channel_state = + Sv1ServerChannelState::new(channel_manager_receiver, channel_manager_sender); + let sv1_server_data = Arc::new(Mutex::new(Sv1ServerData::new())); + Self { + sv1_server_channel_state, + sv1_server_data, + config, + listener_addr, + shares_per_minute, + clean_job: AtomicBool::new(true), + miner_counter: AtomicU32::new(0), + sequence_counter: AtomicU32::new(0), + } + } + + /// Starts the SV1 server and begins accepting connections. + /// + /// This method: + /// - Binds to the configured listening address + /// - Spawns the variable difficulty adjustment loop + /// - Enters the main event loop to handle: + /// - New miner connections + /// - Shutdown signals + /// - Messages from downstream miners (submit shares) + /// - Messages from upstream SV2 channel manager + /// + /// The server will continue running until a shutdown signal is received. 
+ /// + /// # Arguments + /// * `notify_shutdown` - Broadcast channel for shutdown coordination + /// * `shutdown_complete_tx` - Channel to signal shutdown completion + /// * `status_sender` - Channel for sending status updates + /// * `task_manager` - Manager for spawned async tasks + /// + /// # Returns + /// * `Ok(())` - Server shut down gracefully + /// * `Err(TproxyError)` - Server encountered an error + pub async fn start( + self: Arc, + notify_shutdown: broadcast::Sender, + shutdown_complete_tx: mpsc::Sender<()>, + status_sender: Sender, + task_manager: Arc, + ) -> Result<(), TproxyError> { + info!("Starting SV1 server on {}", self.listener_addr); + let mut shutdown_rx_main = notify_shutdown.subscribe(); + let shutdown_complete_tx_main_clone = shutdown_complete_tx.clone(); + + // get the first target for the first set difficulty message + let first_target: Target = hash_rate_to_target( + self.config + .downstream_difficulty_config + .min_individual_miner_hashrate as f64, + self.config.downstream_difficulty_config.shares_per_minute as f64, + ) + .unwrap() + .into(); + + // Spawn vardiff loop + task_manager.spawn(Self::spawn_vardiff_loop( + Arc::clone(&self), + notify_shutdown.subscribe(), + shutdown_complete_tx_main_clone.clone(), + )); + + let listener = TcpListener::bind(self.listener_addr).await.map_err(|e| { + error!("Failed to bind to {}: {}", self.listener_addr, e); + e + })?; + + let sv1_status_sender = StatusSender::Sv1Server(status_sender.clone()); + + loop { + tokio::select! { + message = shutdown_rx_main.recv() => { + match message { + Ok(ShutdownMessage::ShutdownAll) => { + info!("SV1 Server: Vardiff loop received shutdown signal. 
Exiting."); + break; + } + Ok(ShutdownMessage::DownstreamShutdown(downstream_id)) => { + let current_downstream = self.sv1_server_data.super_safe_lock(|d| d.downstreams.remove(&downstream_id)); + if current_downstream.is_some() { + info!("Downstream: {downstream_id} removed from sv1 server downstreams"); + } + } + Ok(ShutdownMessage::DownstreamShutdownAll) => { + self.sv1_server_data.super_safe_lock(|d|{d.downstreams = HashMap::new();}); + info!("All downstream removed from sv1 server downstreams as upstream changed"); + } + _ => {} + } + } + result = listener.accept() => { + match result { + Ok((stream, addr)) => { + info!("New SV1 downstream connection from {}", addr); + + let connection = ConnectionSV1::new(stream).await; + let downstream_id = self.sv1_server_data.super_safe_lock(|v| v.downstream_id_factory.next()); + let downstream = Arc::new(Downstream::new( + downstream_id, + connection.sender().clone(), + connection.receiver().clone(), + self.sv1_server_channel_state.downstream_to_sv1_server_sender.clone(), + self.sv1_server_channel_state.sv1_server_to_downstream_sender.clone().subscribe(), + first_target.clone(), + self.config + .downstream_difficulty_config + .min_individual_miner_hashrate, + )); + // vardiff initialization + let vardiff = Arc::new(RwLock::new(VardiffState::new().expect("Failed to create vardiffstate"))); + _ = self.sv1_server_data + .safe_lock(|d| { + d.downstreams.insert(downstream_id, downstream.clone()); + // Insert vardiff state for this downstream + d.vardiff.insert(downstream_id, vardiff); + }); + info!("Downstream {} registered successfully", downstream_id); + + self + .open_extended_mining_channel(downstream.clone()) + .await?; + } + Err(e) => { + warn!("Failed to accept new connection: {:?}", e); + } + } + } + res = Self::handle_downstream_message( + Arc::clone(&self) + ) => { + if let Err(e) = res { + handle_error(&sv1_status_sender, e).await; + break; + } + } + res = Self::handle_upstream_message( + Arc::clone(&self), + 
first_target.clone(), + notify_shutdown.clone(), + shutdown_complete_tx_main_clone.clone(), + status_sender.clone(), + task_manager.clone() + ) => { + if let Err(e) = res { + handle_error(&sv1_status_sender, e).await; + break; + } + } + } + } + self.sv1_server_channel_state.drop(); + drop(shutdown_complete_tx); + warn!("SV1 Server main listener loop exited."); + Ok(()) + } + + /// Handles messages received from downstream SV1 miners. + /// + /// This method processes share submissions from miners by: + /// - Updating variable difficulty counters + /// - Extracting and validating share data + /// - Converting SV1 share format to SV2 SubmitSharesExtended + /// - Forwarding the share to the channel manager for upstream submission + /// + /// # Returns + /// * `Ok(())` - Message processed successfully + /// * `Err(TproxyError)` - Error processing the message + pub async fn handle_downstream_message(self: Arc) -> Result<(), TproxyError> { + let downstream_message = self + .sv1_server_channel_state + .downstream_to_sv1_server_receiver + .recv() + .await + .map_err(TproxyError::ChannelErrorReceiver)?; + + let DownstreamMessages::SubmitShares(message) = downstream_message; + + // Increment vardiff counter for this downstream + self.sv1_server_data.safe_lock(|v| { + if let Some(vardiff_state) = v.vardiff.get(&message.downstream_id) { + vardiff_state + .write() + .unwrap() + .increment_shares_since_last_update(); + } + })?; + + let last_job_version = message.last_job_version.ok_or_else(|| { + TproxyError::RolesSv2LogicError(roles_logic_sv2::errors::Error::NoValidJob) + })?; + + let version = match (message.share.version_bits, message.version_rolling_mask) { + (Some(version_bits), Some(rolling_mask)) => { + (last_job_version & !rolling_mask.0) | (version_bits.0 & rolling_mask.0) + } + (None, None) => last_job_version, + _ => return Err(TproxyError::SV1Error), + }; + + let extranonce: Vec = message.share.extra_nonce2.into(); + + let submit_share_extended = SubmitSharesExtended 
{ + channel_id: message.channel_id, + sequence_number: self.sequence_counter.load(Ordering::SeqCst), + job_id: message.share.job_id.parse::()?, + nonce: message.share.nonce.0, + ntime: message.share.time.0, + version, + extranonce: extranonce + .try_into() + .map_err(|_| TproxyError::General("Invalid extranonce length".into()))?, + }; + + self.sv1_server_channel_state + .channel_manager_sender + .send(Mining::SubmitSharesExtended(submit_share_extended)) + .await + .map_err(|_| TproxyError::ChannelErrorSender)?; + + self.sequence_counter.fetch_add(1, Ordering::SeqCst); + + Ok(()) + } + + /// Handles messages received from the upstream SV2 server via the channel manager. + /// + /// This method processes various SV2 messages including: + /// - OpenExtendedMiningChannelSuccess: Sets up downstream connections + /// - NewExtendedMiningJob: Converts to SV1 notify messages + /// - SetNewPrevHash: Updates block template information + /// - Channel error messages (TODO: implement proper handling) + /// + /// # Arguments + /// * `first_target` - Initial difficulty target for new connections + /// * `notify_shutdown` - Broadcast channel for shutdown coordination + /// * `shutdown_complete_tx` - Channel to signal shutdown completion + /// * `status_sender` - Channel for sending status updates + /// * `task_manager` - Manager for spawned async tasks + /// + /// # Returns + /// * `Ok(())` - Message processed successfully + /// * `Err(TproxyError)` - Error processing the message + pub async fn handle_upstream_message( + self: Arc, + first_target: Target, + notify_shutdown: broadcast::Sender, + shutdown_complete_tx: mpsc::Sender<()>, + status_sender: Sender, + task_manager: Arc, + ) -> Result<(), TproxyError> { + let message = self + .sv1_server_channel_state + .channel_manager_receiver + .recv() + .await + .map_err(TproxyError::ChannelErrorReceiver)?; + + match message { + Mining::OpenExtendedMiningChannelSuccess(m) => { + let downstream_id = m.request_id; + let downstreams = 
self + .sv1_server_data + .super_safe_lock(|v| v.downstreams.clone()); + if let Some(downstream) = Self::get_downstream(downstream_id, downstreams) { + downstream.downstream_data.safe_lock(|d| { + d.extranonce1 = m.extranonce_prefix.to_vec(); + d.extranonce2_len = m.extranonce_size.into(); + d.channel_id = Some(m.channel_id); + })?; + + let status_sender = StatusSender::Downstream { + downstream_id, + tx: status_sender.clone(), + }; + + Downstream::run_downstream_tasks( + downstream, + notify_shutdown, + shutdown_complete_tx, + status_sender, + task_manager, + ); + + let set_difficulty = get_set_difficulty(first_target).map_err(|_| { + TproxyError::General("Failed to generate set_difficulty".into()) + })?; + // send the set_difficulty message to the downstream + self.sv1_server_channel_state + .sv1_server_to_downstream_sender + .send((m.channel_id, None, set_difficulty)) + .map_err(|_| TproxyError::ChannelErrorSender)?; + } else { + error!("Downstream not found for downstream_id: {}", downstream_id); + } + } + + Mining::NewExtendedMiningJob(m) => { + info!( + "Received NewExtendedMiningJob for channel id: {}", + m.channel_id + ); + if let Some(prevhash) = self.sv1_server_data.super_safe_lock(|v| v.prevhash.clone()) + { + let notify = create_notify( + prevhash, + m.clone().into_static(), + self.clean_job.load(Ordering::SeqCst), + ); + self.clean_job.store(false, Ordering::SeqCst); + let _ = self + .sv1_server_channel_state + .sv1_server_to_downstream_sender + .send((m.channel_id, None, notify.into())); + } + } + + Mining::SetNewPrevHash(m) => { + info!("Received SetNewPrevHash for channel id: {}", m.channel_id); + self.clean_job.store(true, Ordering::SeqCst); + self.sv1_server_data + .super_safe_lock(|v| v.prevhash = Some(m.clone().into_static())); + } + + Mining::CloseChannel(_) => { + todo!("Handle CloseChannel message from upstream"); + } + + Mining::OpenMiningChannelError(_) => { + todo!("Handle OpenMiningChannelError message from upstream"); + } + + 
Mining::UpdateChannelError(_) => { + todo!("Handle UpdateChannelError message from upstream"); + } + + _ => unreachable!("Unexpected message type received from upstream"), + } + + Ok(()) + } + + /// Opens an extended mining channel for a downstream connection. + /// + /// This method initiates the SV2 channel setup process by: + /// - Calculating the initial target based on configuration + /// - Generating a unique user identity for the miner + /// - Creating an OpenExtendedMiningChannel message + /// - Sending the request to the channel manager + /// + /// # Arguments + /// * `downstream` - The downstream connection to set up a channel for + /// + /// # Returns + /// * `Ok(())` - Channel setup request sent successfully + /// * `Err(TproxyError)` - Error setting up the channel + pub async fn open_extended_mining_channel( + &self, + downstream: Arc, + ) -> Result<(), TproxyError> { + let config = &self.config.downstream_difficulty_config; + + let hashrate = config.min_individual_miner_hashrate as f64; + let shares_per_min = config.shares_per_minute as f64; + let min_extranonce_size = self.config.min_extranonce2_size; + + let initial_target: Target = hash_rate_to_target(hashrate, shares_per_min) + .unwrap() + .into(); + + let miner_id = self.miner_counter.fetch_add(1, Ordering::SeqCst) + 1; + let user_identity = format!("{}.miner{}", self.config.user_identity, miner_id); + + downstream + .downstream_data + .safe_lock(|d| d.user_identity = user_identity.clone())?; + + let open_channel_msg = roles_logic_sv2::mining_sv2::OpenExtendedMiningChannel { + request_id: downstream + .downstream_data + .super_safe_lock(|d| d.downstream_id), + user_identity: user_identity.try_into()?, + nominal_hash_rate: hashrate as f32, + max_target: initial_target.into(), + min_extranonce_size, + }; + + self.sv1_server_channel_state + .channel_manager_sender + .send(Mining::OpenExtendedMiningChannel(open_channel_msg)) + .await + .map_err(|_| TproxyError::ChannelErrorSender)?; + + Ok(()) + } + 
+ /// Retrieves a downstream connection by ID from the provided map. + /// + /// # Arguments + /// * `downstream_id` - The ID of the downstream connection to find + /// * `downstream` - HashMap containing downstream connections + /// + /// # Returns + /// * `Some(Downstream)` - If a downstream with the given ID exists + /// * `None` - If no downstream with the given ID is found + pub fn get_downstream( + downstream_id: u32, + downstream: HashMap>, + ) -> Option> { + downstream.get(&downstream_id).cloned() + } + + /// Extracts the downstream ID from a Downstream instance. + /// + /// # Arguments + /// * `downstream` - The downstream connection to get the ID from + /// + /// # Returns + /// The downstream ID as a u32 + pub fn get_downstream_id(downstream: Downstream) -> u32 { + downstream + .downstream_data + .super_safe_lock(|s| s.downstream_id) + } + + /// This method implements the SV1 server's variable difficulty logic for all downstreams. + /// Every 60 seconds, this method updates the difficulty state for each downstream. + async fn spawn_vardiff_loop( + self: Arc, + mut notify_shutdown: broadcast::Receiver, + shutdown_complete_tx: mpsc::Sender<()>, + ) { + info!("Spawning vardiff adjustment loop for SV1 server"); + + 'vardiff_loop: loop { + tokio::select! { + message = notify_shutdown.recv() => { + match message { + Ok(ShutdownMessage::ShutdownAll) => { + info!("SV1 Server: Vardiff loop received shutdown signal. 
Exiting."); + break 'vardiff_loop; + } + Ok(ShutdownMessage::DownstreamShutdown(downstream_id)) => { + let current_downstream = self.sv1_server_data.super_safe_lock(|d| d.downstreams.remove(&downstream_id)); + if current_downstream.is_some() { + info!("Downstream: {downstream_id} removed from sv1 server downstreams"); + } + } + Ok(ShutdownMessage::DownstreamShutdownAll) => { + self.sv1_server_data.super_safe_lock(|d|{d.downstreams = HashMap::new();}); + info!("All downstream removed from sv1 server downstreams as upstream changed"); + } + _ => {} + } + } + _ = time::sleep(Duration::from_secs(60)) => { + let vardiff_map = self.sv1_server_data.super_safe_lock(|v| v.vardiff.clone()); + let mut updates = Vec::new(); + for (downstream_id, vardiff_state) in vardiff_map.iter() { + debug!("Updating vardiff for downstream_id: {}", downstream_id); + let mut vardiff = vardiff_state.write().unwrap(); + // Get hashrate and target from downstreams + let Some((channel_id, hashrate, target)) = self.sv1_server_data.super_safe_lock(|data| { + data.downstreams.get(downstream_id).and_then(|ds| { + ds.downstream_data.super_safe_lock(|d| Some((d.channel_id, d.hashrate, d.target.clone()))) + }) + }) else { + continue; + }; + + if channel_id.is_none() { + error!("Channel id is none for downstream_id: {}", downstream_id); + continue; + } + let channel_id = channel_id.unwrap(); + let new_hashrate_opt = vardiff.try_vardiff(hashrate, &target, self.shares_per_minute); + + if let Ok(Some(new_hashrate)) = new_hashrate_opt { + // Calculate new target based on new hashrate + let new_target: Target = + hash_rate_to_target(new_hashrate as f64, self.shares_per_minute as f64) + .unwrap() + .into(); + + // Update the downstream's pending target and hashrate + _ = self.sv1_server_data.safe_lock(|dmap| { + if let Some(d) = dmap.downstreams.get(downstream_id) { + _ = d.downstream_data.safe_lock(|d| { + d.set_pending_target_and_hashrate(new_target.clone(), new_hashrate); + }); + } + }); + + 
updates.push((channel_id, Some(*downstream_id), new_target.clone())); + + debug!( + "Calculated new target for downstream_id={} to {:?}", + downstream_id, new_target + ); + } + } + + for (channel_id, downstream_id, target) in updates { + if let Ok(set_difficulty_msg) = get_set_difficulty(target) { + if let Err(e) = + self.sv1_server_channel_state.sv1_server_to_downstream_sender.send((channel_id, downstream_id, set_difficulty_msg)) + { + error!( + "Failed to send SetDifficulty message to downstream {}: {:?}", + downstream_id.unwrap_or(0), + e + ); + break 'vardiff_loop; + } + } + } + } + } + } + drop(shutdown_complete_tx); + warn!("SV1 Server: Vardiff loop exited."); + } +} diff --git a/roles/translator/src/lib/sv1/translation_utils.rs b/roles/translator/src/lib/sv1/translation_utils.rs new file mode 100644 index 0000000000..68c43b41cc --- /dev/null +++ b/roles/translator/src/lib/sv1/translation_utils.rs @@ -0,0 +1,143 @@ +use primitive_types::U256; +use roles_logic_sv2::{ + job_creator::extended_job_to_non_segwit, + mining_sv2::{NewExtendedMiningJob, SetNewPrevHash, Target}, +}; +use std::ops::Div; +use tracing::debug; +use v1::{ + json_rpc, server_to_client, + utils::{HexU32Be, MerkleNode, PrevHash}, +}; + +use crate::error::TproxyError; + +/// Creates a new SV1 `mining.notify` message from SV2 messages. +/// +/// This function translates SV2 `SetNewPrevHash` and `NewExtendedMiningJob` messages +/// into a corresponding SV1 `mining.notify` message that can be sent to downstream +/// SV1 miners. 
+/// +/// The function performs the following conversions: +/// - Converts the extended mining job to non-segwit format +/// - Extracts the previous block hash +/// - Converts coinbase transaction prefix and suffix +/// - Transforms the merkle path into SV1 format +/// - Sets appropriate version, bits, and timestamp fields +/// +/// # Arguments +/// * `new_prev_hash` - SV2 message containing the previous block hash information +/// * `new_job` - SV2 message containing the new mining job details +/// * `clean_jobs` - Whether miners should abandon previous jobs +/// +/// # Returns +/// A properly formatted SV1 `mining.notify` message +pub fn create_notify( + new_prev_hash: SetNewPrevHash<'static>, + new_job: NewExtendedMiningJob<'static>, + clean_jobs: bool, +) -> server_to_client::Notify<'static> { + // TODO 32 must be changed! + let new_job = extended_job_to_non_segwit(new_job, 32) + .expect("failed to convert extended job to non segwit"); + // Make sure that SetNewPrevHash + NewExtendedMiningJob is matching (not future) + let job_id = new_job.job_id.to_string(); + + // U256<'static> -> MerkleLeaf + let prev_hash = PrevHash(new_prev_hash.prev_hash.clone()); + + // B064K<'static'> -> HexBytes + let coin_base1 = new_job.coinbase_tx_prefix.to_vec().into(); + let coin_base2 = new_job.coinbase_tx_suffix.to_vec().into(); + + // Seq0255<'static, U56<'static>> -> Vec> + let merkle_path = new_job.merkle_path.clone().into_static().0; + let merkle_branch: Vec = merkle_path.into_iter().map(MerkleNode).collect(); + + // u32 -> HexBytes + let version = HexU32Be(new_job.version); + let bits = HexU32Be(new_prev_hash.nbits); + let time = HexU32Be(match new_job.is_future() { + true => new_prev_hash.min_ntime, + false => new_job.min_ntime.clone().into_inner().unwrap(), + }); + + let notify_response = server_to_client::Notify { + job_id, + prev_hash, + coin_base1, + coin_base2, + merkle_branch, + version, + bits, + time, + clean_jobs, + }; + debug!("\nNextMiningNotify: {:?}\n", 
notify_response);
+    notify_response
+}
+
+/// Converts an SV2 target into an SV1 `mining.set_difficulty` message.
+///
+/// This function takes an SV2 target value and converts it to the corresponding
+/// difficulty value that should be sent to SV1 miners via the `mining.set_difficulty`
+/// message.
+///
+/// # Arguments
+/// * `target` - The SV2 target value to convert
+///
+/// # Returns
+/// * `Ok(json_rpc::Message)` - The properly formatted SV1 set_difficulty message
+/// * `Err(TproxyError)` - If the target conversion fails
+pub fn get_set_difficulty(target: Target) -> Result<json_rpc::Message, TproxyError> {
+    let value = difficulty_from_target(target)?;
+    debug!("Difficulty from target: {:?}", value);
+    let set_target = v1::methods::server_to_client::SetDifficulty { value };
+    let message: json_rpc::Message = set_target.into();
+    Ok(message)
+}
+
+/// Converts target received by the `SetTarget` SV2 message from the Upstream role into the
+/// difficulty for the Downstream role sent via the SV1 `mining.set_difficulty` message.
+#[allow(clippy::result_large_err)]
+pub(super) fn difficulty_from_target(target: Target) -> Result<f64, TproxyError> {
+    // reverse because target is LE and this function relies on BE
+    let mut target = binary_sv2::U256::from(target).to_vec();
+
+    target.reverse();
+
+    let target = target.as_slice();
+    debug!("Target: {:?}", target);
+
+    // If received target is 0, return 0
+    if is_zero(target) {
+        return Ok(0.0);
+    }
+    let target = U256::from_big_endian(target);
+    let pdiff: [u8; 32] = [
+        0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+        255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+    ];
+    let pdiff = U256::from_big_endian(pdiff.as_ref());
+
+    if pdiff > target {
+        let diff = pdiff.div(target);
+        Ok(diff.low_u64() as f64)
+    } else {
+        let diff = target.div(pdiff);
+        let diff = diff.low_u64() as f64;
+        // TODO still results in a difficulty that is too low
+        Ok(1.0 / diff)
+    }
+}
+
+/// Helper function to check if target is set to zero for some reason (typically happens when
+/// Downstream role first connects).
+/// https://stackoverflow.com/questions/65367552/checking-a-vecu8-to-see-if-its-all-zero
+fn is_zero(buf: &[u8]) -> bool {
+    let (prefix, aligned, suffix) = unsafe { buf.align_to::<u128>() };
+
+    prefix.iter().all(|&x| x == 0)
+        && suffix.iter().all(|&x| x == 0)
+        && aligned.iter().all(|&x| x == 0)
+}
diff --git a/roles/translator/src/lib/sv2/channel_manager/channel.rs b/roles/translator/src/lib/sv2/channel_manager/channel.rs
new file mode 100644
index 0000000000..bb0f58cbfd
--- /dev/null
+++ b/roles/translator/src/lib/sv2/channel_manager/channel.rs
@@ -0,0 +1,36 @@
+use crate::sv2::upstream::upstream::EitherFrame;
+use async_channel::{Receiver, Sender};
+use roles_logic_sv2::parsers_sv2::Mining;
+use tracing::debug;
+
+#[derive(Clone, Debug)]
+pub struct ChannelState {
+    pub upstream_sender: Sender<EitherFrame>,
+    pub upstream_receiver: Receiver<EitherFrame>,
+    pub sv1_server_sender: Sender<Mining<'static>>,
+    pub sv1_server_receiver: Receiver<Mining<'static>>,
+}
+
+impl ChannelState {
+    pub fn new(
+        upstream_sender: Sender<EitherFrame>,
+        upstream_receiver: Receiver<EitherFrame>,
+        sv1_server_sender: Sender<Mining<'static>>,
+        sv1_server_receiver: Receiver<Mining<'static>>,
+    ) -> Self {
+        Self {
+            upstream_sender,
+            upstream_receiver,
+            sv1_server_sender,
+            sv1_server_receiver,
+        }
+    }
+
+    pub fn drop(&self) {
+        debug!("Dropping channel manager channels");
+        self.upstream_receiver.close();
+        self.upstream_sender.close();
+        self.sv1_server_receiver.close();
+        self.sv1_server_sender.close();
+    }
+}
diff --git a/roles/translator/src/lib/sv2/channel_manager/channel_manager.rs b/roles/translator/src/lib/sv2/channel_manager/channel_manager.rs
new file mode 100644
index 0000000000..1498e36167
--- /dev/null
+++ b/roles/translator/src/lib/sv2/channel_manager/channel_manager.rs
@@ -0,0 +1,483 @@
+use crate::{
+    error::TproxyError,
+    status::{handle_error, Status, StatusSender},
+    sv2::{
+        channel_manager::{
+            channel::ChannelState,
+            data::{ChannelManagerData, ChannelMode},
+        },
+        upstream::upstream::{EitherFrame, Message, StdFrame},
+    },
+    task_manager::TaskManager,
    utils::{into_static, ShutdownMessage},
+};
+use async_channel::{Receiver, Sender};
+use codec_sv2::Frame;
+use roles_logic_sv2::{
+    channels_sv2::client::extended::ExtendedChannel,
+    handlers_sv2::ParseMiningMessagesFromUpstreamAsync,
+    mining_sv2::OpenExtendedMiningChannelSuccess,
+    parsers_sv2::{AnyMessage, Mining},
+    utils::Mutex,
+};
+use std::sync::{Arc, RwLock};
+use tokio::sync::{broadcast, mpsc};
+use tracing::{error, info, warn};
+
+/// Type alias for SV2 mining messages with static lifetime
+pub type Sv2Message = Mining<'static>;
+
+/// Manages SV2 channels and message routing between upstream and downstream.
+///
+/// The ChannelManager serves as the central component that bridges SV2 upstream
+/// connections with SV1 downstream connections. It handles:
+/// - SV2 channel lifecycle management (open, close, error handling)
+/// - Message translation and routing between protocols
+/// - Extranonce management for aggregated vs non-aggregated modes
+/// - Share submission processing and validation
+/// - Job distribution to downstream connections
+///
+/// The manager supports two operational modes:
+/// - Aggregated: All downstream connections share a single extended channel
+/// - Non-aggregated: Each downstream connection gets its own extended channel
+///
+/// This design allows the translator to efficiently manage multiple mining
+/// connections while maintaining proper isolation and state management.
+#[derive(Debug, Clone)]
+pub struct ChannelManager {
+    pub channel_state: ChannelState,
+    pub channel_manager_data: Arc<Mutex<ChannelManagerData>>,
+}
+
+impl ChannelManager {
+    /// Creates a new ChannelManager instance.
+    ///
+    /// # Arguments
+    /// * `upstream_sender` - Channel to send messages to upstream
+    /// * `upstream_receiver` - Channel to receive messages from upstream
+    /// * `sv1_server_sender` - Channel to send messages to SV1 server
+    /// * `sv1_server_receiver` - Channel to receive messages from SV1 server
+    /// * `mode` - Operating mode (Aggregated or NonAggregated)
+    ///
+    /// # Returns
+    /// A new ChannelManager instance ready to handle message routing
+    pub fn new(
+        upstream_sender: Sender<EitherFrame>,
+        upstream_receiver: Receiver<EitherFrame>,
+        sv1_server_sender: Sender<Mining<'static>>,
+        sv1_server_receiver: Receiver<Mining<'static>>,
+        mode: ChannelMode,
+    ) -> Self {
+        let channel_state = ChannelState::new(
+            upstream_sender,
+            upstream_receiver,
+            sv1_server_sender,
+            sv1_server_receiver,
+        );
+        let channel_manager_data = Arc::new(Mutex::new(ChannelManagerData::new(mode)));
+        Self {
+            channel_state,
+            channel_manager_data,
+        }
+    }
+
+    /// Spawns and runs the main channel manager task loop.
+    ///
+    /// This method creates an async task that handles all message routing for the
+    /// channel manager. The task runs a select loop that processes:
+    /// - Shutdown signals for graceful termination
+    /// - Messages from upstream SV2 server
+    /// - Messages from downstream SV1 server
+    ///
+    /// The task continues running until a shutdown signal is received or an
+    /// unrecoverable error occurs. It ensures proper cleanup of resources
+    /// and error reporting.
+    ///
+    /// # Arguments
+    /// * `notify_shutdown` - Broadcast channel for receiving shutdown signals
+    /// * `shutdown_complete_tx` - Channel to signal when shutdown is complete
+    /// * `status_sender` - Channel for sending status updates and errors
+    /// * `task_manager` - Manager for tracking spawned tasks
+    pub async fn run_channel_manager_tasks(
+        self: Arc<Self>,
+        notify_shutdown: broadcast::Sender<ShutdownMessage>,
+        shutdown_complete_tx: mpsc::Sender<()>,
+        status_sender: Sender<Status>,
+        task_manager: Arc<TaskManager>,
+    ) {
+        let mut shutdown_rx = notify_shutdown.subscribe();
+        info!("Spawning run channel manager task");
+        let status_sender = StatusSender::ChannelManager(status_sender);
+        task_manager.spawn(async move {
+            loop {
+                tokio::select! {
+                    message = shutdown_rx.recv() => {
+                        if let Ok(ShutdownMessage::ShutdownAll) = message {
+                            info!("ChannelManager: received shutdown signal.");
+                            break;
+                        }
+                    }
+                    res = Self::handle_upstream_message(self.clone()) => {
+                        if let Err(e) = res {
+                            handle_error(&status_sender, e).await;
+                            break;
+                        }
+                    },
+                    res = Self::handle_downstream_message(self.clone()) => {
+                        if let Err(e) = res {
+                            handle_error(&status_sender, e).await;
+                            break;
+                        }
+                    },
+                    else => {
+                        warn!("All channel manager message streams closed. Exiting...");
+                        break;
+                    }
+                }
+            }
+
+            self.channel_state.drop();
+            drop(shutdown_complete_tx);
+            warn!("ChannelManager: unified message loop exited.");
+        });
+    }
+
+    /// Handles messages received from the upstream SV2 server.
+    ///
+    /// This method processes SV2 messages from upstream and routes them appropriately:
+    /// - Mining messages: Processed through the roles logic and forwarded to SV1 server
+    /// - Channel responses: Handled to manage channel lifecycle
+    /// - Job notifications: Converted and distributed to downstream connections
+    /// - Error messages: Logged and handled appropriately
+    ///
+    /// The method implements the core SV2 protocol logic for channel management,
+    /// including handling both aggregated and non-aggregated channel modes.
+    ///
+    /// # Returns
+    /// * `Ok(())` - Message processed successfully
+    /// * `Err(TproxyError)` - Error processing the message
+    pub async fn handle_upstream_message(self: Arc<Self>) -> Result<(), TproxyError> {
+        let mut channel_manager = self.get_channel_manager();
+        let message = self
+            .channel_state
+            .upstream_receiver
+            .recv()
+            .await
+            .map_err(TproxyError::ChannelErrorReceiver)?;
+
+        let Frame::Sv2(mut frame) = message else {
+            warn!("Received non-SV2 frame from upstream");
+            return Ok(());
+        };
+
+        let header = frame.get_header().ok_or_else(|| {
+            error!("Missing header in SV2 frame");
+            TproxyError::General("Missing frame header".into())
+        })?;
+
+        let message_type = header.msg_type();
+        let mut payload = frame.payload().to_vec();
+
+        let message: AnyMessage<'_> = into_static(
+            (message_type, payload.as_mut_slice())
+                .try_into()
+                .map_err(|e| {
+                    error!("Failed to parse upstream frame into AnyMessage: {:?}", e);
+                    TproxyError::General("Failed to parse AnyMessage".into())
+                })?,
+        )?;
+
+        match message {
+            Message::Mining(_) => {
+                channel_manager
+                    .handle_mining_message(message_type, &mut payload)
+                    .await?;
+            }
+            _ => {
+                warn!("Unhandled upstream message type: {:?}", message);
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Handles messages received from the downstream SV1 server.
+    ///
+    /// This method processes requests from the SV1 server, primarily:
+    /// - OpenExtendedMiningChannel: Sets up new SV2 channels for downstream connections
+    /// - SubmitSharesExtended: Processes share submissions from miners
+    ///
+    /// For channel opening, the method handles both aggregated and non-aggregated modes:
+    /// - Aggregated: Creates extended channels using extranonce prefixes
+    /// - Non-aggregated: Opens individual extended channels with the upstream for each downstream
+    ///
+    /// Share submissions are validated, processed through the channel logic,
+    /// and forwarded to the upstream server with appropriate extranonce handling.
+ /// + /// # Returns + /// * `Ok(())` - Message processed successfully + /// * `Err(TproxyError)` - Error processing the message + pub async fn handle_downstream_message(self: Arc) -> Result<(), TproxyError> { + let message = self + .channel_state + .sv1_server_receiver + .recv() + .await + .map_err(TproxyError::ChannelErrorReceiver)?; + match message { + Mining::OpenExtendedMiningChannel(m) => { + let mut open_channel_msg = m.clone(); + let mut user_identity = std::str::from_utf8(m.user_identity.as_ref()) + .map(|s| s.to_string()) + .unwrap_or_else(|_| "unknown".to_string()); + let hashrate = m.nominal_hash_rate; + let min_extranonce_size = m.min_extranonce_size as usize; + let mode = self + .channel_manager_data + .super_safe_lock(|c| c.mode.clone()); + + if mode == ChannelMode::Aggregated { + if self + .channel_manager_data + .super_safe_lock(|c| c.upstream_extended_channel.is_some()) + { + // We already have the unique channel open and so we create a new + // extranonce prefix and we send the + // OpenExtendedMiningChannelSuccess message directly to the sv1 + // server + let target = self.channel_manager_data.super_safe_lock(|c| { + c.upstream_extended_channel + .as_ref() + .unwrap() + .read() + .unwrap() + .get_target() + .clone() + }); + let new_extranonce_prefix = + self.channel_manager_data.super_safe_lock(|c| { + c.extranonce_prefix_factory + .as_ref() + .unwrap() + .safe_lock(|e| { + e.next_prefix_extended( + open_channel_msg.min_extranonce_size.into(), + ) + }) + .ok() + .and_then(|r| r.ok()) + }); + let new_extranonce_size = self.channel_manager_data.super_safe_lock(|c| { + c.extranonce_prefix_factory + .as_ref() + .unwrap() + .safe_lock(|e| e.get_range2_len()) + .unwrap() + }); + if let Some(new_extranonce_prefix) = new_extranonce_prefix { + if new_extranonce_size >= open_channel_msg.min_extranonce_size as usize + { + let next_channel_id = + self.channel_manager_data.super_safe_lock(|c| { + c.extended_channels.keys().max().unwrap_or(&0) + 1 + }); + 
let new_downstream_extended_channel = ExtendedChannel::new( + next_channel_id, + user_identity.clone(), + new_extranonce_prefix + .clone() + .into_b032() + .into_static() + .to_vec(), + target.clone(), + hashrate, + true, + new_extranonce_size as u16, + ); + self.channel_manager_data.super_safe_lock(|c| { + c.extended_channels.insert( + next_channel_id, + Arc::new(RwLock::new(new_downstream_extended_channel)), + ); + }); + let success_message = Mining::OpenExtendedMiningChannelSuccess( + OpenExtendedMiningChannelSuccess { + request_id: open_channel_msg.request_id, + channel_id: next_channel_id, + target: target.clone().into(), + extranonce_size: new_extranonce_size as u16, + extranonce_prefix: new_extranonce_prefix.clone().into(), + }, + ); + self.channel_state + .sv1_server_sender + .send(success_message) + .await + .map_err(|e| { + error!( + "Failed to send open channel message to upstream: {:?}", + e + ); + TproxyError::ChannelErrorSender + })?; + // send the last active job to the sv1 server + let last_active_job = + self.channel_manager_data.super_safe_lock(|c| { + c.upstream_extended_channel + .as_ref() + .and_then(|ch| ch.read().ok()) + .and_then(|ch| ch.get_active_job().map(|j| j.0.clone())) + }); + + if let Some(mut job) = last_active_job { + job.channel_id = next_channel_id; + self.channel_manager_data.super_safe_lock(|c| { + if let Some(ch) = c.extended_channels.get(&next_channel_id) + { + ch.write() + .unwrap() + .on_new_extended_mining_job(job.clone()); + } + }); + self.channel_state + .sv1_server_sender + .send(Mining::NewExtendedMiningJob(job.clone())) + .await + .map_err(|e| { + error!("Failed to send last new extended mining job to upstream: {:?}", e); + TproxyError::ChannelErrorSender + })?; + } + } + } + return Ok(()); + } else { + // We don't have the unique channel open yet and so we send the + // OpenExtendedMiningChannel message to the upstream + // Before doing that we need to truncate the user identity at the + // first dot and append 
.translator-proxy + // Truncate at the first dot and append .translator-proxy + let translator_identity = if let Some(dot_index) = user_identity.find('.') { + format!("{}.translator-proxy", &user_identity[..dot_index]) + } else { + format!("{user_identity}.translator-proxy") + }; + user_identity = translator_identity; + open_channel_msg.user_identity = + user_identity.as_bytes().to_vec().try_into().unwrap(); + } + } + // Store the user identity and hashrate + self.channel_manager_data.super_safe_lock(|c| { + c.pending_channels.insert( + open_channel_msg.request_id, + (user_identity, hashrate, min_extranonce_size), + ); + }); + + let frame = StdFrame::try_from(Message::Mining( + roles_logic_sv2::parsers_sv2::Mining::OpenExtendedMiningChannel( + open_channel_msg, + ), + )) + .map_err(TproxyError::ParserError)?; + self.channel_state + .upstream_sender + .send(frame.into()) + .await + .map_err(|e| { + error!("Failed to send open channel message to upstream: {:?}", e); + TproxyError::ChannelErrorSender + })?; + } + Mining::SubmitSharesExtended(mut m) => { + let value = self.channel_manager_data.super_safe_lock(|c| { + let extended_channel = c.extended_channels.get(&m.channel_id); + if let Some(extended_channel) = extended_channel { + let channel = extended_channel.write(); + if let Ok(mut channel) = channel { + return Some(( + channel.validate_share(m.clone()), + channel.get_share_accounting().clone(), + )); + } + } + None + }); + if let Some((Ok(_result), _share_accounting)) = value { + let mode = self + .channel_manager_data + .super_safe_lock(|c| c.mode.clone()); + if mode == ChannelMode::Aggregated + && self + .channel_manager_data + .super_safe_lock(|c| c.upstream_extended_channel.is_some()) + { + let upstream_extended_channel_id = + self.channel_manager_data.super_safe_lock(|c| { + let upstream_extended_channel = c + .upstream_extended_channel + .as_ref() + .unwrap() + .read() + .unwrap(); + upstream_extended_channel.get_channel_id() + }); + m.channel_id = 
upstream_extended_channel_id; // We need to set the channel id to the upstream extended + // channel id + // Get the downstream channel's extranonce prefix (contains + // upstream prefix + translator proxy prefix) + let downstream_extranonce_prefix = + self.channel_manager_data.super_safe_lock(|c| { + c.extended_channels.get(&m.channel_id).map(|channel| { + channel.read().unwrap().get_extranonce_prefix().clone() + }) + }); + // Get the length of the upstream prefix (range0) + let range0_len = self.channel_manager_data.super_safe_lock(|c| { + c.extranonce_prefix_factory + .as_ref() + .unwrap() + .safe_lock(|e| e.get_range0_len()) + .unwrap() + }); + if let Some(downstream_extranonce_prefix) = downstream_extranonce_prefix { + // Skip the upstream prefix (range0) and take the remaining + // bytes (translator proxy prefix) + let translator_prefix = &downstream_extranonce_prefix[range0_len..]; + // Create new extranonce: translator proxy prefix + miner's + // extranonce + let mut new_extranonce = translator_prefix.to_vec(); + new_extranonce.extend_from_slice(m.extranonce.as_ref()); + // Replace the original extranonce with the modified one for + // upstream submission + m.extranonce = new_extranonce.try_into()?; + } + } + let frame: StdFrame = Message::Mining(Mining::SubmitSharesExtended(m)) + .try_into() + .map_err(TproxyError::ParserError)?; + let frame: EitherFrame = frame.into(); + self.channel_state + .upstream_sender + .send(frame) + .await + .map_err(|e| { + error!("Error while sending message to upstream: {e:?}"); + TproxyError::ChannelErrorSender + })?; + } + } + _ => {} + } + + Ok(()) + } + + pub fn get_channel_manager(&self) -> ChannelManager { + ChannelManager { + channel_manager_data: self.channel_manager_data.clone(), + channel_state: self.channel_state.clone(), + } + } +} diff --git a/roles/translator/src/lib/sv2/channel_manager/data.rs b/roles/translator/src/lib/sv2/channel_manager/data.rs new file mode 100644 index 0000000000..e2714c5d96 --- /dev/null 
+++ b/roles/translator/src/lib/sv2/channel_manager/data.rs @@ -0,0 +1,64 @@ +use roles_logic_sv2::{ + channels_sv2::client::extended::ExtendedChannel, mining_sv2::ExtendedExtranonce, utils::Mutex, +}; +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + +/// Defines the operational mode for channel management. +/// +/// The channel manager can operate in two different modes that affect how +/// downstream connections are mapped to upstream SV2 channels: +#[derive(Debug, Clone, PartialEq, serde::Deserialize)] +pub enum ChannelMode { + /// All downstream connections share a single extended SV2 channel. + /// This mode uses extranonce prefix allocation to distinguish between + /// different downstream miners while presenting them as a single entity + /// to the upstream server. This is more efficient for pools with many + /// miners. + Aggregated, + /// Each downstream connection gets its own dedicated extended SV2 channel. + /// This mode provides complete isolation between downstream connections + /// but may be less efficient for large numbers of miners. + NonAggregated, +} + +/// Internal data structure for the ChannelManager. +/// +/// This struct maintains all the state needed for SV2 channel management, +/// including pending channel requests, active channels, and mode-specific +/// data structures like extranonce factories for aggregated mode. 
+#[derive(Debug, Clone)]
+pub struct ChannelManagerData {
+    /// Store pending channel info by downstream_id: (user_identity, hashrate,
+    /// downstream_extranonce_len)
+    pub pending_channels: HashMap<u32, (String, f32, usize)>,
+    /// Map of active extended channels by channel ID
+    pub extended_channels: HashMap<u32, Arc<RwLock<ExtendedChannel<'static>>>>,
+    /// The upstream extended channel used in aggregated mode
+    pub upstream_extended_channel: Option<Arc<RwLock<ExtendedChannel<'static>>>>,
+    /// Extranonce prefix factory for allocating unique prefixes in aggregated mode
+    pub extranonce_prefix_factory: Option<Arc<Mutex<ExtendedExtranonce>>>,
+    /// Current operational mode
+    pub mode: ChannelMode,
+}
+
+impl ChannelManagerData {
+    /// Creates a new ChannelManagerData instance.
+    ///
+    /// # Arguments
+    /// * `mode` - The operational mode (Aggregated or NonAggregated)
+    ///
+    /// # Returns
+    /// A new ChannelManagerData instance with empty state
+    pub fn new(mode: ChannelMode) -> Self {
+        Self {
+            pending_channels: HashMap::new(),
+            extended_channels: HashMap::new(),
+            upstream_extended_channel: None,
+            extranonce_prefix_factory: None,
+            mode,
+        }
+    }
+}
diff --git a/roles/translator/src/lib/sv2/channel_manager/message_handler.rs b/roles/translator/src/lib/sv2/channel_manager/message_handler.rs
new file mode 100644
index 0000000000..f908e12297
--- /dev/null
+++ b/roles/translator/src/lib/sv2/channel_manager/message_handler.rs
@@ -0,0 +1,390 @@
+use std::sync::{Arc, RwLock};
+
+use crate::{
+    error::TproxyError,
+    sv1::downstream::downstream::Downstream,
+    sv2::{channel_manager::ChannelMode, ChannelManager},
+    utils::proxy_extranonce_prefix_len,
+};
+use roles_logic_sv2::{
+    channels_sv2::client::extended::ExtendedChannel,
+    handlers_sv2::{HandlerError, ParseMiningMessagesFromUpstreamAsync},
+    mining_sv2::{
+        ExtendedExtranonce, Extranonce, NewExtendedMiningJob, OpenExtendedMiningChannelSuccess,
+        SetNewPrevHash, SetTarget, MAX_EXTRANONCE_LEN,
+    },
+    parsers_sv2::Mining,
+    utils::Mutex,
+};
+
+use tracing::{debug, error, info, warn};
+
+impl ParseMiningMessagesFromUpstreamAsync<Downstream> for ChannelManager
{ + fn get_channel_type(&self) -> roles_logic_sv2::handlers_sv2::SupportedChannelTypes { + roles_logic_sv2::handlers_sv2::SupportedChannelTypes::Extended + } + + fn is_work_selection_enabled(&self) -> bool { + false + } + + async fn handle_open_standard_mining_channel_success( + &mut self, + _m: roles_logic_sv2::mining_sv2::OpenStandardMiningChannelSuccess<'_>, + ) -> Result<(), HandlerError> { + unreachable!() + } + + async fn handle_open_extended_mining_channel_success( + &mut self, + m: OpenExtendedMiningChannelSuccess<'_>, + ) -> Result<(), HandlerError> { + let success = self.channel_manager_data.safe_lock(|channel_manager_data| { + // Get the stored user identity and hashrate using request_id as downstream_id + let (user_identity, nominal_hashrate, downstream_extranonce_len) = channel_manager_data + .pending_channels + .remove(&m.request_id) + .unwrap_or_else(|| ("unknown".to_string(), 100000.0, 0_usize)); + info!( + "Received OpenExtendedMiningChannelSuccess with request id: {} and channel id: {}, user: {}, hashrate: {}", + m.request_id, m.channel_id, user_identity, nominal_hashrate + ); + let extranonce_prefix = m.extranonce_prefix.clone().into_static().to_vec(); + let target = m.target.clone().into_static(); + let version_rolling = true; // we assume this is always true on extended channels + let extended_channel = ExtendedChannel::new( + m.channel_id, + user_identity.clone(), + extranonce_prefix.clone(), + target.clone().into(), + nominal_hashrate, + version_rolling, + m.extranonce_size, + ); + + // If we are in aggregated mode, we need to create a new extranonce prefix and insert the + // extended channel into the map + if channel_manager_data.mode == ChannelMode::Aggregated { + channel_manager_data.upstream_extended_channel = Some(Arc::new(RwLock::new(extended_channel.clone()))); + + let upstream_extranonce_prefix: Extranonce = m.extranonce_prefix.clone().into(); + let translator_proxy_extranonce_prefix_len = + 
proxy_extranonce_prefix_len(m.extranonce_size.into(), downstream_extranonce_len); + // range 0 is the extranonce1 from upstream + // range 1 is the extranonce1 added by the tproxy + // range 2 is the extranonce2 used by the miner for rolling (this is the one that is + // used for rolling) + let range_0 = 0..extranonce_prefix.len(); + let range1 = range_0.end..range_0.end + translator_proxy_extranonce_prefix_len; + let range2 = range1.end..MAX_EXTRANONCE_LEN; + let extended_extranonce_factory = ExtendedExtranonce::from_upstream_extranonce( + upstream_extranonce_prefix, + range_0, + range1, + range2, + ) + .unwrap(); + channel_manager_data.extranonce_prefix_factory = + Some(Arc::new(Mutex::new(extended_extranonce_factory))); + + let factory = channel_manager_data.extranonce_prefix_factory.as_ref().unwrap(); + let new_extranonce_size = factory.safe_lock(|f| f.get_range2_len()).unwrap() as u16; + if downstream_extranonce_len <= new_extranonce_size as usize { + let new_extranonce_prefix = factory + .safe_lock(|f| f.next_prefix_extended(new_extranonce_size as usize)) + .unwrap() + .unwrap() + .into_b032(); + let new_downstream_extended_channel = ExtendedChannel::new( + m.channel_id, + user_identity.clone(), + new_extranonce_prefix.clone().into_static().to_vec(), + target.clone().into(), + nominal_hashrate, + true, + new_extranonce_size, + ); + channel_manager_data.extended_channels.insert( + m.channel_id, + Arc::new(RwLock::new(new_downstream_extended_channel)), + ); + let new_open_extended_mining_channel_success = OpenExtendedMiningChannelSuccess { + request_id: m.request_id, + channel_id: m.channel_id, + extranonce_prefix: new_extranonce_prefix, + extranonce_size: new_extranonce_size, + target: m.target.clone(), + }; + return new_open_extended_mining_channel_success.into_static(); + } + } + + // If we are not in aggregated mode, we just insert the extended channel into the map + channel_manager_data.extended_channels + .insert(m.channel_id, 
Arc::new(RwLock::new(extended_channel))); + + m.into_static() + }).unwrap(); + + self.channel_state + .sv1_server_sender + .send(Mining::OpenExtendedMiningChannelSuccess(success.clone())) + .await + .map_err(|e| { + error!("Failed to send OpenExtendedMiningChannelSuccess: {:?}", e); + HandlerError::External(Box::new(TproxyError::ChannelErrorSender)) + })?; + + Ok(()) + } + + async fn handle_open_mining_channel_error( + &mut self, + m: roles_logic_sv2::mining_sv2::OpenMiningChannelError<'_>, + ) -> Result<(), HandlerError> { + error!( + "Received OpenExtendedMiningChannelError with error code {}", + std::str::from_utf8(m.error_code.as_ref()).unwrap_or("unknown error code") + ); + todo!("OpenMiningChannelError not handled yet"); + } + + async fn handle_update_channel_error( + &mut self, + m: roles_logic_sv2::mining_sv2::UpdateChannelError<'_>, + ) -> Result<(), HandlerError> { + error!( + "Received UpdateChannelError with error code {}", + std::str::from_utf8(m.error_code.as_ref()).unwrap_or("unknown error code") + ); + todo!() + } + + async fn handle_close_channel( + &mut self, + m: roles_logic_sv2::mining_sv2::CloseChannel<'_>, + ) -> Result<(), HandlerError> { + info!("Received CloseChannel for channel id: {}", m.channel_id); + _ = self.channel_manager_data.safe_lock(|channel_data_manager| { + if channel_data_manager.mode == ChannelMode::Aggregated { + if channel_data_manager.upstream_extended_channel.is_some() { + channel_data_manager.upstream_extended_channel = None; + } + } else { + channel_data_manager.extended_channels.remove(&m.channel_id); + } + }); + Ok(()) + } + + async fn handle_set_extranonce_prefix( + &mut self, + _m: roles_logic_sv2::mining_sv2::SetExtranoncePrefix<'_>, + ) -> Result<(), HandlerError> { + unreachable!("Cannot process SetExtranoncePrefix since set_extranonce is not supported for majority of sv1 clients"); + } + + async fn handle_submit_shares_success( + &mut self, + m: roles_logic_sv2::mining_sv2::SubmitSharesSuccess, + ) -> Result<(), 
HandlerError> { + info!("Received SubmitSharesSuccess"); + debug!("SubmitSharesSuccess: {:?}", m); + Ok(()) + } + + async fn handle_submit_shares_error( + &mut self, + m: roles_logic_sv2::mining_sv2::SubmitSharesError<'_>, + ) -> Result<(), HandlerError> { + warn!("Received SubmitSharesError: {:?}", m); + Ok(()) + } + + async fn handle_new_mining_job( + &mut self, + _m: roles_logic_sv2::mining_sv2::NewMiningJob<'_>, + ) -> Result<(), HandlerError> { + unreachable!( + "Cannot process NewMiningJob since Translator Proxy supports only extended mining jobs" + ) + } + + async fn handle_new_extended_mining_job( + &mut self, + m: NewExtendedMiningJob<'_>, + ) -> Result<(), HandlerError> { + let mut m_static = m.clone().into_static(); + _ = self.channel_manager_data.safe_lock(|channel_manage_data| { + if channel_manage_data.mode == ChannelMode::Aggregated { + if channel_manage_data.upstream_extended_channel.is_some() { + let mut upstream_extended_channel = channel_manage_data + .upstream_extended_channel + .as_ref() + .unwrap() + .write() + .unwrap(); + upstream_extended_channel.on_new_extended_mining_job(m_static.clone()); + m_static.channel_id = 0; // this is done so that every aggregated downstream + // will + // receive the NewExtendedMiningJob message + } + channel_manage_data + .extended_channels + .iter() + .for_each(|(_, channel)| { + let mut channel = channel.write().unwrap(); + channel.on_new_extended_mining_job(m_static.clone()); + }); + } else if let Some(channel) = channel_manage_data + .extended_channels + .get(&m_static.channel_id) + { + let mut channel = channel.write().unwrap(); + channel.on_new_extended_mining_job(m_static.clone()); + } + }); + let job = m_static; + if !job.is_future() { + self.channel_state + .sv1_server_sender + .send(Mining::NewExtendedMiningJob(job)) + .await + .map_err(|e| { + error!("Failed to send immediate NewExtendedMiningJob: {:?}", e); + HandlerError::External(Box::new(TproxyError::ChannelErrorSender)) + })?; + } + Ok(()) + } + 
+ async fn handle_set_new_prev_hash( + &mut self, + m: SetNewPrevHash<'_>, + ) -> Result<(), HandlerError> { + let m_static = m.clone().into_static(); + _ = self.channel_manager_data.safe_lock(|channel_manager_data| { + info!("Received SetNewPrevHash for channel id: {}", m.channel_id); + + if channel_manager_data.mode == ChannelMode::Aggregated { + if channel_manager_data.upstream_extended_channel.is_some() { + let mut upstream_extended_channel = channel_manager_data + .upstream_extended_channel + .as_ref() + .unwrap() + .write() + .unwrap(); + _ = upstream_extended_channel.on_set_new_prev_hash(m_static.clone()); + } + channel_manager_data + .extended_channels + .iter() + .for_each(|(_, channel)| { + let mut channel = channel.write().unwrap(); + _ = channel.on_set_new_prev_hash(m_static.clone()); + }); + } else if let Some(channel) = channel_manager_data + .extended_channels + .get(&m_static.channel_id) + { + let mut channel = channel.write().unwrap(); + _ = channel.on_set_new_prev_hash(m_static.clone()); + } + }); + + self.channel_state + .sv1_server_sender + .send(Mining::SetNewPrevHash(m_static.clone())) + .await + .map_err(|e| { + error!("Failed to send SetNewPrevHash: {:?}", e); + HandlerError::External(Box::new(TproxyError::ChannelErrorSender)) + })?; + + let mode = self + .channel_manager_data + .super_safe_lock(|c| c.mode.clone()); + + let active_job = if mode == ChannelMode::Aggregated { + self.channel_manager_data.super_safe_lock(|c| { + c.upstream_extended_channel + .as_ref() + .and_then(|ch| ch.read().ok()) + .and_then(|ch| ch.get_active_job().map(|j| j.0.clone())) + }) + } else { + self.channel_manager_data.super_safe_lock(|c| { + c.extended_channels + .get(&m.channel_id) + .and_then(|ch| ch.read().ok()) + .and_then(|ch| ch.get_active_job().map(|j| j.0.clone())) + }) + }; + + if let Some(mut job) = active_job { + if mode == ChannelMode::Aggregated { + job.channel_id = 0; + } + self.channel_state + .sv1_server_sender + 
.send(Mining::NewExtendedMiningJob(job)) + .await + .map_err(|e| { + error!("Failed to send NewExtendedMiningJob: {:?}", e); + HandlerError::External(Box::new(TproxyError::ChannelErrorSender)) + })?; + } + Ok(()) + } + + async fn handle_set_custom_mining_job_success( + &mut self, + _m: roles_logic_sv2::mining_sv2::SetCustomMiningJobSuccess, + ) -> Result<(), HandlerError> { + unreachable!("Cannot process SetCustomMiningJobSuccess since Translator Proxy does not support custom mining jobs") + } + + async fn handle_set_custom_mining_job_error( + &mut self, + _m: roles_logic_sv2::mining_sv2::SetCustomMiningJobError<'_>, + ) -> Result<(), HandlerError> { + unreachable!("Cannot process SetCustomMiningJobError since Translator Proxy does not support custom mining jobs") + } + + async fn handle_set_target(&mut self, m: SetTarget<'_>) -> Result<(), HandlerError> { + _ = self.channel_manager_data.safe_lock(|channel_manager_data| { + if channel_manager_data.mode == ChannelMode::Aggregated { + if channel_manager_data.upstream_extended_channel.is_some() { + let mut upstream_extended_channel = channel_manager_data + .upstream_extended_channel + .as_ref() + .unwrap() + .write() + .unwrap(); + upstream_extended_channel.set_target(m.maximum_target.clone().into()); + } + channel_manager_data + .extended_channels + .iter() + .for_each(|(_, channel)| { + let mut channel = channel.write().unwrap(); + channel.set_target(m.maximum_target.clone().into()); + }); + } else if let Some(channel) = channel_manager_data.extended_channels.get(&m.channel_id) + { + let mut channel = channel.write().unwrap(); + channel.set_target(m.maximum_target.clone().into()); + } + }); + Ok(()) + } + + async fn handle_set_group_channel( + &mut self, + _m: roles_logic_sv2::mining_sv2::SetGroupChannel<'_>, + ) -> Result<(), HandlerError> { + unreachable!( + "Cannot process SetGroupChannel since Translator Proxy does not support group channels" + ) + } +} diff --git 
a/roles/translator/src/lib/sv2/channel_manager/mod.rs b/roles/translator/src/lib/sv2/channel_manager/mod.rs new file mode 100644 index 0000000000..689a6efc7f --- /dev/null +++ b/roles/translator/src/lib/sv2/channel_manager/mod.rs @@ -0,0 +1,6 @@ +pub mod channel_manager; +pub mod message_handler; +pub use channel_manager::ChannelManager; +pub(super) mod channel; +pub(crate) mod data; +pub use data::ChannelMode; diff --git a/roles/translator/src/lib/sv2/mod.rs b/roles/translator/src/lib/sv2/mod.rs new file mode 100644 index 0000000000..d8cb5e360c --- /dev/null +++ b/roles/translator/src/lib/sv2/mod.rs @@ -0,0 +1,5 @@ +pub mod channel_manager; +pub mod upstream; + +pub use channel_manager::channel_manager::ChannelManager; +pub use upstream::upstream::Upstream; diff --git a/roles/translator/src/lib/sv2/upstream/channel.rs b/roles/translator/src/lib/sv2/upstream/channel.rs new file mode 100644 index 0000000000..2232df8bba --- /dev/null +++ b/roles/translator/src/lib/sv2/upstream/channel.rs @@ -0,0 +1,41 @@ +use async_channel::{Receiver, Sender}; +use codec_sv2::StandardEitherFrame; +use roles_logic_sv2::parsers_sv2::AnyMessage; +use tracing::debug; + +pub type Message = AnyMessage<'static>; +pub type EitherFrame = StandardEitherFrame; + +#[derive(Debug, Clone)] +pub struct UpstreamChannelState { + /// Receiver for the SV2 Upstream role + pub upstream_receiver: Receiver, + /// Sender for the SV2 Upstream role + pub upstream_sender: Sender, + /// Sender for the ChannelManager thread + pub channel_manager_sender: Sender, + /// Receiver for the ChannelManager thread + pub channel_manager_receiver: Receiver, +} + +impl UpstreamChannelState { + pub fn new( + channel_manager_sender: Sender, + channel_manager_receiver: Receiver, + upstream_receiver: Receiver, + upstream_sender: Sender, + ) -> Self { + Self { + channel_manager_sender, + channel_manager_receiver, + upstream_receiver, + upstream_sender, + } + } + + pub fn drop(&self) { + debug!("Closing all upstream channels"); + 
self.upstream_receiver.close();
+        self.upstream_sender.close();
+    }
+}
diff --git a/roles/translator/src/lib/sv2/upstream/data.rs b/roles/translator/src/lib/sv2/upstream/data.rs
new file mode 100644
index 0000000000..f5ee474e80
--- /dev/null
+++ b/roles/translator/src/lib/sv2/upstream/data.rs
@@ -0,0 +1,2 @@
+#[derive(Debug, Clone)]
+pub struct UpstreamData;
diff --git a/roles/translator/src/lib/sv2/upstream/message_handler.rs b/roles/translator/src/lib/sv2/upstream/message_handler.rs
new file mode 100644
index 0000000000..ace9647b14
--- /dev/null
+++ b/roles/translator/src/lib/sv2/upstream/message_handler.rs
@@ -0,0 +1,54 @@
+use crate::sv2::Upstream;
+use roles_logic_sv2::{
+    common_messages_sv2::{
+        ChannelEndpointChanged, Reconnect, SetupConnectionError, SetupConnectionSuccess,
+    },
+    handlers_sv2::{HandlerError, ParseCommonMessagesFromUpstreamAsync},
+};
+use tracing::{error, info};
+
+impl ParseCommonMessagesFromUpstreamAsync for Upstream {
+    async fn handle_setup_connection_error(
+        &mut self,
+        msg: SetupConnectionError<'_>,
+    ) -> Result<(), HandlerError> {
+        error!(
+            "Received `SetupConnectionError`: error_code={}, flags={:b}",
+            msg.error_code, msg.flags
+        );
+
+        todo!()
+    }
+
+    async fn handle_setup_connection_success(
+        &mut self,
+        msg: SetupConnectionSuccess,
+    ) -> Result<(), HandlerError> {
+        info!(
+            "Received `SetupConnectionSuccess`: version={}, flags={:b}",
+            msg.used_version, msg.flags
+        );
+
+        Ok(())
+    }
+
+    async fn handle_channel_endpoint_changed(
+        &mut self,
+        msg: ChannelEndpointChanged,
+    ) -> Result<(), HandlerError> {
+        info!(
+            "Received `ChannelEndpointChanged`: channel_id: {}",
+            msg.channel_id
+        );
+
+        todo!()
+    }
+
+    async fn handle_reconnect(&mut self, msg: Reconnect<'_>) -> Result<(), HandlerError> {
+        info!(
+            "Received `Reconnect`: new_host: {}, new_port: {}",
+            msg.new_host, msg.new_port
+        );
+        todo!()
+    }
+}
diff --git a/roles/translator/src/lib/sv2/upstream/mod.rs b/roles/translator/src/lib/sv2/upstream/mod.rs
new file
mode 100644 index 0000000000..01d3a0213a --- /dev/null +++ b/roles/translator/src/lib/sv2/upstream/mod.rs @@ -0,0 +1,5 @@ +pub mod message_handler; +pub mod upstream; +pub use upstream::Upstream; +pub(super) mod channel; +pub(super) mod data; diff --git a/roles/translator/src/lib/sv2/upstream/upstream.rs b/roles/translator/src/lib/sv2/upstream/upstream.rs new file mode 100644 index 0000000000..87f72c0f19 --- /dev/null +++ b/roles/translator/src/lib/sv2/upstream/upstream.rs @@ -0,0 +1,485 @@ +use crate::{ + error::TproxyError, + status::{handle_error, Status, StatusSender}, + sv2::upstream::{channel::UpstreamChannelState, data::UpstreamData}, + task_manager::TaskManager, + utils::{message_from_frame, ShutdownMessage}, +}; +use async_channel::{Receiver, Sender}; +use codec_sv2::{HandshakeRole, Initiator, StandardEitherFrame, StandardSv2Frame}; +use key_utils::Secp256k1PublicKey; +use network_helpers_sv2::noise_connection::Connection; +use roles_logic_sv2::{ + common_messages_sv2::{Protocol, SetupConnection}, + handlers_sv2::ParseCommonMessagesFromUpstreamAsync, + parsers_sv2::AnyMessage, + utils::Mutex, +}; +use std::{net::SocketAddr, sync::Arc}; +use tokio::{ + net::TcpStream, + sync::{broadcast, mpsc}, + time::{sleep, Duration}, +}; +use tracing::{debug, error, info, warn}; + +/// Type alias for SV2 messages with static lifetime +pub type Message = AnyMessage<'static>; +/// Type alias for standard SV2 frames +pub type StdFrame = StandardSv2Frame; +/// Type alias for either handshake or SV2 frames +pub type EitherFrame = StandardEitherFrame; + +/// Manages the upstream SV2 connection to a mining pool or proxy. 
+/// +/// This struct handles the SV2 protocol communication with upstream servers, +/// including: +/// - Connection establishment with multiple upstream fallbacks +/// - SV2 handshake and setup procedures +/// - Message routing between channel manager and upstream +/// - Connection monitoring and error handling +/// - Graceful shutdown coordination +/// +/// The upstream connection supports automatic failover between multiple +/// configured upstream servers and implements retry logic for connection +/// establishment. +#[derive(Debug, Clone)] +pub struct Upstream { + upstream_channel_state: UpstreamChannelState, + upstream_channel_data: Arc>, +} + +impl Upstream { + /// Creates a new upstream connection by attempting to connect to configured servers. + /// + /// This method tries to establish a connection to one of the provided upstream + /// servers, implementing retry logic and fallback behavior. It will attempt + /// to connect to each server multiple times before giving up. + /// + /// # Arguments + /// * `upstreams` - List of (address, public_key) pairs for upstream servers + /// * `channel_manager_sender` - Channel to send messages to the channel manager + /// * `channel_manager_receiver` - Channel to receive messages from the channel manager + /// * `notify_shutdown` - Broadcast channel for shutdown coordination + /// * `shutdown_complete_tx` - Channel to signal shutdown completion + /// + /// # Returns + /// * `Ok(Upstream)` - Successfully connected to an upstream server + /// * `Err(TproxyError)` - Failed to connect to any upstream server + pub async fn new( + upstreams: &[(SocketAddr, Secp256k1PublicKey)], + channel_manager_sender: Sender, + channel_manager_receiver: Receiver, + notify_shutdown: broadcast::Sender, + shutdown_complete_tx: mpsc::Sender<()>, + ) -> Result { + let mut shutdown_rx = notify_shutdown.subscribe(); + const RETRIES_PER_UPSTREAM: u8 = 3; + + for (index, (addr, pubkey)) in upstreams.iter().enumerate() { + info!("Trying to connect 
to upstream {} at {}", index, addr); + + for attempt in 1..=RETRIES_PER_UPSTREAM { + if shutdown_rx.try_recv().is_ok() { + info!("Shutdown signal received during upstream connection attempt. Aborting."); + drop(shutdown_complete_tx); + return Err(TproxyError::Shutdown); + } + + match TcpStream::connect(addr).await { + Ok(socket) => { + info!( + "Connected to upstream at {} (attempt {}/{})", + addr, attempt, RETRIES_PER_UPSTREAM + ); + + let initiator = Initiator::from_raw_k(pubkey.into_bytes())?; + match Connection::new(socket, HandshakeRole::Initiator(initiator)).await { + Ok((receiver, sender)) => { + let upstream_channel_state = UpstreamChannelState::new( + channel_manager_sender, + channel_manager_receiver, + receiver, + sender, + ); + let upstream_channel_data = Arc::new(Mutex::new(UpstreamData)); + info!("Successfully initialized upstream channel with {}", addr); + + return Ok(Self { + upstream_channel_state, + upstream_channel_data, + }); + } + Err(e) => { + error!( + "Failed Noise handshake with {}: {:?}. Retrying...", + addr, e + ); + } + } + } + Err(e) => { + error!( + "Failed to connect to {}: {}. Retry {}/{}...", + addr, e, attempt, RETRIES_PER_UPSTREAM + ); + } + } + + sleep(Duration::from_secs(5)).await; + } + + warn!("Exhausted retries for upstream {} at {}", index, addr); + } + + error!("Failed to connect to any configured upstream."); + drop(shutdown_complete_tx); + Err(TproxyError::Shutdown) + } + + /// Starts the upstream connection and begins message processing. + /// + /// This method: + /// - Completes the SV2 handshake with the upstream server + /// - Spawns the main message processing task + /// - Handles graceful shutdown coordination + /// + /// The method will first attempt to complete the SV2 setup connection + /// handshake. If successful, it spawns a task to handle bidirectional + /// message flow between the channel manager and upstream server. 
+ /// + /// # Arguments + /// * `notify_shutdown` - Broadcast channel for shutdown coordination + /// * `shutdown_complete_tx` - Channel to signal shutdown completion + /// * `status_sender` - Channel for sending status updates + /// * `task_manager` - Manager for spawned async tasks + /// + /// # Returns + /// * `Ok(())` - Upstream started successfully + /// * `Err(TproxyError)` - Error during startup or handshake + pub async fn start( + mut self, + notify_shutdown: broadcast::Sender, + shutdown_complete_tx: mpsc::Sender<()>, + status_sender: Sender, + task_manager: Arc, + ) -> Result<(), TproxyError> { + info!("Upstream: starting..."); + + let mut shutdown_rx = notify_shutdown.subscribe(); + + // Wait for connection setup or shutdown signal + tokio::select! { + result = self.setup_connection() => { + if let Err(e) = result { + error!("Upstream: failed to set up SV2 connection: {:?}", e); + drop(shutdown_complete_tx); + return Err(e); + } + info!("Upstream: SV2 connection setup successful."); + } + message = shutdown_rx.recv() => { + match message { + Ok(ShutdownMessage::ShutdownAll) => { + info!("Upstream: shutdown signal received during connection setup."); + drop(shutdown_complete_tx); + return Ok(()); + } + Ok(_) => {} + + Err(e) => { + error!("Upstream: failed to receive shutdown signal: {e}"); + drop(shutdown_complete_tx); + return Ok(()); + } + } + } + } + + // Wrap status sender and start upstream task + let wrapped_status_sender = StatusSender::Upstream(status_sender); + + self.run_upstream_task( + notify_shutdown, + shutdown_complete_tx, + wrapped_status_sender, + task_manager, + )?; + + Ok(()) + } + + /// Performs the SV2 handshake setup with the upstream server. 
+ /// + /// This method handles the initial SV2 protocol handshake by: + /// - Creating and sending a SetupConnection message + /// - Waiting for the handshake response + /// - Validating and processing the response + /// + /// The handshake establishes the protocol version, capabilities, and + /// other connection parameters needed for SV2 communication. + /// + /// # Returns + /// * `Ok(())` - Handshake completed successfully + /// * `Err(TproxyError)` - Handshake failed or connection error + pub async fn setup_connection(&mut self) -> Result<(), TproxyError> { + info!("Upstream: initiating SV2 handshake..."); + + // Build SetupConnection message + let setup_conn_msg = Self::get_setup_connection_message(2, 2, false)?; + let sv2_frame: StdFrame = + Message::Common(setup_conn_msg.into()) + .try_into() + .map_err(|e| { + error!("Failed to serialize SetupConnection message: {:?}", e); + TproxyError::ParserError(e) + })?; + + // Send SetupConnection message to upstream + info!("Upstream: sending SetupConnection..."); + self.upstream_channel_state + .upstream_sender + .send(sv2_frame.into()) + .await + .map_err(|e| { + error!("Failed to send SetupConnection to upstream: {:?}", e); + TproxyError::ChannelErrorSender + })?; + + let mut incoming: StdFrame = + match self.upstream_channel_state.upstream_receiver.recv().await { + Ok(frame) => { + debug!("Received handshake response from upstream."); + frame.try_into()? + } + Err(e) => { + error!("Failed to receive handshake response from upstream: {}", e); + return Err(TproxyError::CodecNoise( + codec_sv2::noise_sv2::Error::ExpectedIncomingHandshakeMessage, + )); + } + }; + + let message_type = incoming + .get_header() + .ok_or_else(|| { + error!("Expected handshake frame but no header found."); + framing_sv2::Error::ExpectedHandshakeFrame + })? 
+ .msg_type(); + + let payload = incoming.payload(); + + self.handle_common_message(message_type, payload).await?; + info!("Upstream: handshake completed successfully."); + Ok(()) + } + + /// Processes incoming messages from the upstream SV2 server. + /// + /// This method handles different types of frames received from upstream: + /// - SV2 frames: Parses and routes mining/common messages appropriately + /// - Handshake frames: Logs for debugging (shouldn't occur during normal operation) + /// + /// Common messages are handled directly, while mining messages are forwarded + /// to the channel manager for processing and distribution to downstream connections. + /// + /// # Arguments + /// * `message` - The frame received from the upstream server + /// + /// # Returns + /// * `Ok(())` - Message processed successfully + /// * `Err(TproxyError)` - Error processing the message + pub async fn on_upstream_message(&self, message: EitherFrame) -> Result<(), TproxyError> { + let mut upstream = self.get_upstream(); + match message { + EitherFrame::Sv2(sv2_frame) => { + // Convert to standard frame + let std_frame: StdFrame = sv2_frame; + + // Parse message from frame + let mut frame: codec_sv2::Frame, buffer_sv2::Slice> = + std_frame.clone().into(); + + let (messsage_type, mut payload, parsed_message) = message_from_frame(&mut frame)?; + + match parsed_message { + AnyMessage::Common(_) => { + // Handle common upstream messages + upstream + .handle_common_message(messsage_type, &mut payload) + .await?; + } + + AnyMessage::Mining(_) => { + // Forward mining message to channel manager + let frame_to_forward = EitherFrame::Sv2(std_frame); + self.upstream_channel_state + .channel_manager_sender + .send(frame_to_forward) + .await + .map_err(|e| { + error!("Failed to send mining message to channel manager: {:?}", e); + TproxyError::ChannelErrorSender + })?; + } + + _ => { + error!("Received unsupported message type from upstream."); + return Err(TproxyError::UnexpectedMessage); + } 
+ } + } + + EitherFrame::HandShake(handshake_frame) => { + debug!("Received handshake frame: {:?}", handshake_frame); + } + } + Ok(()) + } + + /// Spawns a unified task to handle upstream message I/O and shutdown logic. + fn run_upstream_task( + self, + notify_shutdown: broadcast::Sender, + shutdown_complete_tx: mpsc::Sender<()>, + status_sender: StatusSender, + task_manager: Arc, + ) -> Result<(), TproxyError> { + let mut shutdown_rx = notify_shutdown.subscribe(); + let shutdown_complete_tx = shutdown_complete_tx.clone(); + + task_manager.spawn(async move { + info!("Upstream task started (combined sender + receiver loop)."); + + loop { + tokio::select! { + // Handle shutdown signals + shutdown = shutdown_rx.recv() => { + match shutdown { + Ok(ShutdownMessage::ShutdownAll) => { + info!("Upstream: received ShutdownAll signal. Exiting loop."); + break; + } + Ok(_) => { + // Ignore other shutdown variants for upstream + } + Err(e) => { + error!("Upstream: failed to receive shutdown signal: {e}"); + break; + } + } + } + + // Handle incoming SV2 messages from upstream + result = self.upstream_channel_state.upstream_receiver.recv() => { + match result { + Ok(frame) => { + debug!("Upstream: received frame."); + if let Err(e) = self.on_upstream_message(frame).await { + error!("Upstream: error while processing message: {e:?}"); + handle_error(&status_sender, TproxyError::ChannelErrorSender).await; + } + } + Err(e) => { + error!("Upstream: receiver channel closed unexpectedly: {e}"); + handle_error(&status_sender, TproxyError::ChannelErrorReceiver(e)).await; + break; + } + } + } + + // Handle messages from channel manager to send upstream + result = self.upstream_channel_state.channel_manager_receiver.recv() => { + match result { + Ok(msg) => { + info!("Upstream: sending message from channel manager."); + if let Err(e) = self.send_upstream(msg).await { + error!("Upstream: failed to send message: {e:?}"); + handle_error(&status_sender, TproxyError::ChannelErrorSender).await; 
+ } + } + Err(e) => { + error!("Upstream: channel manager receiver closed: {e}"); + handle_error(&status_sender, TproxyError::ChannelErrorReceiver(e)).await; + break; + } + } + } + } + } + + self.upstream_channel_state.drop(); + warn!("Upstream: task shutting down cleanly."); + drop(shutdown_complete_tx); + }); + + Ok(()) + } + + /// Sends a message to the upstream SV2 server. + /// + /// This method forwards messages from the channel manager to the upstream + /// server. Messages are typically mining-related (share submissions, channel + /// requests, etc.) that need to be sent upstream. + /// + /// # Arguments + /// * `sv2_frame` - The SV2 frame to send to the upstream server + /// + /// # Returns + /// * `Ok(())` - Message sent successfully + /// * `Err(TproxyError)` - Error sending the message + pub async fn send_upstream(&self, sv2_frame: EitherFrame) -> Result<(), TproxyError> { + debug!("Sending message to upstream."); + + self.upstream_channel_state + .upstream_sender + .send(sv2_frame) + .await + .map_err(|e| { + error!("Failed to send message to upstream: {:?}", e); + TproxyError::ChannelErrorSender + })?; + + Ok(()) + } + + /// Constructs the `SetupConnection` message. 
+ #[allow(clippy::result_large_err)] + fn get_setup_connection_message( + min_version: u16, + max_version: u16, + is_work_selection_enabled: bool, + ) -> Result, TproxyError> { + let endpoint_host = "0.0.0.0".to_string().into_bytes().try_into()?; + let vendor = "SRI".to_string().try_into()?; + let hardware_version = "Translator Proxy".to_string().try_into()?; + let firmware = String::new().try_into()?; + let device_id = String::new().try_into()?; + let flags = if is_work_selection_enabled { + 0b110 + } else { + 0b100 + }; + + Ok(SetupConnection { + protocol: Protocol::MiningProtocol, + min_version, + max_version, + flags, + endpoint_host, + endpoint_port: 50, + vendor, + hardware_version, + firmware, + device_id, + }) + } + + fn get_upstream(&self) -> Upstream { + Upstream { + upstream_channel_data: self.upstream_channel_data.clone(), + upstream_channel_state: self.upstream_channel_state.clone(), + } + } +} diff --git a/roles/translator/src/lib/task_manager.rs b/roles/translator/src/lib/task_manager.rs new file mode 100644 index 0000000000..bfafa0fdea --- /dev/null +++ b/roles/translator/src/lib/task_manager.rs @@ -0,0 +1,73 @@ +use std::sync::Mutex as StdMutex; +use tokio::task::JoinHandle; + +/// Manages a collection of spawned tokio tasks. +/// +/// This struct provides a centralized way to spawn, track, and manage the lifecycle +/// of async tasks in the translator. It maintains a list of join handles that can +/// be used to wait for all tasks to complete or abort them during shutdown. +pub struct TaskManager { + tasks: StdMutex>>, +} + +impl Default for TaskManager { + fn default() -> Self { + Self::new() + } +} + +impl TaskManager { + /// Creates a new TaskManager instance. + /// + /// Initializes an empty task manager ready to spawn and track tasks. + pub fn new() -> Self { + Self { + tasks: StdMutex::new(Vec::new()), + } + } + + /// Spawns a new async task and adds it to the managed collection. 
+ /// + /// The task will be tracked by this manager and can be waited for or aborted + /// using the other methods. + /// + /// # Arguments + /// * `fut` - The future to spawn as a task + pub fn spawn(&self, fut: F) + where + F: std::future::Future + Send + 'static, + { + let handle = tokio::spawn(async move { + fut.await; + }); + + self.tasks.lock().unwrap().push(handle); + } + + /// Waits for all managed tasks to complete. + /// + /// This method will block until all tasks that were spawned through this + /// manager have finished executing. Tasks are joined in reverse order + /// (most recently spawned first). + pub async fn join_all(&self) { + let handles = { + let mut tasks = self.tasks.lock().unwrap(); + std::mem::take(&mut *tasks) + }; + + for handle in handles { + let _ = handle.await; + } + } + + /// Aborts all managed tasks. + /// + /// This method immediately cancels all tasks that were spawned through this + /// manager. The tasks will be terminated without waiting for them to complete. + pub async fn abort_all(&self) { + let mut tasks = self.tasks.lock().unwrap(); + for handle in tasks.drain(..) { + handle.abort(); + } + } +} diff --git a/roles/translator/src/lib/upstream_sv2/diff_management.rs b/roles/translator/src/lib/upstream_sv2/diff_management.rs deleted file mode 100644 index 47ede36ebd..0000000000 --- a/roles/translator/src/lib/upstream_sv2/diff_management.rs +++ /dev/null @@ -1,66 +0,0 @@ -//! ## Upstream SV2 Difficulty Management -//! -//! This module contains logic for managing difficulty and hashrate updates -//! specifically for the upstream SV2 connection. -//! -//! It defines method for the [`Upstream`] struct -//! related to checking configuration intervals and sending -//! `UpdateChannel` messages to the upstream server -//! based on configured nominal hashrate changes. 
- -use super::Upstream; - -use super::super::{ - error::ProxyResult, - upstream_sv2::{EitherFrame, Message, StdFrame}, -}; -use std::{sync::Arc, time::Duration}; -use stratum_common::roles_logic_sv2::{ - codec_sv2::binary_sv2::U256, mining_sv2::UpdateChannel, parsers_sv2::Mining, utils::Mutex, - Error as RolesLogicError, -}; - -impl Upstream { - /// Attempts to update the upstream channel's nominal hashrate if the configured - /// update interval has elapsed or if the nominal hashrate has changed - pub(super) async fn try_update_hashrate(self_: Arc>) -> ProxyResult<'static, ()> { - let (channel_id_option, diff_mgmt, tx_frame, last_sent_hashrate) = - self_.safe_lock(|u| { - ( - u.channel_id, - u.difficulty_config.clone(), - u.connection.sender.clone(), - u.last_sent_hashrate, - ) - })?; - - let channel_id = channel_id_option.ok_or(super::super::error::Error::RolesSv2Logic( - RolesLogicError::NotFoundChannelId, - ))?; - - let (timeout, new_hashrate) = diff_mgmt - .safe_lock(|d| (d.channel_diff_update_interval, d.channel_nominal_hashrate))?; - - let has_changed = Some(new_hashrate) != last_sent_hashrate; - - if has_changed { - // Send UpdateChannel only if hashrate actually changed - let update_channel = UpdateChannel { - channel_id, - nominal_hash_rate: new_hashrate, - maximum_target: U256::from([0xff; 32]), - }; - let message = Message::Mining(Mining::UpdateChannel(update_channel)); - let either_frame: StdFrame = message.try_into()?; - let frame: EitherFrame = either_frame.into(); - - tx_frame.send(frame).await?; - - self_.safe_lock(|u| u.last_sent_hashrate = Some(new_hashrate))?; - } - - // Always sleep, regardless of update - tokio::time::sleep(Duration::from_secs(timeout as u64)).await; - Ok(()) - } -} diff --git a/roles/translator/src/lib/upstream_sv2/mod.rs b/roles/translator/src/lib/upstream_sv2/mod.rs deleted file mode 100644 index 9f334238a8..0000000000 --- a/roles/translator/src/lib/upstream_sv2/mod.rs +++ /dev/null @@ -1,33 +0,0 @@ -//! 
## Upstream SV2 Module -//! -//! This module encapsulates the logic for handling the upstream connection using the SV2 protocol. -//! -//! The module is organized into the following sub-modules: -//! - [`diff_management`]: Contains logic related to managing difficulty and hashrate updates. -//! - [`upstream`]: Defines the main [`Upstream`] struct and its core functionalities. -//! - [`upstream_connection`]: Handles the underlying connection details and frame -//! sending/receiving. - -use stratum_common::roles_logic_sv2::{ - codec_sv2::{StandardEitherFrame, StandardSv2Frame}, - parsers_sv2::AnyMessage, -}; - -pub mod diff_management; -pub mod upstream; -pub mod upstream_connection; -pub use upstream::Upstream; -pub use upstream_connection::UpstreamConnection; - -pub type Message = AnyMessage<'static>; -pub type StdFrame = StandardSv2Frame; -pub type EitherFrame = StandardEitherFrame; - -/// Represents the state or parameters negotiated during an SV2 Setup Connection message. -#[derive(Clone, Copy, Debug)] -pub struct Sv2MiningConnection { - _version: u16, - _setup_connection_flags: u32, - #[allow(dead_code)] - setup_connection_success_flags: u32, -} diff --git a/roles/translator/src/lib/upstream_sv2/upstream.rs b/roles/translator/src/lib/upstream_sv2/upstream.rs deleted file mode 100644 index aeca7e7499..0000000000 --- a/roles/translator/src/lib/upstream_sv2/upstream.rs +++ /dev/null @@ -1,874 +0,0 @@ -//! ## Upstream SV2 Module: Upstream Connection Logic -//! -//! Defines the [`Upstream`] structure, which represents and manages the connection -//! to a single upstream role. -//! -//! This module is responsible for: -//! - Establishing and maintaining the network connection to the upstream role. -//! - Performing the SV2 handshake and opening mining channels. -//! - Sending translated SV2 `SubmitSharesExtended` messages received from the Bridge to the -//! upstream pool. -//! - Receiving SV2 job messages (`SetNewPrevHash`, `NewExtendedMiningJob`, etc.) 
from the upstream -//! pool and forwarding them to the Bridge for translation. -//! - Handling various SV2 messages related to connection setup, channel management, and mining -//! operations. -//! - Managing difficulty updates for the upstream channel based on aggregated hashrate from -//! downstream miners. -//! - Implementing the necessary SV2 roles logic traits (`IsUpstream`, `IsMiningUpstream`, -//! `ParseCommonMessagesFromUpstream`, `ParseMiningMessagesFromUpstream`). - -use crate::{ - config::UpstreamDifficultyConfig, - downstream_sv1::Downstream, - error::{ - Error::{CodecNoise, InvalidExtranonce, PoisonLock, UpstreamIncoming}, - ProxyResult, - }, - status, - upstream_sv2::{EitherFrame, Message, StdFrame, UpstreamConnection}, -}; -use async_channel::{Receiver, Sender}; -use error_handling::handle_result; -use key_utils::Secp256k1PublicKey; -use std::{ - net::SocketAddr, - sync::{atomic::AtomicBool, Arc}, -}; -use stratum_common::{ - network_helpers_sv2::noise_connection::Connection, - roles_logic_sv2::{ - self, - codec_sv2::{self, binary_sv2::u256_from_int, framing_sv2, HandshakeRole, Initiator}, - common_messages_sv2::{Protocol, SetupConnection}, - handlers::{ - common::{ParseCommonMessagesFromUpstream, SendTo as SendToCommon}, - mining::{ParseMiningMessagesFromUpstream, SendTo}, - }, - mining_sv2::{ - ExtendedExtranonce, Extranonce, NewExtendedMiningJob, OpenExtendedMiningChannel, - SetNewPrevHash, SubmitSharesExtended, - }, - parsers_sv2::Mining, - utils::Mutex, - Error as RolesLogicError, - Error::NoUpstreamsConnected, - }, -}; -use tokio::{ - net::TcpStream, - task::AbortHandle, - time::{sleep, Duration}, -}; -use tracing::{debug, error, info, warn}; - -use stratum_common::roles_logic_sv2::{ - bitcoin::BlockHash, common_messages_sv2::Reconnect, handlers::mining::SupportedChannelTypes, - mining_sv2::SetGroupChannel, -}; - -/// Atomic boolean flag used for synchronization between receiving a new job -/// and handling a new previous hash. 
Indicates whether a `NewExtendedMiningJob` -/// has been fully processed. -pub static IS_NEW_JOB_HANDLED: AtomicBool = AtomicBool::new(true); -/// Represents the currently active `prevhash` of the mining job being worked on OR being submitted -/// from the Downstream role. -#[derive(Debug, Clone)] -#[allow(dead_code)] -struct PrevHash { - /// `prevhash` of mining job. - prev_hash: BlockHash, - /// `nBits` encoded difficulty target. - nbits: u32, -} - -/// Represents a connection to a single SV2 Upstream role. -/// -/// This struct holds the state and communication channels necessary to interact -/// with the upstream server, including sending share submissions, receiving job -/// templates, and managing the SV2 protocol handshake and channel lifecycle. -#[derive(Debug, Clone)] -pub struct Upstream { - /// Newly assigned identifier of the channel, stable for the whole lifetime of the connection, - /// e.g. it is used for broadcasting new jobs by the `NewExtendedMiningJob` message. - pub(super) channel_id: Option, - /// Identifier of the job as provided by the `NewExtendedMiningJob` message. - job_id: Option, - /// Identifier of the job as provided by the ` SetCustomMiningJobSucces` message - last_job_id: Option, - /// Bytes used as implicit first part of `extranonce`. - extranonce_prefix: Option>, - /// Represents a connection to a SV2 Upstream role. - pub(super) connection: UpstreamConnection, - /// Receives SV2 `SubmitSharesExtended` messages translated from SV1 `mining.submit` messages. - /// Translated by and sent from the `Bridge`. - rx_sv2_submit_shares_ext: Receiver>, - /// Sends SV2 `SetNewPrevHash` messages to be translated (along with SV2 `NewExtendedMiningJob` - /// messages) into SV1 `mining.notify` messages. Received and translated by the `Bridge`. - tx_sv2_set_new_prev_hash: Sender>, - /// Sends SV2 `NewExtendedMiningJob` messages to be translated (along with SV2 `SetNewPrevHash` - /// messages) into SV1 `mining.notify` messages. 
Received and translated by the `Bridge`. - tx_sv2_new_ext_mining_job: Sender>, - /// Sends the extranonce1 and the channel id received in the SV2 - /// `OpenExtendedMiningChannelSuccess` message to be used by the `Downstream` and sent to - /// the Downstream role in a SV2 `mining.subscribe` response message. Passed to the - /// `Downstream` on connection creation. - tx_sv2_extranonce: Sender<(ExtendedExtranonce, u32)>, - /// This allows the upstream threads to be able to communicate back to the main thread its - /// current status. - tx_status: status::Sender, - /// The first `target` is received by the Upstream role in the SV2 - /// `OpenExtendedMiningChannelSuccess` message, then updated periodically via SV2 `SetTarget` - /// messages. Passed to the `Downstream` on connection creation and sent to the Downstream role - /// via the SV1 `mining.set_difficulty` message. - target: Arc>>, - /// Tracks the most recently sent nominal hashrate to prevent unnecessary updates. - pub last_sent_hashrate: Option, - /// Minimum `extranonce2` size. Initially requested in the `proxy-config.toml`, and ultimately - /// set by the SV2 Upstream via the SV2 `OpenExtendedMiningChannelSuccess` message. - pub min_extranonce_size: u16, - /// The size of the extranonce1 provided by the upstream role. - pub upstream_extranonce1_size: usize, - // values used to update the channel with the correct nominal hashrate. - // each Downstream instance will add and subtract their hashrates as needed - // and the upstream just needs to occasionally check if it has changed more than - // than the configured percentage - pub(super) difficulty_config: Arc>, - task_collector: Arc>>, -} - -impl PartialEq for Upstream { - fn eq(&self, other: &Self) -> bool { - self.channel_id == other.channel_id - } -} - -impl Upstream { - /// Instantiate a new `Upstream`. - /// Connect to the SV2 Upstream role (most typically a SV2 Pool). 
Initializes the - /// `UpstreamConnection` with a channel to send and receive messages from the SV2 Upstream - /// role and uses channels provided in the function arguments to send and receive messages - /// from the `Downstream`. - #[allow(clippy::too_many_arguments)] - pub async fn new( - address: SocketAddr, - authority_public_key: Secp256k1PublicKey, - rx_sv2_submit_shares_ext: Receiver>, - tx_sv2_set_new_prev_hash: Sender>, - tx_sv2_new_ext_mining_job: Sender>, - min_extranonce_size: u16, - tx_sv2_extranonce: Sender<(ExtendedExtranonce, u32)>, - tx_status: status::Sender, - target: Arc>>, - difficulty_config: Arc>, - task_collector: Arc>>, - ) -> ProxyResult<'static, Arc>> { - // Connect to the SV2 Upstream role retry connection every 5 seconds. - let socket = loop { - match TcpStream::connect(address).await { - Ok(socket) => break socket, - Err(e) => { - error!( - "Failed to connect to Upstream role at {}, retrying in 5s: {}", - address, e - ); - - sleep(Duration::from_secs(5)).await; - } - } - }; - - let pub_key: Secp256k1PublicKey = authority_public_key; - let initiator = Initiator::from_raw_k(pub_key.into_bytes())?; - - info!( - "PROXY SERVER - ACCEPTING FROM UPSTREAM: {}", - socket.peer_addr()? 
- ); - - // Channel to send and receive messages to the SV2 Upstream role - let (receiver, sender) = Connection::new(socket, HandshakeRole::Initiator(initiator)) - .await - .unwrap(); - // Initialize `UpstreamConnection` with channel for SV2 Upstream role communication and - // channel for downstream Translator Proxy communication - let connection = UpstreamConnection { receiver, sender }; - - Ok(Arc::new(Mutex::new(Self { - connection, - rx_sv2_submit_shares_ext, - extranonce_prefix: None, - tx_sv2_set_new_prev_hash, - tx_sv2_new_ext_mining_job, - channel_id: None, - job_id: None, - last_job_id: None, - min_extranonce_size, - upstream_extranonce1_size: 16, /* 16 is the default since that is the only value the - * pool supports currently */ - tx_sv2_extranonce, - tx_status, - target, - last_sent_hashrate: None, - difficulty_config, - task_collector, - }))) - } - - /// Performs the SV2 connection setup handshake with the Upstream role. - /// - /// Sends a `SetupConnection` message specifying supported protocol versions - /// and flags. Waits for the upstream to respond with either `SetupConnectionSuccess` - /// or `SetupConnectionError`.Upon successful setup, it then sends an - /// `OpenExtendedMiningChannel` request to establish a mining channel, including the - /// negotiated minimum extranonce size and initial nominal hashrate. 
- pub async fn connect( - self_: Arc>, - min_version: u16, - max_version: u16, - ) -> ProxyResult<'static, ()> { - // Get the `SetupConnection` message with Mining Device information (currently hard coded) - let setup_connection = Self::get_setup_connection_message(min_version, max_version, false)?; - let mut connection = self_.safe_lock(|s| s.connection.clone())?; - - // Put the `SetupConnection` message in a `StdFrame` to be sent over the wire - let sv2_frame: StdFrame = Message::Common(setup_connection.into()).try_into()?; - // Send the `SetupConnection` frame to the SV2 Upstream role - // Only one Upstream role is supported, panics if multiple connections are encountered - connection.send(sv2_frame).await?; - - // Wait for the SV2 Upstream to respond with either a `SetupConnectionSuccess` or a - // `SetupConnectionError` inside a SV2 binary message frame - let mut incoming: StdFrame = match connection.receiver.recv().await { - Ok(frame) => frame.try_into()?, - Err(e) => { - error!("Upstream connection closed: {}", e); - return Err(CodecNoise( - codec_sv2::noise_sv2::Error::ExpectedIncomingHandshakeMessage, - )); - } - }; - - // Gets the binary frame message type from the message header - let message_type = if let Some(header) = incoming.get_header() { - header.msg_type() - } else { - return Err(framing_sv2::Error::ExpectedHandshakeFrame.into()); - }; - // Gets the message payload - let payload = incoming.payload(); - - // Handle the incoming message (should be either `SetupConnectionSuccess` or - // `SetupConnectionError`) - ParseCommonMessagesFromUpstream::handle_message_common( - self_.clone(), - message_type, - payload, - )?; - - // Send open channel request before returning - let nominal_hash_rate = self_.safe_lock(|u| { - u.difficulty_config - .safe_lock(|c| c.channel_nominal_hashrate) - .map_err(|_e| PoisonLock) - })??; - let user_identity = "ABC".to_string().try_into()?; - - // Get the min_extranonce_size from the instance - let min_extranonce_size = 
self_.safe_lock(|u| u.min_extranonce_size)?; - - let open_channel = Mining::OpenExtendedMiningChannel(OpenExtendedMiningChannel { - request_id: 0, // TODO - user_identity, // TODO - nominal_hash_rate, - max_target: u256_from_int(u64::MAX), // TODO - min_extranonce_size, - }); - - // reset channel hashrate so downstreams can manage from now on out - self_.safe_lock(|u| { - u.difficulty_config - .safe_lock(|d| d.channel_nominal_hashrate = 0.0) - .map_err(|_e| PoisonLock) - })??; - - let sv2_frame: StdFrame = Message::Mining(open_channel).try_into()?; - connection.send(sv2_frame).await?; - - Ok(()) - } - - /// Spawns tasks to handle incoming SV2 messages from the Upstream role. - /// - /// This method creates two main asynchronous tasks: - /// 1. A task to handle incoming SV2 frames, parsing them, routing them to the appropriate - /// message handlers (`handle_message_mining`), and forwarding translated messages to the - /// Bridge or responding directly to the upstream if necessary. - /// 2. 
A task to periodically check and update the nominal hashrate sent to the upstream based - /// on th - #[allow(clippy::result_large_err)] - pub fn parse_incoming(self_: Arc>) -> ProxyResult<'static, ()> { - let clone = self_.clone(); - let task_collector = self_.safe_lock(|s| s.task_collector.clone()).unwrap(); - let collector1 = task_collector.clone(); - let collector2 = task_collector.clone(); - let ( - tx_frame, - tx_sv2_extranonce, - tx_sv2_new_ext_mining_job, - tx_sv2_set_new_prev_hash, - recv, - tx_status, - ) = clone.safe_lock(|s| { - ( - s.connection.sender.clone(), - s.tx_sv2_extranonce.clone(), - s.tx_sv2_new_ext_mining_job.clone(), - s.tx_sv2_set_new_prev_hash.clone(), - s.connection.receiver.clone(), - s.tx_status.clone(), - ) - })?; - { - let self_ = self_.clone(); - let tx_status = tx_status.clone(); - let start_diff_management = tokio::task::spawn(async move { - // No need to start diff management immediatly - sleep(Duration::from_secs(10)).await; - loop { - handle_result!(tx_status, Self::try_update_hashrate(self_.clone()).await); - } - }); - let _ = collector1.safe_lock(|a| { - a.push(( - start_diff_management.abort_handle(), - "start_diff_management".to_string(), - )) - }); - } - - let parse_incoming = tokio::task::spawn(async move { - loop { - // Waiting to receive a message from the SV2 Upstream role - let incoming = handle_result!(tx_status, recv.recv().await); - let mut incoming: StdFrame = handle_result!(tx_status, incoming.try_into()); - // On message receive, get the message type from the message header and get the - // message payload - let message_type = - incoming - .get_header() - .ok_or(super::super::error::Error::FramingSv2( - framing_sv2::Error::ExpectedSv2Frame, - )); - - let message_type = handle_result!(tx_status, message_type).msg_type(); - - let payload = incoming.payload(); - - // Gets the response message for the received SV2 Upstream role message - // `handle_message_mining` takes care of the SetupConnection + - // 
SetupConnection.Success - let next_message_to_send = - Upstream::handle_message_mining(self_.clone(), message_type, payload); - - // Routes the incoming messages accordingly - match next_message_to_send { - // No translation required, simply respond to SV2 pool w a SV2 message - Ok(SendTo::Respond(message_for_upstream)) => { - let message = Message::Mining(message_for_upstream); - - let frame: StdFrame = handle_result!(tx_status, message.try_into()); - let frame: EitherFrame = frame.into(); - - // Relay the response message to the Upstream role - handle_result!(tx_status, tx_frame.send(frame).await); - } - // Does not send the messages anywhere, but instead handle them internally - Ok(SendTo::None(Some(m))) => { - match m { - Mining::OpenExtendedMiningChannelSuccess(m) => { - let prefix_len = m.extranonce_prefix.len(); - // update upstream_extranonce1_size for tracking - let miner_extranonce2_size = self_ - .safe_lock(|u| { - u.upstream_extranonce1_size = prefix_len; - u.min_extranonce_size as usize - }) - .map_err(|_e| PoisonLock); - let miner_extranonce2_size = - handle_result!(tx_status, miner_extranonce2_size); - let extranonce_prefix: Extranonce = m.extranonce_prefix.into(); - // Create the extended extranonce that will be saved in bridge and - // it will be used to open downstream (sv1) channels - // range 0 is the extranonce1 from upstream - // range 1 is the extranonce1 added by the tproxy - // range 2 is the extranonce2 used by the miner for rolling - // range 0 + range 1 is the extranonce1 sent to the miner - let tproxy_e1_len = super::super::utils::proxy_extranonce1_len( - m.extranonce_size as usize, - miner_extranonce2_size, - ); - let range_0 = 0..prefix_len; // upstream extranonce1 - let range_1 = prefix_len..prefix_len + tproxy_e1_len; // downstream extranonce1 - let range_2 = prefix_len + tproxy_e1_len - ..prefix_len + m.extranonce_size as usize; // extranonce2 - let extended = handle_result!(tx_status, ExtendedExtranonce::from_upstream_extranonce( 
- extranonce_prefix.clone(), range_0.clone(), range_1.clone(), range_2.clone(), - ).map_err(|err| InvalidExtranonce(format!("Impossible to create a valid extended extranonce from {extranonce_prefix:?} {range_0:?} {range_1:?} {range_2:?}: {err:?}")))); - handle_result!( - tx_status, - tx_sv2_extranonce.send((extended, m.channel_id)).await - ); - } - Mining::NewExtendedMiningJob(m) => { - let job_id = m.job_id; - let res = self_ - .safe_lock(|s| { - let _ = s.job_id.insert(job_id); - }) - .map_err(|_e| PoisonLock); - handle_result!(tx_status, res); - handle_result!(tx_status, tx_sv2_new_ext_mining_job.send(m).await); - } - Mining::SetNewPrevHash(m) => { - handle_result!(tx_status, tx_sv2_set_new_prev_hash.send(m).await); - } - Mining::CloseChannel(_m) => { - error!("Received Mining::CloseChannel msg from upstream!"); - handle_result!(tx_status, Err(NoUpstreamsConnected)); - } - Mining::OpenMiningChannelError(_) - | Mining::UpdateChannelError(_) - | Mining::SubmitSharesError(_) - | Mining::SetCustomMiningJobError(_) => { - error!("parse_incoming SV2 protocol error Message"); - handle_result!(tx_status, Err(m)); - } - // impossible state: handle_message_mining only returns - // the above 3 messages in the Ok(SendTo::None(Some(m))) case to be sent - // to the bridge for translation. 
- _ => panic!(), - } - } - Ok(SendTo::None(None)) => (), - // No need to handle impossible state just panic cause are impossible and we - // will never panic ;-) Verified: handle_message_mining only either panics, - // returns Ok(SendTo::None(None)) or Ok(SendTo::None(Some(m))), or returns Err - Ok(_) => panic!(), - Err(e) => { - let status = status::Status { - state: status::State::UpstreamShutdown(UpstreamIncoming(e)), - }; - error!( - "TERMINATING: Error handling pool role message: {:?}", - status - ); - if let Err(e) = tx_status.send(status).await { - error!("Status channel down: {:?}", e); - } - - break; - } - } - } - }); - let _ = collector2 - .safe_lock(|a| a.push((parse_incoming.abort_handle(), "parse_incoming".to_string()))); - - Ok(()) - } - - // Retrieves the current job ID. - // - // If work selection is enabled (which it is not for a Translator Proxy), - // it would return the last `SetCustomMiningJobSuccess` job ID. If - // work selection is disabled, it returns the job ID from the last - // `NewExtendedMiningJob` - #[allow(clippy::result_large_err)] - fn get_job_id( - self_: &Arc>, - ) -> Result>, super::super::error::Error<'static>> - { - self_ - .safe_lock(|s| { - if s.is_work_selection_enabled() { - s.last_job_id - .ok_or(super::super::error::Error::RolesSv2Logic( - RolesLogicError::NoValidTranslatorJob, - )) - } else { - s.job_id.ok_or(super::super::error::Error::RolesSv2Logic( - RolesLogicError::NoValidJob, - )) - } - }) - .map_err(|_e| PoisonLock) - } - - /// Spawns a task to handle outgoing `SubmitSharesExtended` messages. - /// - /// This task continuously receives `SubmitSharesExtended` messages from the - /// `rx_sv2_submit_shares_ext` channel (populated by the Bridge). It updates - /// the channel ID and job ID in the submit message (ensuring they match - /// the current upstream channel details), encodes the message into an SV2 frame, - /// and sends it to the upstream server. 
- #[allow(clippy::result_large_err)] - pub fn handle_submit(self_: Arc>) -> ProxyResult<'static, ()> { - let task_collector = self_.safe_lock(|s| s.task_collector.clone()).unwrap(); - let clone = self_.clone(); - let (tx_frame, receiver, tx_status) = clone.safe_lock(|s| { - ( - s.connection.sender.clone(), - s.rx_sv2_submit_shares_ext.clone(), - s.tx_status.clone(), - ) - })?; - - let handle_submit = tokio::task::spawn(async move { - loop { - let mut sv2_submit: SubmitSharesExtended = - handle_result!(tx_status, receiver.recv().await); - - let channel_id = self_ - .safe_lock(|s| { - s.channel_id - .ok_or(super::super::error::Error::RolesSv2Logic( - RolesLogicError::NotFoundChannelId, - )) - }) - .map_err(|_e| PoisonLock); - sv2_submit.channel_id = - handle_result!(tx_status, handle_result!(tx_status, channel_id)); - let job_id = Self::get_job_id(&self_); - sv2_submit.job_id = handle_result!(tx_status, handle_result!(tx_status, job_id)); - - let message = Message::Mining( - roles_logic_sv2::parsers_sv2::Mining::SubmitSharesExtended(sv2_submit), - ); - - let frame: StdFrame = handle_result!(tx_status, message.try_into()); - // Doesnt actually send because of Braiins Pool issue that needs to be fixed - - let frame: EitherFrame = frame.into(); - handle_result!(tx_status, tx_frame.send(frame).await); - } - }); - let _ = task_collector - .safe_lock(|a| a.push((handle_submit.abort_handle(), "handle_submit".to_string()))); - - Ok(()) - } - - // Unimplemented method to check if a submitted share is contained within the upstream target. - // - // This method is currently unimplemented (`todo!()`). Its purpose would be - // to validate a share against the target set by the upstream pool. - fn _is_contained_in_upstream_target(&self, _share: SubmitSharesExtended) -> bool { - todo!() - } - - // Creates the initial `SetupConnection` message for the SV2 handshake. 
- // - // This message contains information about the proxy acting as a mining device, - // including supported protocol versions, flags, and hardcoded endpoint details. - // - // TODO: The Mining Device information is currently hardcoded. It should ideally - // be configurable or derived from the downstream connections. - #[allow(clippy::result_large_err)] - fn get_setup_connection_message( - min_version: u16, - max_version: u16, - is_work_selection_enabled: bool, - ) -> ProxyResult<'static, SetupConnection<'static>> { - let endpoint_host = "0.0.0.0".to_string().into_bytes().try_into()?; - let vendor = String::new().try_into()?; - let hardware_version = String::new().try_into()?; - let firmware = String::new().try_into()?; - let device_id = String::new().try_into()?; - let flags = match is_work_selection_enabled { - false => 0b0000_0000_0000_0000_0000_0000_0000_0100, - true => 0b0000_0000_0000_0000_0000_0000_0000_0110, - }; - Ok(SetupConnection { - protocol: Protocol::MiningProtocol, - min_version, - max_version, - flags, - endpoint_host, - endpoint_port: 50, - vendor, - hardware_version, - firmware, - device_id, - }) - } -} - -impl ParseCommonMessagesFromUpstream for Upstream { - // Handles the SV2 `SetupConnectionSuccess` message received from the upstream. - // - // Returns `Ok(SendToCommon::None(None))` as this message is handled internally - // and does not require a direct response or forwarding. 
- fn handle_setup_connection_success( - &mut self, - m: roles_logic_sv2::common_messages_sv2::SetupConnectionSuccess, - ) -> Result { - info!( - "Received `SetupConnectionSuccess`: version={}, flags={:b}", - m.used_version, m.flags - ); - Ok(SendToCommon::None(None)) - } - - fn handle_setup_connection_error( - &mut self, - _: roles_logic_sv2::common_messages_sv2::SetupConnectionError, - ) -> Result { - todo!() - } - - fn handle_channel_endpoint_changed( - &mut self, - _: roles_logic_sv2::common_messages_sv2::ChannelEndpointChanged, - ) -> Result { - todo!() - } - - fn handle_reconnect(&mut self, _m: Reconnect) -> Result { - todo!() - } -} - -/// Connection-wide SV2 Upstream role messages parser implemented by a downstream ("downstream" -/// here is relative to the SV2 Upstream role and is represented by this `Upstream` struct). -impl ParseMiningMessagesFromUpstream for Upstream { - /// Returns the type of channel used between this proxy and the SV2 Upstream. - /// For a Translator Proxy, this is always `Extended`. - fn get_channel_type(&self) -> SupportedChannelTypes { - SupportedChannelTypes::Extended - } - - /// Indicates whether work selection is enabled for this upstream connection. - /// For a Translator Proxy, work selection is handled by the upstream pool, - /// so this method always returns `false`. - fn is_work_selection_enabled(&self) -> bool { - false - } - - /// The SV2 `OpenStandardMiningChannelSuccess` message is NOT handled because it is NOT used - /// for the Translator Proxy as only `Extended` channels are used between the SV1/SV2 Translator - /// Proxy and the SV2 Upstream role. - fn handle_open_standard_mining_channel_success( - &mut self, - _m: roles_logic_sv2::mining_sv2::OpenStandardMiningChannelSuccess, - ) -> Result, RolesLogicError> { - panic!("Standard Mining Channels are not used in Translator Proxy") - } - - /// Handles the SV2 `OpenExtendedMiningChannelSuccess` message. 
- /// - /// This message is received after requesting to open an extended mining channel. - /// It provides the assigned `channel_id`, the extranonce prefix, the initial - /// mining `target`, and the expected `extranonce_size`. It stores the `channel_id` and - /// `extranonce_prefix`, updates the shared `target`, and prepares the extranonce - /// information (including calculating the size for the TProxy's added extranonce1) to be - /// sent to the Downstream handler for use with SV1 clients. - /// - /// Returns `Ok(SendTo::None(Some(Mining::OpenExtendedMiningChannelSuccess)))` - /// to indicate that the message has been handled internally and should be - /// forwarded to the Bridge. - fn handle_open_extended_mining_channel_success( - &mut self, - m: roles_logic_sv2::mining_sv2::OpenExtendedMiningChannelSuccess, - ) -> Result, RolesLogicError> { - info!( - "Received OpenExtendedMiningChannelSuccess with request id: {} and channel id: {}", - m.request_id, m.channel_id - ); - debug!("OpenStandardMiningChannelSuccess: {}", m); - let tproxy_e1_len = super::super::utils::proxy_extranonce1_len( - m.extranonce_size as usize, - self.min_extranonce_size.into(), - ) as u16; - if self.min_extranonce_size + tproxy_e1_len < m.extranonce_size { - return Err(RolesLogicError::InvalidExtranonceSize( - self.min_extranonce_size, - m.extranonce_size, - )); - } - self.target.safe_lock(|t| *t = m.target.to_vec())?; - - info!("Up: Successfully Opened Extended Mining Channel"); - self.channel_id = Some(m.channel_id); - self.extranonce_prefix = Some(m.extranonce_prefix.to_vec()); - let m = Mining::OpenExtendedMiningChannelSuccess(m.into_static()); - Ok(SendTo::None(Some(m))) - } - - /// Handles the SV2 `OpenExtendedMiningChannelError` message (TODO). 
- fn handle_open_mining_channel_error( - &mut self, - m: roles_logic_sv2::mining_sv2::OpenMiningChannelError, - ) -> Result, RolesLogicError> { - error!( - "Received OpenExtendedMiningChannelError with error code {}", - std::str::from_utf8(m.error_code.as_ref()).unwrap_or("unknown error code") - ); - Ok(SendTo::None(Some(Mining::OpenMiningChannelError( - m.as_static(), - )))) - } - - /// Handles the SV2 `UpdateChannelError` message (TODO). - fn handle_update_channel_error( - &mut self, - m: roles_logic_sv2::mining_sv2::UpdateChannelError, - ) -> Result, RolesLogicError> { - error!( - "Received UpdateChannelError with error code {}", - std::str::from_utf8(m.error_code.as_ref()).unwrap_or("unknown error code") - ); - Ok(SendTo::None(Some(Mining::UpdateChannelError( - m.as_static(), - )))) - } - - /// Handles the SV2 `CloseChannel` message (TODO). - fn handle_close_channel( - &mut self, - m: roles_logic_sv2::mining_sv2::CloseChannel, - ) -> Result, RolesLogicError> { - info!("Received CloseChannel for channel id: {}", m.channel_id); - Ok(SendTo::None(Some(Mining::CloseChannel(m.as_static())))) - } - - /// Handles the SV2 `SetExtranoncePrefix` message (TODO). - fn handle_set_extranonce_prefix( - &mut self, - _: roles_logic_sv2::mining_sv2::SetExtranoncePrefix, - ) -> Result, RolesLogicError> { - todo!() - } - - /// Handles the SV2 `SubmitSharesSuccess` message. - fn handle_submit_shares_success( - &mut self, - m: roles_logic_sv2::mining_sv2::SubmitSharesSuccess, - ) -> Result, RolesLogicError> { - info!("Received SubmitSharesSuccess"); - debug!("SubmitSharesSuccess: {}", m); - Ok(SendTo::None(None)) - } - - /// Handles the SV2 `SubmitSharesError` message. 
- fn handle_submit_shares_error( - &mut self, - m: roles_logic_sv2::mining_sv2::SubmitSharesError, - ) -> Result, RolesLogicError> { - error!( - "Received SubmitSharesError with error code {}", - std::str::from_utf8(m.error_code.as_ref()).unwrap_or("unknown error code") - ); - Ok(SendTo::None(None)) - } - - /// The SV2 `NewMiningJob` message is NOT handled because it is NOT used for the Translator - /// Proxy as only `Extended` channels are used between the SV1/SV2 Translator Proxy and the SV2 - /// Upstream role. - fn handle_new_mining_job( - &mut self, - _m: roles_logic_sv2::mining_sv2::NewMiningJob, - ) -> Result, RolesLogicError> { - panic!("Standard Mining Channels are not used in Translator Proxy") - } - - /// Handles the SV2 `NewExtendedMiningJob` message which is used (along with the SV2 - /// `SetNewPrevHash` message) to later create a SV1 `mining.notify` for the Downstream - /// role. - fn handle_new_extended_mining_job( - &mut self, - m: NewExtendedMiningJob, - ) -> Result, RolesLogicError> { - info!( - "Received new extended mining job for channel id: {} with job id: {} is_future: {}", - m.channel_id, - m.job_id, - m.is_future() - ); - debug!("NewExtendedMiningJob: {}", m); - if self.is_work_selection_enabled() { - Ok(SendTo::None(None)) - } else { - IS_NEW_JOB_HANDLED.store(false, std::sync::atomic::Ordering::SeqCst); - if !m.version_rolling_allowed { - warn!("VERSION ROLLING NOT ALLOWED IS A TODO"); - // todo!() - } - - let message = Mining::NewExtendedMiningJob(m.into_static()); - - Ok(SendTo::None(Some(message))) - } - } - - /// Handles the SV2 `SetNewPrevHash` message which is used (along with the SV2 - /// `NewExtendedMiningJob` message) to later create a SV1 `mining.notify` for the Downstream - /// role. 
- fn handle_set_new_prev_hash( - &mut self, - m: SetNewPrevHash, - ) -> Result, RolesLogicError> { - info!( - "Received SetNewPrevHash channel id: {}, job id: {}", - m.channel_id, m.job_id - ); - debug!("SetNewPrevHash: {}", m); - if self.is_work_selection_enabled() { - Ok(SendTo::None(None)) - } else { - let message = Mining::SetNewPrevHash(m.into_static()); - Ok(SendTo::None(Some(message))) - } - } - - /// Handles the SV2 `SetCustomMiningJobSuccess` message (TODO). - fn handle_set_custom_mining_job_success( - &mut self, - m: roles_logic_sv2::mining_sv2::SetCustomMiningJobSuccess, - ) -> Result, RolesLogicError> { - info!( - "Received SetCustomMiningJobSuccess for channel id: {} for job id: {}", - m.channel_id, m.job_id - ); - debug!("SetCustomMiningJobSuccess: {}", m); - self.last_job_id = Some(m.job_id); - Ok(SendTo::None(None)) - } - - /// Handles the SV2 `SetCustomMiningJobError` message (TODO). - fn handle_set_custom_mining_job_error( - &mut self, - _m: roles_logic_sv2::mining_sv2::SetCustomMiningJobError, - ) -> Result, RolesLogicError> { - unimplemented!() - } - - /// Handles the SV2 `SetTarget` message which updates the Downstream role(s) target - /// difficulty via the SV1 `mining.set_difficulty` message. - fn handle_set_target( - &mut self, - m: roles_logic_sv2::mining_sv2::SetTarget, - ) -> Result, RolesLogicError> { - info!("Received SetTarget for channel id: {}", m.channel_id); - debug!("SetTarget: {}", m); - let m = m.into_static(); - self.target.safe_lock(|t| *t = m.maximum_target.to_vec())?; - Ok(SendTo::None(None)) - } - - fn handle_set_group_channel( - &mut self, - _m: SetGroupChannel, - ) -> Result, RolesLogicError> { - todo!() - } -} diff --git a/roles/translator/src/lib/upstream_sv2/upstream_connection.rs b/roles/translator/src/lib/upstream_sv2/upstream_connection.rs deleted file mode 100644 index ef4d6a0a5a..0000000000 --- a/roles/translator/src/lib/upstream_sv2/upstream_connection.rs +++ /dev/null @@ -1,31 +0,0 @@ -//! 
## Upstream SV2 Connection Module -//! -//! Defines [`UpstreamConnection`], the structure responsible for managing the -//! communication channels with an upstream. - -use super::{super::error::ProxyResult, EitherFrame, StdFrame}; -use async_channel::{Receiver, Sender}; - -/// Handles the sending and receiving of messages to and from an SV2 Upstream role (most typically -/// a SV2 Pool server). -/// On upstream, we have a sv2connection, so we use the connection from network helpers -/// use network_helpers::Connection; -/// this does the dirty work of reading byte by byte in the socket and puts them in a complete -/// Sv2Messages frame and when the message is ready then sends to our Upstream -/// sender_incoming + receiver_outgoing are in network_helpers::Connection -#[derive(Debug, Clone)] -pub struct UpstreamConnection { - /// Receives messages from the SV2 Upstream role - pub receiver: Receiver, - /// Sends messages to the SV2 Upstream role - pub sender: Sender, -} - -impl UpstreamConnection { - /// Send a SV2 message to the Upstream role - pub async fn send(&mut self, sv2_frame: StdFrame) -> ProxyResult<'static, ()> { - let either_frame = sv2_frame.into(); - self.sender.send(either_frame).await?; - Ok(()) - } -} diff --git a/roles/translator/src/lib/utils.rs b/roles/translator/src/lib/utils.rs index 9668db0384..897ad46823 100644 --- a/roles/translator/src/lib/utils.rs +++ b/roles/translator/src/lib/utils.rs @@ -1,15 +1,231 @@ -/// Calculates the required length of the proxy's extranonce1. -/// -/// The proxy needs to calculate an extranonce1 value to send to the -/// upstream server. This function determines the length of that -/// extranonce1 value -/// FIXME: The pool only supported 16 bytes exactly for its -/// `extranonce1` field is no longer the case and the -/// code needs to be changed to support variable `extranonce1` lengths. 
-pub fn proxy_extranonce1_len( - channel_extranonce2_size: usize, - downstream_extranonce2_len: usize, +use binary_sv2::Sv2DataType; +use buffer_sv2::Slice; +use codec_sv2::Frame; +use roles_logic_sv2::{ + bitcoin::{ + block::{Header, Version}, + hashes::Hash, + CompactTarget, TxMerkleNode, + }, + mining_sv2::Target, + parsers_sv2::{AnyMessage, CommonMessages}, + utils::{bytes_to_hex, merkle_root_from_path, u256_to_block_hash}, +}; +use tracing::{debug, error}; +use v1::{client_to_server, server_to_client, utils::HexU32Be}; + +use crate::error::TproxyError; + +/// Validates an SV1 share against the target difficulty and job parameters. +/// +/// This function performs complete share validation by: +/// 1. Finding the corresponding job from the valid jobs list +/// 2. Constructing the full extranonce from extranonce1 and extranonce2 +/// 3. Calculating the merkle root from the coinbase transaction and merkle path +/// 4. Building the block header with the share's nonce and timestamp +/// 5. 
Hashing the header and comparing against the target difficulty +/// +/// # Arguments +/// * `share` - The SV1 submit message containing the share data +/// * `target` - The target difficulty for this share +/// * `extranonce1` - The first part of the extranonce (from server) +/// * `version_rolling_mask` - Optional mask for version rolling +/// * `valid_jobs` - List of valid jobs to validate against +/// +/// # Returns +/// * `Ok(true)` if the share is valid and meets the target +/// * `Ok(false)` if the share is valid but doesn't meet the target +/// * `Err(TproxyError)` if validation fails due to missing job or invalid data +pub fn validate_sv1_share( + share: &client_to_server::Submit<'static>, + target: Target, + extranonce1: Vec, + version_rolling_mask: Option, + valid_jobs: &[server_to_client::Notify<'static>], +) -> Result { + let job_id = share.job_id.clone(); + + let job = valid_jobs + .iter() + .find(|job| job.job_id == job_id) + .ok_or(TproxyError::JobNotFound)?; + + let mut full_extranonce = vec![]; + full_extranonce.extend_from_slice(extranonce1.as_slice()); + full_extranonce.extend_from_slice(share.extra_nonce2.0.as_ref()); + + let share_version = share + .version_bits + .clone() + .map(|vb| vb.0) + .unwrap_or(job.version.0); + let mask = version_rolling_mask.unwrap_or(HexU32Be(0x1FFFE000_u32)).0; + let version = (job.version.0 & !mask) | (share_version & mask); + + let prev_hash_vec: Vec = job.prev_hash.clone().into(); + let prev_hash = binary_sv2::U256::from_vec_(prev_hash_vec).map_err(TproxyError::BinarySv2)?; + + // calculate the merkle root from: + // - job coinbase_tx_prefix + // - full extranonce + // - job coinbase_tx_suffix + // - job merkle_path + let merkle_root: [u8; 32] = merkle_root_from_path( + job.coin_base1.as_ref(), + job.coin_base2.as_ref(), + full_extranonce.as_ref(), + job.merkle_branch.as_ref(), + ) + .ok_or(TproxyError::InvalidMerkleRoot)? 
+ .try_into() + .map_err(|_| TproxyError::InvalidMerkleRoot)?; + + // create the header for validation + let header = Header { + version: Version::from_consensus(version as i32), + prev_blockhash: u256_to_block_hash(prev_hash), + merkle_root: TxMerkleNode::from_byte_array(merkle_root), + time: share.time.0, + bits: CompactTarget::from_consensus(job.bits.0), + nonce: share.nonce.0, + }; + + // convert the header hash to a target type for easy comparison + let hash = header.block_hash(); + let raw_hash: [u8; 32] = *hash.to_raw_hash().as_ref(); + let hash_as_target: Target = raw_hash.into(); + + // print hash_as_target and self.target as human readable hex + let hash_as_u256: binary_sv2::U256 = hash_as_target.clone().into(); + let mut hash_bytes = hash_as_u256.to_vec(); + hash_bytes.reverse(); // Convert to big-endian for display + let target_u256: binary_sv2::U256 = target.clone().into(); + let mut target_bytes = target_u256.to_vec(); + target_bytes.reverse(); // Convert to big-endian for display + + debug!( + "share validation \nshare:\t\t{}\ndownstream target:\t{}\n", + bytes_to_hex(&hash_bytes), + bytes_to_hex(&target_bytes), + ); + // check if the share hash meets the downstream target + if hash_as_target < target { + /*if self.share_accounting.is_share_seen(hash.to_raw_hash()) { + return Err(ShareValidationError::DuplicateShare); + }*/ + + return Ok(true); + } + + Ok(false) +} + +/// Calculates the required length of the proxy's extranonce prefix. +/// +/// This function determines how many bytes the proxy needs to reserve for its own +/// extranonce prefix, based on the difference between the channel's rollable extranonce +/// size and the downstream miner's rollable extranonce size. 
+/// +/// # Arguments +/// * `channel_rollable_extranonce_size` - Size of the rollable extranonce from the channel +/// * `downstream_rollable_extranonce_size` - Size of the rollable extranonce for downstream +/// +/// # Returns +/// The number of bytes needed for the proxy's extranonce prefix +pub fn proxy_extranonce_prefix_len( + channel_rollable_extranonce_size: usize, + downstream_rollable_extranonce_size: usize, ) -> usize { - // full_extranonce_len - pool_extranonce1_len - miner_extranonce2 = tproxy_extranonce1_len - channel_extranonce2_size - downstream_extranonce2_len + channel_rollable_extranonce_size - downstream_rollable_extranonce_size +} + +/// Extracts message type, payload, and parsed message from an SV2 frame. +/// +/// This function processes an SV2 frame and extracts the essential components: +/// - Message type identifier +/// - Raw payload bytes +/// - Parsed message structure +/// +/// # Arguments +/// * `frame` - The SV2 frame to process +/// +/// # Returns +/// A tuple containing (message_type, payload, parsed_message) on success, +/// or a TproxyError if the frame is invalid or cannot be parsed +pub fn message_from_frame( + frame: &mut Frame, Slice>, +) -> Result<(u8, Vec, AnyMessage<'static>), TproxyError> { + match frame { + Frame::Sv2(frame) => { + let header = frame.get_header().ok_or(TproxyError::UnexpectedMessage)?; + let message_type = header.msg_type(); + let mut payload = frame.payload().to_vec(); + let message: Result, _> = + (message_type, payload.as_mut_slice()).try_into(); + match message { + Ok(message) => { + let message = into_static(message)?; + Ok((message_type, payload.to_vec(), message)) + } + Err(_) => { + error!("Received frame with invalid payload or message type: {frame:?}"); + Err(TproxyError::UnexpectedMessage) + } + } + } + Frame::HandShake(f) => { + error!("Received unexpected handshake frame: {f:?}"); + Err(TproxyError::UnexpectedMessage) + } + } +} + +/// Converts a borrowed AnyMessage to a static lifetime 
version. +/// +/// This function takes an AnyMessage with a borrowed lifetime and converts it to +/// a static lifetime version, which is necessary for storing messages across +/// async boundaries and in data structures. +/// +/// # Arguments +/// * `m` - The AnyMessage to convert to static lifetime +/// +/// # Returns +/// A static lifetime version of the message, or TproxyError if the message +/// type is not supported for static conversion +pub fn into_static(m: AnyMessage<'_>) -> Result, TproxyError> { + match m { + AnyMessage::Mining(m) => Ok(AnyMessage::Mining(m.into_static())), + AnyMessage::Common(m) => match m { + CommonMessages::ChannelEndpointChanged(m) => Ok(AnyMessage::Common( + CommonMessages::ChannelEndpointChanged(m.into_static()), + )), + CommonMessages::SetupConnection(m) => Ok(AnyMessage::Common( + CommonMessages::SetupConnection(m.into_static()), + )), + CommonMessages::SetupConnectionError(m) => Ok(AnyMessage::Common( + CommonMessages::SetupConnectionError(m.into_static()), + )), + CommonMessages::SetupConnectionSuccess(m) => Ok(AnyMessage::Common( + CommonMessages::SetupConnectionSuccess(m.into_static()), + )), + CommonMessages::Reconnect(m) => Ok(AnyMessage::Common(CommonMessages::Reconnect( + m.into_static(), + ))), + }, + _ => Err(TproxyError::UnexpectedMessage), + } +} + +/// Messages used for coordinating shutdown across different components. +/// +/// This enum defines the different types of shutdown signals that can be sent +/// through the broadcast channel to coordinate graceful shutdown of the translator. 
+#[derive(Debug, Clone)] +pub enum ShutdownMessage { + /// Shutdown all components immediately + ShutdownAll, + /// Shutdown all downstream connections + DownstreamShutdownAll, + /// Shutdown a specific downstream connection by ID + DownstreamShutdown(u32), } diff --git a/roles/translator/src/main.rs b/roles/translator/src/main.rs index 38d4139720..851715a786 100644 --- a/roles/translator/src/main.rs +++ b/roles/translator/src/main.rs @@ -1,13 +1,11 @@ mod args; +use std::process; -pub use translator_sv2::{ - config, downstream_sv1, error, proxy, status, upstream_sv2, TranslatorSv2, -}; - -use tracing::info; +use config_helpers::logging::init_logging; +pub use translator_sv2::{config, error, status, sv1, sv2, TranslatorSv2}; use crate::args::process_cli_args; -use config_helpers::logging::init_logging; + /// Entrypoint for the Translator binary. /// /// Loads the configuration from TOML and initializes the main runtime @@ -18,8 +16,10 @@ async fn main() { Ok(p) => p, Err(e) => panic!("failed to load config: {e}"), }; + init_logging(proxy_config.log_dir()); - info!("Proxy Config: {:?}", &proxy_config); TranslatorSv2::new(proxy_config).start().await; + + process::exit(1); } diff --git a/test/integration-tests/Cargo.lock b/test/integration-tests/Cargo.lock index e2191a37c4..3edcc5a405 100644 --- a/test/integration-tests/Cargo.lock +++ b/test/integration-tests/Cargo.lock @@ -1041,6 +1041,19 @@ dependencies = [ "tracing", ] +[[package]] +name = "handlers_sv2" +version = "0.1.0" +dependencies = [ + "binary_sv2", + "common_messages_sv2", + "job_declaration_sv2", + "mining_sv2", + "parsers_sv2", + "template_distribution_sv2", + "trait-variant", +] + [[package]] name = "hashbrown" version = "0.11.2" @@ -2037,6 +2050,7 @@ dependencies = [ "channels_sv2", "codec_sv2", "common_messages_sv2", + "handlers_sv2", "hex-conservative 0.3.0", "job_declaration_sv2", "mining_sv2", @@ -2617,22 +2631,37 @@ dependencies = [ "tracing-log", ] +[[package]] +name = "trait-variant" +version = 
"0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + [[package]] name = "translator_sv2" -version = "1.0.0" +version = "2.0.0" dependencies = [ "async-channel", "async-recursion 0.3.2", + "binary_sv2", "buffer_sv2", - "clap", + "codec_sv2", "config", "config-helpers", "error_handling", + "framing_sv2", "futures", "key-utils", + "network_helpers_sv2", "once_cell", "primitive-types", "rand 0.8.5", + "roles_logic_sv2", "serde", "serde_json", "stratum-common", diff --git a/test/integration-tests/lib/mod.rs b/test/integration-tests/lib/mod.rs index 8258a8439c..948c4c4931 100644 --- a/test/integration-tests/lib/mod.rs +++ b/test/integration-tests/lib/mod.rs @@ -233,26 +233,23 @@ pub fn start_sv2_translator(upstream: SocketAddr) -> (TranslatorSv2, SocketAddr) let listening_address = get_available_address(); let listening_port = listening_address.port(); let min_individual_miner_hashrate = measure_hashrate(1) as f32; - let channel_diff_update_interval = 60; - let channel_nominal_hashrate = min_individual_miner_hashrate; + + // Create upstream configuration + let upstream_config = translator_sv2::config::Upstream::new( + upstream_address, + upstream_port, + upstream_authority_pubkey, + ); + + // Create downstream difficulty configuration let downstream_difficulty_config = translator_sv2::config::DownstreamDifficultyConfig::new( min_individual_miner_hashrate, SHARES_PER_MINUTE, 0, 0, ); - let upstream_difficulty_config = translator_sv2::config::UpstreamDifficultyConfig::new( - channel_diff_update_interval, - channel_nominal_hashrate, - 0, - false, - ); - let upstream_conf = translator_sv2::config::UpstreamConfig::new( - upstream_address, - upstream_port, - upstream_authority_pubkey, - upstream_difficulty_config, - ); + + // Create downstream configuration let downstream_conf = 
translator_sv2::config::DownstreamConfig::new( listening_address.ip().to_string(), listening_port, @@ -262,16 +259,22 @@ pub fn start_sv2_translator(upstream: SocketAddr) -> (TranslatorSv2, SocketAddr) let min_extranonce2_size = 4; let config = translator_sv2::config::TranslatorConfig::new( - upstream_conf, + vec![upstream_config], // New API expects a vector of upstreams downstream_conf, 2, 2, min_extranonce2_size, + "test_user".to_string(), // user_identity parameter + true, // aggregate_channels parameter ); - let translator_v2 = translator_sv2::TranslatorSv2::new(config); - let clone_translator_v2 = translator_v2.clone(); - tokio::spawn(async move { - clone_translator_v2.start().await; + let translator_v2 = translator_sv2::TranslatorSv2::new(config.clone()); + let translator_for_spawn = translator_sv2::TranslatorSv2::new(config); + // Spawn using thread instead of tokio::spawn to avoid Send issues + std::thread::spawn(move || { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + translator_for_spawn.start().await; + }); }); (translator_v2, listening_address) } diff --git a/test/integration-tests/tests/jdc_block_propagation.rs b/test/integration-tests/tests/jdc_block_propagation.rs index 4869aeaac3..da92551c7a 100644 --- a/test/integration-tests/tests/jdc_block_propagation.rs +++ b/test/integration-tests/tests/jdc_block_propagation.rs @@ -5,7 +5,7 @@ use integration_tests_sv2::{ }; use stratum_common::roles_logic_sv2::{job_declaration_sv2::*, template_distribution_sv2::*}; -// Block propagated from JDC to TP +// Block propogated from JDC to TP #[tokio::test] async fn propagated_from_jdc_to_tp() { start_tracing(); diff --git a/test/integration-tests/tests/jdc_fallback.rs b/test/integration-tests/tests/jdc_fallback.rs index 908d3cf8d7..3c5db4173f 100644 --- a/test/integration-tests/tests/jdc_fallback.rs +++ b/test/integration-tests/tests/jdc_fallback.rs @@ -14,7 +14,6 @@ use stratum_common::roles_logic_sv2::{ // the currently connected 
pool. // // This ignore directive can be removed once this issue is resolved: https://github.com/stratum-mining/stratum/issues/1574. -#[ignore] #[tokio::test] async fn test_jdc_pool_fallback_after_submit_rejection() { start_tracing(); diff --git a/test/integration-tests/tests/jds_block_propagation.rs b/test/integration-tests/tests/jds_block_propagation.rs index 811bc461a4..7dd820e60d 100644 --- a/test/integration-tests/tests/jds_block_propagation.rs +++ b/test/integration-tests/tests/jds_block_propagation.rs @@ -5,7 +5,7 @@ use integration_tests_sv2::{ }; use stratum_common::roles_logic_sv2::{job_declaration_sv2::*, template_distribution_sv2::*}; -// Block propagated from JDS to TP +// Block propogated from JDS to TP #[tokio::test] async fn propagated_from_jds_to_tp() { start_tracing(); diff --git a/utils/Cargo.lock b/utils/Cargo.lock index 88519c07bb..23c26c7012 100644 --- a/utils/Cargo.lock +++ b/utils/Cargo.lock @@ -575,6 +575,19 @@ version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" +[[package]] +name = "handlers_sv2" +version = "0.1.0" +dependencies = [ + "binary_sv2", + "common_messages_sv2", + "job_declaration_sv2", + "mining_sv2", + "parsers_sv2", + "template_distribution_sv2", + "trait-variant", +] + [[package]] name = "hashbrown" version = "0.7.2" @@ -1038,6 +1051,7 @@ dependencies = [ "channels_sv2", "codec_sv2", "common_messages_sv2", + "handlers_sv2", "hex-conservative 0.3.0", "job_declaration_sv2", "mining_sv2", @@ -1294,6 +1308,17 @@ dependencies = [ "once_cell", ] +[[package]] +name = "trait-variant" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "typenum" version = "1.18.0"