diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0985d33202..09e0918644 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,11 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE
### Added
- In the `/v3/transaction/{txid}` RPC endpoint, added `block_height` and `is_canonical` to the response.
+- Improved block validation in `stacks-inspect`: `validate-block` now reports progress, collects all failures before exiting, and supports an `--early-exit` flag to stop at the first error.
+
+### Changed
+
+- Removed the `validate-naka-block` option from `stacks-inspect`, merging it into `validate-block` so that users no longer need to differentiate between the two.
## [3.3.0.0.2]
diff --git a/contrib/stacks-inspect/src/lib.rs b/contrib/stacks-inspect/src/lib.rs
index 13026508ac..4f58522db4 100644
--- a/contrib/stacks-inspect/src/lib.rs
+++ b/contrib/stacks-inspect/src/lib.rs
@@ -13,9 +13,10 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
+use std::io::Write;
use std::path::PathBuf;
use std::time::Instant;
-use std::{fs, process};
+use std::{fs, io, process};
use clarity::types::chainstate::SortitionId;
use clarity::util::hash::{Sha512Trunc256Sum, to_hex};
@@ -115,86 +116,266 @@ pub fn drain_common_opts(argv: &mut Vec<String>, start_at: usize) -> CommonOpts
opts
}
-/// Replay blocks from chainstate database
-/// Terminates on error using `process::exit()`
-///
-/// Arguments:
-/// - `argv`: Args in CLI format: `<command> [args...]`
-pub fn command_validate_block(argv: &[String], conf: Option<&Config>) {
- let print_help_and_exit = || -> ! {
- let n = &argv[0];
- eprintln!("Usage:");
- eprintln!(" {n} ");
- eprintln!(" {n} prefix ");
- eprintln!(" {n} index-range ");
- eprintln!(" {n} range ");
- eprintln!(" {n} ");
- process::exit(1);
- };
- let start = Instant::now();
- let db_path = argv.get(1).unwrap_or_else(|| print_help_and_exit());
- let mode = argv.get(2).map(String::as_str);
- let staging_blocks_db_path = format!("{db_path}/chainstate/vm/index.sqlite");
- let conn =
- Connection::open_with_flags(&staging_blocks_db_path, OpenFlags::SQLITE_OPEN_READ_ONLY)
- .unwrap();
-
- let query = match mode {
- Some("prefix") => format!(
- "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 AND index_block_hash LIKE \"{}%\"",
- argv[3]
- ),
- Some("first") => format!(
- "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {}",
- argv[3]
- ),
- Some("range") => {
- let arg4 = argv[3]
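+/// Which staging database a block row came from.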
+#[derive(Clone)]
+enum BlockSource {
+ Nakamoto,
+ Epoch2,
+}
+
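+/// A single unit of validation work: a block ID plus the staging database it was read from.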
+#[derive(Clone)]
+struct BlockScanEntry {
+ index_block_hash: StacksBlockId,
+ source: BlockSource,
+}
+
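+/// How the requested blocks should be selected. The `*Info` variants do not
+/// select blocks; they only report the total number of available entries.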
+enum BlockSelection {
+ All,
+ Prefix(String),
+ Last(u64),
+ HeightRange { start: u64, end: u64 },
+ IndexRange { start: u64, end: u64 },
+ NakaIndexRange { start: u64, end: u64 },
+ IndexRangeInfo,
+ NakaIndexRangeInfo,
+}
+
+impl BlockSelection {
+ fn clause(&self) -> String {
+ match self {
+ BlockSelection::All => "WHERE orphaned = 0 ORDER BY height ASC".into(),
+ BlockSelection::Prefix(prefix) => format!(
+ "WHERE orphaned = 0 AND index_block_hash LIKE '{prefix}%' ORDER BY height ASC",
+ ),
+ BlockSelection::Last(count) => {
+ format!("WHERE orphaned = 0 ORDER BY height DESC LIMIT {count}")
+ }
+ BlockSelection::HeightRange { start, end } => format!(
+ "WHERE orphaned = 0 AND height BETWEEN {start} AND {} ORDER BY height ASC",
+ end.saturating_sub(1)
+ ),
+ BlockSelection::IndexRange { start, end } => {
+ let blocks = end.saturating_sub(*start);
+ format!("WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}")
+ }
+ BlockSelection::NakaIndexRange { start, end } => {
+ let blocks = end.saturating_sub(*start);
+ format!("WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}")
+ }
+ BlockSelection::IndexRangeInfo | BlockSelection::NakaIndexRangeInfo => {
+ unreachable!("Info selections should not generate SQL clauses")
+ }
+ }
+ }
+}
+
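+/// Parse the CLI mode and its arguments into a `BlockSelection`, returning a
+/// usage error message for missing or malformed arguments.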
+fn parse_block_selection(mode: Option<&str>, argv: &[String]) -> Result<BlockSelection, String> {
+ match mode {
+ Some("prefix") => {
+ let prefix = argv
+ .get(3)
+ .ok_or_else(|| "Missing ".to_string())?
+ .clone();
+ Ok(BlockSelection::Prefix(prefix))
+ }
+ Some("last") => {
+ let count = argv
+ .get(3)
+ .ok_or_else(|| "Missing ".to_string())?
.parse::()
- .expect(" not a valid u64");
- let arg5 = argv[4].parse::().expect(" not a valid u64");
- let start = arg4.saturating_sub(1);
- let blocks = arg5.saturating_sub(arg4);
- format!(
- "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {start}, {blocks}"
- )
+ .map_err(|_| " must be a u64".to_string())?;
+ Ok(BlockSelection::Last(count))
}
- Some("index-range") => {
- let start = argv[3]
+ Some("range") => {
+ let start = argv
+ .get(3)
+ .ok_or_else(|| "Missing ".to_string())?
.parse::()
- .expect(" not a valid u64");
- let end = argv[4].parse::().expect(" not a valid u64");
- let blocks = end.saturating_sub(start);
- format!(
- "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}"
- )
+ .map_err(|_| " must be a u64".to_string())?;
+ let end = argv
+ .get(4)
+ .ok_or_else(|| "Missing ".to_string())?
+ .parse::()
+ .map_err(|_| " must be a u64".to_string())?;
+ if start >= end {
+ return Err(" must be < ".into());
+ }
+ Ok(BlockSelection::HeightRange { start, end })
}
- Some("last") => format!(
- "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height DESC LIMIT {}",
- argv[3]
- ),
- Some(_) => print_help_and_exit(),
- // Default to ALL blocks
- None => "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0".into(),
- };
+ Some("index-range") => match argv.get(3) {
+ None => Ok(BlockSelection::IndexRangeInfo),
+ Some(start_arg) => {
+ let start = start_arg
+ .parse::<u64>()
+ .map_err(|_| "<start-index> must be a u64".to_string())?;
+ let end = argv
+ .get(4)
+ .ok_or_else(|| "Missing <end-index>".to_string())?
+ .parse::<u64>()
+ .map_err(|_| "<end-index> must be a u64".to_string())?;
+ if start >= end {
+ return Err("<start-index> must be < <end-index>".into());
+ }
+ Ok(BlockSelection::IndexRange { start, end })
+ }
+ },
+ Some("naka-index-range") => match argv.get(3) {
+ None => Ok(BlockSelection::NakaIndexRangeInfo),
+ Some(start_arg) => {
+ let start = start_arg
+ .parse::<u64>()
+ .map_err(|_| "<start-index> must be a u64".to_string())?;
+ let end = argv
+ .get(4)
+ .ok_or_else(|| "Missing <end-index>".to_string())?
+ .parse::<u64>()
+ .map_err(|_| "<end-index> must be a u64".to_string())?;
+ if start >= end {
+ return Err("<start-index> must be < <end-index>".into());
+ }
+ Ok(BlockSelection::NakaIndexRange { start, end })
+ }
+ },
+ Some(other) => Err(format!("Unrecognized option: {other}")),
+ None => Ok(BlockSelection::All),
+ }
+}
+
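+/// Gather the block IDs matching `selection` from the epoch 2.x and/or Nakamoto
+/// staging databases, tagging each entry with its source.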
+fn collect_block_entries_for_selection(
+ db_path: &str,
+ selection: &BlockSelection,
+ chainstate: &StacksChainState,
+) -> Vec<BlockScanEntry> {
+ let mut entries = Vec::new();
+ let clause = selection.clause();
+
+ match selection {
+ BlockSelection::Last(limit) => {
+ if collect_nakamoto_entries(&mut entries, &clause, chainstate, Some(*limit)) {
+ return entries;
+ }
+ collect_epoch2_entries(&mut entries, &clause, db_path, Some(*limit));
+ }
+ BlockSelection::IndexRange { .. } => {
+ collect_epoch2_entries(&mut entries, &clause, db_path, None);
+ }
+ BlockSelection::NakaIndexRange { .. } => {
+ collect_nakamoto_entries(&mut entries, &clause, chainstate, None);
+ }
+ _ => {
+ collect_epoch2_entries(&mut entries, &clause, db_path, None);
+ collect_nakamoto_entries(&mut entries, &clause, chainstate, None);
+ }
+ }
- let mut stmt = conn.prepare(&query).unwrap();
- let mut hashes_set = stmt.query(NO_PARAMS).unwrap();
+ entries
+}
+
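+/// Returns true once `current` has reached the optional `limit`.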
+fn limit_reached(limit: Option<u64>, current: usize) -> bool {
+ limit.is_some_and(|max| current >= max as usize)
+}
- let mut index_block_hashes: Vec<String> = vec![];
- while let Ok(Some(row)) = hashes_set.next() {
- index_block_hashes.push(row.get(0).unwrap());
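+/// Count the non-orphaned rows in the epoch 2.x `staging_blocks` table.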
+fn count_epoch2_index_entries(db_path: &str) -> u64 {
+ let staging_blocks_db_path = format!("{db_path}/chainstate/vm/index.sqlite");
+ let conn =
+ Connection::open_with_flags(&staging_blocks_db_path, OpenFlags::SQLITE_OPEN_READ_ONLY)
+ .unwrap_or_else(|e| {
+ panic!("Failed to open staging blocks DB at {staging_blocks_db_path}: {e}");
+ });
+ let sql = "SELECT COUNT(*) FROM staging_blocks WHERE orphaned = 0";
+ let mut stmt = conn.prepare(sql).unwrap_or_else(|e| {
+ panic!("Failed to prepare query over staging_blocks: {e}");
+ });
+ stmt.query_row(NO_PARAMS, |row| row.get::<_, u64>(0))
+ .unwrap_or_else(|e| {
+ panic!("Failed to count staging blocks: {e}");
+ })
+}
+
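+/// Count the non-orphaned rows in the `nakamoto_staging_blocks` table.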
+fn count_nakamoto_index_entries(chainstate: &StacksChainState) -> u64 {
+ let sql = "SELECT COUNT(*) FROM nakamoto_staging_blocks WHERE orphaned = 0";
+ let conn = chainstate.nakamoto_blocks_db();
+ let mut stmt = conn.prepare(sql).unwrap_or_else(|e| {
+ panic!("Failed to prepare query over nakamoto_staging_blocks: {e}");
+ });
+ stmt.query_row(NO_PARAMS, |row| row.get::<_, u64>(0))
+ .unwrap_or_else(|e| {
+ panic!("Failed to count nakamoto staging blocks: {e}");
+ })
+}
+
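+/// Append epoch 2.x staging blocks matching `clause` to `entries`, stopping once
+/// the optional `limit` is reached. Returns true if the limit was hit.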
+fn collect_epoch2_entries(
+ entries: &mut Vec<BlockScanEntry>,
+ clause: &str,
+ db_path: &str,
+ limit: Option<u64>,
+) -> bool {
+ if limit_reached(limit, entries.len()) {
+ return true;
}
- let total = index_block_hashes.len();
- println!("Will check {total} blocks");
- for (i, index_block_hash) in index_block_hashes.iter().enumerate() {
- if i % 100 == 0 {
- println!("Checked {i}...");
+ let staging_blocks_db_path = format!("{db_path}/chainstate/vm/index.sqlite");
+ let conn =
+ Connection::open_with_flags(&staging_blocks_db_path, OpenFlags::SQLITE_OPEN_READ_ONLY)
+ .unwrap_or_else(|e| {
+ panic!("Failed to open staging blocks DB at {staging_blocks_db_path}: {e}");
+ });
+ let sql = format!("SELECT index_block_hash FROM staging_blocks {clause}");
+ let mut stmt = conn.prepare(&sql).unwrap_or_else(|e| {
+ panic!("Failed to prepare query over staging_blocks: {e}");
+ });
+ let mut rows = stmt.query(NO_PARAMS).unwrap_or_else(|e| {
+ panic!("Failed to query staging_blocks: {e}");
+ });
+ while let Some(row) = rows.next().unwrap_or_else(|e| {
+ panic!("Failed to read staging block row: {e}");
+ }) {
+ let index_block_hash: StacksBlockId = row.get(0).unwrap();
+ entries.push(BlockScanEntry {
+ index_block_hash,
+ source: BlockSource::Epoch2,
+ });
+
+ if limit_reached(limit, entries.len()) {
+ return true;
}
- replay_staging_block(db_path, index_block_hash, conf);
}
- println!("Finished. run_time_seconds = {}", start.elapsed().as_secs());
+
+ false
+}
+
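+/// Append Nakamoto staging blocks matching `clause` to `entries`, stopping once
+/// the optional `limit` is reached. Returns true if the limit was hit.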
+fn collect_nakamoto_entries(
+ entries: &mut Vec<BlockScanEntry>,
+ clause: &str,
+ chainstate: &StacksChainState,
+ limit: Option<u64>,
+) -> bool {
+ if limit_reached(limit, entries.len()) {
+ return true;
+ }
+
+ let sql = format!("SELECT index_block_hash FROM nakamoto_staging_blocks {clause}");
+ let conn = chainstate.nakamoto_blocks_db();
+ let mut stmt = conn.prepare(&sql).unwrap_or_else(|e| {
+ panic!("Failed to prepare query over nakamoto_staging_blocks: {e}");
+ });
+ let mut rows = stmt.query(NO_PARAMS).unwrap_or_else(|e| {
+ panic!("Failed to query nakamoto_staging_blocks: {e}");
+ });
+ while let Some(row) = rows.next().unwrap_or_else(|e| {
+ panic!("Failed to read Nakamoto staging block row: {e}");
+ }) {
+ let index_block_hash: StacksBlockId = row.get(0).unwrap();
+ entries.push(BlockScanEntry {
+ index_block_hash,
+ source: BlockSource::Nakamoto,
+ });
+
+ if limit_reached(limit, entries.len()) {
+ return true;
+ }
+ }
+
+ false
}
/// Replay blocks from chainstate database
@@ -202,91 +383,115 @@ pub fn command_validate_block(argv: &[String], conf: Option<&Config>) {
///
/// Arguments:
/// - `argv`: Args in CLI format: `<command> [args...]`
-pub fn command_validate_block_nakamoto(argv: &[String], conf: Option<&Config>) {
+pub fn command_validate_block(argv: &[String], conf: Option<&Config>) {
let print_help_and_exit = || -> ! {
let n = &argv[0];
eprintln!("Usage:");
eprintln!(" {n} ");
eprintln!(" {n} prefix ");
- eprintln!(" {n} index-range ");
- eprintln!(" {n} range ");
- eprintln!(" {n} ");
+ eprintln!(" {n} index-range [ ]");
+ eprintln!(" {n} naka-index-range [ ]");
+ eprintln!(" {n} range ");
+ eprintln!(" {n} ");
+ eprintln!(" {n} --early-exit ... # Exit on first error found");
process::exit(1);
};
- let start = Instant::now();
- let db_path = argv.get(1).unwrap_or_else(|| print_help_and_exit());
- let mode = argv.get(2).map(String::as_str);
- let chain_state_path = format!("{db_path}/chainstate/");
+ let start = Instant::now();
+ let mut args = argv.to_vec();
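+ // `--early-exit` must appear directly after the command name, before <db-path>.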
+ let early_exit = if let Some("--early-exit") = args.get(1).map(String::as_str) {
+ args.remove(1);
+ true
+ } else {
+ false
+ };
+ let db_path = args.get(1).unwrap_or_else(|| print_help_and_exit());
+ let mode = args.get(2).map(String::as_str);
+ let selection = parse_block_selection(mode, &args).unwrap_or_else(|err| {
+ eprintln!("{err}");
+ print_help_and_exit();
+ });
let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG);
-
+ let chain_state_path = format!("{db_path}/chainstate/");
let (chainstate, _) = StacksChainState::open(
conf.is_mainnet(),
conf.burnchain.chain_id,
&chain_state_path,
None,
)
- .unwrap();
-
- let conn = chainstate.nakamoto_blocks_db();
+ .unwrap_or_else(|e| {
+ eprintln!("Failed to open chainstate at {chain_state_path}: {e}");
+ process::exit(1);
+ });
- let query = match mode {
- Some("prefix") => format!(
- "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 AND index_block_hash LIKE \"{}%\"",
- argv[3]
- ),
- Some("first") => format!(
- "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {}",
- argv[3]
- ),
- Some("range") => {
- let arg4 = argv[3]
- .parse::<u64>()
- .expect("<start-block> not a valid u64");
- let arg5 = argv[4].parse::<u64>().expect("<end-block> not a valid u64");
- let start = arg4.saturating_sub(1);
- let blocks = arg5.saturating_sub(arg4);
- format!(
- "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {start}, {blocks}"
- )
+ match &selection {
+ BlockSelection::IndexRangeInfo => {
+ let total = count_epoch2_index_entries(db_path);
+ println!("Total available entries: {total}");
+ return;
}
- Some("index-range") => {
- let start = argv[3]
- .parse::<u64>()
- .expect("<start-block> not a valid u64");
- let end = argv[4].parse::<u64>().expect("<end-block> not a valid u64");
- let blocks = end.saturating_sub(start);
- format!(
- "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}"
- )
+ BlockSelection::NakaIndexRangeInfo => {
+ let total = count_nakamoto_index_entries(&chainstate);
+ println!("Total available entries: {total}");
+ return;
}
- Some("last") => format!(
- "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY height DESC LIMIT {}",
- argv[3]
- ),
- Some(_) => print_help_and_exit(),
- // Default to ALL blocks
- None => "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0".into(),
- };
-
- let mut stmt = conn.prepare(&query).unwrap();
- let mut hashes_set = stmt.query(NO_PARAMS).unwrap();
+ _ => {}
+ }
- let mut index_block_hashes: Vec<String> = vec![];
- while let Ok(Some(row)) = hashes_set.next() {
- index_block_hashes.push(row.get(0).unwrap());
+ let work_items = collect_block_entries_for_selection(db_path, &selection, &chainstate);
+ drop(chainstate);
+ if work_items.is_empty() {
+ println!("No blocks matched the requested selection.");
+ return;
+ }
+ let total_blocks = work_items.len();
+ let mut completed = 0;
+ let mut errors: Vec<(StacksBlockId, String)> = Vec::new();
+
+ for entry in work_items {
+ if let Err(e) = validate_entry(db_path, conf, &entry) {
+ if early_exit {
+ print!("\r");
+ io::stdout().flush().ok();
+ println!("Block {}: {e}", entry.index_block_hash);
+ process::exit(1);
+ }
+ print!("\r");
+ io::stdout().flush().ok();
+ errors.push((entry.index_block_hash.clone(), e));
+ }
+ completed += 1;
+ let pct = ((completed as f32 / total_blocks as f32) * 100.0).floor() as usize;
+ print!("\rValidating: {:>3}% ({}/{})", pct, completed, total_blocks);
+ io::stdout().flush().ok();
}
- let total = index_block_hashes.len();
- println!("Will check {total} blocks");
- for (i, index_block_hash) in index_block_hashes.iter().enumerate() {
- if i % 100 == 0 {
- println!("Checked {i}...");
+ print!("\rValidating: 100% ({}/{})\n", total_blocks, total_blocks);
+
+ if !errors.is_empty() {
+ println!(
+ "\nValidation completed with {} error(s) found in {}s:",
+ errors.len(),
+ start.elapsed().as_secs()
+ );
+ for (hash, message) in errors.iter() {
+ println!(" Block {hash}: {message}");
}
- replay_naka_staging_block(db_path, index_block_hash, conf);
+ process::exit(1);
+ }
+ println!(
+ "\nFinished validating {} blocks in {}s",
+ total_blocks,
+ start.elapsed().as_secs()
+ );
+}
+
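+/// Replay one block through the validation path matching its source database.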
+fn validate_entry(db_path: &str, conf: &Config, entry: &BlockScanEntry) -> Result<(), String> {
+ match entry.source {
+ BlockSource::Nakamoto => replay_naka_staging_block(db_path, &entry.index_block_hash, conf),
+ BlockSource::Epoch2 => replay_staging_block(db_path, &entry.index_block_hash, conf),
}
- println!("Finished. run_time_seconds = {}", start.elapsed().as_secs());
}
/// Replay mock mined blocks from JSON files
@@ -583,20 +788,21 @@ pub fn command_contract_hash(argv: &[String], _conf: Option<&Config>) {
}
/// Fetch and process a `StagingBlock` from database and call `replay_block()` to validate
-fn replay_staging_block(db_path: &str, index_block_hash_hex: &str, conf: Option<&Config>) {
- let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap();
+fn replay_staging_block(
+ db_path: &str,
+ block_id: &StacksBlockId,
+ conf: &Config,
+) -> Result<(), String> {
let chain_state_path = format!("{db_path}/chainstate/");
let sort_db_path = format!("{db_path}/burnchain/sortition");
- let conf = conf.unwrap_or(&DEFAULT_MAINNET_CONFIG);
-
let (mut chainstate, _) = StacksChainState::open(
conf.is_mainnet(),
conf.burnchain.chain_id,
&chain_state_path,
None,
)
- .unwrap();
+ .map_err(|e| format!("Failed to open chainstate at {chain_state_path}: {e:?}"))?;
let burnchain = conf.get_burnchain();
let epochs = conf.burnchain.get_epoch_list();
@@ -610,35 +816,34 @@ fn replay_staging_block(db_path: &str, index_block_hash_hex: &str, conf: Option<
None,
true,
)
- .unwrap();
+ .map_err(|e| format!("Failed to open sortition DB at {sort_db_path}: {e:?}"))?;
+
let sort_tx = sortdb.tx_begin_at_tip();
let blocks_path = chainstate.blocks_path.clone();
- let (mut chainstate_tx, clarity_instance) = chainstate
+ let (chainstate_tx, clarity_instance) = chainstate
.chainstate_tx_begin()
- .expect("Failed to start chainstate tx");
+ .map_err(|e| format!("{e:?}"))?;
let mut next_staging_block =
- StacksChainState::load_staging_block_info(&chainstate_tx.tx, &block_id)
- .expect("Failed to load staging block data")
- .expect("No such index block hash in block database");
+ StacksChainState::load_staging_block_info(&chainstate_tx.tx, block_id)
+ .map_err(|e| format!("Failed to load staging block info: {e:?}"))?
+ .ok_or_else(|| "No such index block hash in block database".to_string())?;
next_staging_block.block_data = StacksChainState::load_block_bytes(
&blocks_path,
&next_staging_block.consensus_hash,
&next_staging_block.anchored_block_hash,
)
- .unwrap()
+ .map_err(|e| format!("Failed to load block bytes: {e:?}"))?
.unwrap_or_default();
- let Some(parent_header_info) =
- StacksChainState::get_parent_header_info(&mut chainstate_tx, &next_staging_block).unwrap()
- else {
- println!("Failed to load parent head info for block: {index_block_hash_hex}");
- return;
- };
+ let parent_header_info =
+ StacksChainState::get_parent_header_info(&chainstate_tx, &next_staging_block)
+ .map_err(|e| format!("Failed to get parent header info: {e:?}"))?
+ .ok_or_else(|| "Missing parent header info".to_string())?;
- let block =
- StacksChainState::extract_stacks_block(&next_staging_block).expect("Failed to get block");
+ let block = StacksChainState::extract_stacks_block(&next_staging_block)
+ .map_err(|e| format!("{e:?}"))?;
let block_size = next_staging_block.block_data.len() as u64;
replay_block(
@@ -648,14 +853,14 @@ fn replay_staging_block(db_path: &str, index_block_hash_hex: &str, conf: Option<
&parent_header_info,
&next_staging_block.parent_microblock_hash,
next_staging_block.parent_microblock_seq,
- &block_id,
+ block_id,
&block,
block_size,
&next_staging_block.consensus_hash,
&next_staging_block.anchored_block_hash,
next_staging_block.commit_burn,
next_staging_block.sortition_burn,
- );
+ )
}
/// Process a mock mined block and call `replay_block()` to validate
@@ -727,7 +932,8 @@ fn replay_mock_mined_block(db_path: &str, block: AssembledAnchorBlock, conf: Opt
// I think the burn is used for miner rewards but not necessary for validation
0,
0,
- );
+ )
+ .expect("Failed to replay mock mined block");
}
/// Validate a block against chainstate
@@ -746,7 +952,7 @@ fn replay_block(
block_hash: &BlockHeaderHash,
block_commit_burn: u64,
block_sortition_burn: u64,
-) {
+) -> Result<(), String> {
let parent_block_header = match &parent_header_info.anchored_header {
StacksBlockHeaderTypes::Epoch2(bh) => bh,
StacksBlockHeaderTypes::Nakamoto(_) => panic!("Nakamoto blocks not supported yet"),
@@ -756,8 +962,7 @@ fn replay_block(
let Some(cost) =
StacksChainState::get_stacks_block_anchored_cost(chainstate_tx.conn(), block_id).unwrap()
else {
- println!("No header info found for {block_id}");
- return;
+ return Err(format!("No header info found for {block_id}"));
};
let Some(next_microblocks) = StacksChainState::inner_find_parent_microblock_stream(
@@ -769,8 +974,7 @@ fn replay_block(
parent_microblock_seq,
)
.unwrap() else {
- println!("No microblock stream found for {block_id}");
- return;
+ return Err(format!("No microblock stream found for {block_id}"));
};
let (burn_header_hash, burn_header_height, burn_header_timestamp, _winning_block_txid) =
@@ -795,15 +999,13 @@ fn replay_block(
);
if !StacksChainState::check_block_attachment(parent_block_header, &block.header) {
- let msg = format!(
+ return Err(format!(
"Invalid stacks block {}/{} -- does not attach to parent {}/{}",
block_consensus_hash,
block.block_hash(),
parent_block_header.block_hash(),
&parent_header_info.consensus_hash
- );
- println!("{msg}");
- return;
+ ));
}
// validation check -- validate parent microblocks and find the ones that connect the
@@ -850,25 +1052,27 @@ fn replay_block(
) {
Ok((receipt, _, _)) => {
if receipt.anchored_block_cost != cost {
- println!(
+ return Err(format!(
"Failed processing block! block = {block_id}. Unexpected cost. expected = {cost}, evaluated = {}",
receipt.anchored_block_cost
- );
- process::exit(1);
+ ));
}
info!("Block processed successfully! block = {block_id}");
+ Ok(())
}
- Err(e) => {
- println!("Failed processing block! block = {block_id}, error = {e:?}");
- process::exit(1);
- }
- };
+ Err(e) => Err(format!(
+ "Failed processing block! block = {block_id}, error = {e:?}"
+ )),
+ }
}
/// Fetch and process a NakamotoBlock from database and call `replay_block_nakamoto()` to validate
-fn replay_naka_staging_block(db_path: &str, index_block_hash_hex: &str, conf: &Config) {
- let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap();
+fn replay_naka_staging_block(
+ db_path: &str,
+ block_id: &StacksBlockId,
+ conf: &Config,
+) -> Result<(), String> {
let chain_state_path = format!("{db_path}/chainstate/");
let sort_db_path = format!("{db_path}/burnchain/sortition");
@@ -878,7 +1082,7 @@ fn replay_naka_staging_block(db_path: &str, index_block_hash_hex: &str, conf: &C
&chain_state_path,
None,
)
- .unwrap();
+ .map_err(|e| format!("Failed to open chainstate: {e:?}"))?;
let burnchain = conf.get_burnchain();
let epochs = conf.burnchain.get_epoch_list();
@@ -892,14 +1096,16 @@ fn replay_naka_staging_block(db_path: &str, index_block_hash_hex: &str, conf: &C
None,
true,
)
- .unwrap();
+ .map_err(|e| format!("Failed to open sortition DB: {e:?}"))?;
let (block, block_size) = chainstate
.nakamoto_blocks_db()
- .get_nakamoto_block(&block_id)
- .unwrap()
- .unwrap();
- replay_block_nakamoto(&mut sortdb, &mut chainstate, &block, block_size).unwrap();
+ .get_nakamoto_block(block_id)
+ .map_err(|e| format!("Failed to load Nakamoto block: {e:?}"))?
+ .ok_or_else(|| "No block data found".to_string())?;
+
+ replay_block_nakamoto(&mut sortdb, &mut chainstate, &block, block_size)
+ .map_err(|e| format!("Failed to validate Nakamoto block: {e:?}"))
}
#[allow(clippy::result_large_err)]
diff --git a/contrib/stacks-inspect/src/main.rs b/contrib/stacks-inspect/src/main.rs
index c66b298fb2..9e5b98a0db 100644
--- a/contrib/stacks-inspect/src/main.rs
+++ b/contrib/stacks-inspect/src/main.rs
@@ -22,7 +22,7 @@ use clarity::types::chainstate::StacksPrivateKey;
use clarity_cli::DEFAULT_CLI_EPOCH;
use stacks_inspect::{
command_contract_hash, command_replay_mock_mining, command_try_mine, command_validate_block,
- command_validate_block_nakamoto, drain_common_opts,
+ drain_common_opts,
};
use stackslib::chainstate::stacks::miner::BlockBuilderSettings;
use stackslib::chainstate::stacks::{
@@ -1585,11 +1585,6 @@ check if the associated microblocks can be downloaded
process::exit(0);
}
- if argv[1] == "validate-naka-block" {
- command_validate_block_nakamoto(&argv[1..], common_opts.config.as_ref());
- process::exit(0);
- }
-
if argv[1] == "replay-mock-mining" {
command_replay_mock_mining(&argv[1..], common_opts.config.as_ref());
process::exit(0);
diff --git a/contrib/tools/block-validation.sh b/contrib/tools/block-validation.sh
index 397814d0d7..5490f3e44b 100755
--- a/contrib/tools/block-validation.sh
+++ b/contrib/tools/block-validation.sh
@@ -6,9 +6,9 @@ set -o pipefail
##
## ** Recommend to run this script in screen or tmux **
##
-## We'll need ~217GB per slice, plus an extra ~4500GB for the chainstate archive and marf DB
-## as of 09/2025:
-## for 10 slices, this is about 2.5TB
+## We'll need ~283GB per slice, plus an extra ~560GB for the marf DB and ~450GB for the chainstate (if not using a local chainstate)
+## as of 12/2025:
+## for 10 slices, this is about 3.5TB
NETWORK="mainnet" ## network to validate
REPO_DIR="$HOME/stacks-core" ## where to build the source
@@ -165,16 +165,19 @@ start_validation() {
local mode=$1
local total_blocks=0
local starting_block=0
- local inspect_command
local slice_counter=0
+ local range_command=""
+ local log_append=""
+ local inspect_bin="${REPO_DIR}/target/release/stacks-inspect"
+ local inspect_config="${REPO_DIR}/stackslib/conf/${NETWORK}-follower-conf.toml"
+ local inspect_prefix="${inspect_bin} --config ${inspect_config} validate-block"
+
case "$mode" in
nakamoto)
## nakamoto blocks
echo "Mode: ${COLYELLOW}${mode}${COLRESET}"
- local log_append="_${mode}"
- inspect_command="validate-naka-block"
- ## get the total number of nakamoto blocks in db
- total_blocks=$(echo "select count(*) from nakamoto_block_headers" | sqlite3 "${SLICE_DIR}"0/chainstate/vm/index.sqlite)
+ log_append="_${mode}"
+ range_command="naka-index-range"
starting_block=0 # for the block counter, start at this block
## use these values if `--testing` arg is provided (only validate 1_000 blocks)
${TESTING} && total_blocks=301883
@@ -183,16 +186,29 @@ start_validation() {
*)
## pre-nakamoto blocks
echo "Mode: ${COLYELLOW}pre-nakamoto${COLRESET}"
- local log_append=""
- inspect_command="validate-block"
- ## get the total number of blocks (with orphans) in db
- total_blocks=$(echo "select count(*) from staging_blocks where orphaned = 0" | sqlite3 "${SLICE_DIR}"0/chainstate/vm/index.sqlite)
+ log_append=""
+ range_command="index-range"
starting_block=0 # for the block counter, start at this block
## use these values if `--testing` arg is provided (only validate 1_000 blocks) Note: 2.5 epoch is at 153106
- ${TESTING} && total_blocks=153000
- ${TESTING} && starting_block=152000
+ ${TESTING} && total_blocks=162200
+ ${TESTING} && starting_block=161200
;;
esac
+
+ # get the total number of blocks by running the command without args
+ if [ "${total_blocks}" -eq 0 ]; then
+ local count_output
+ local count_cmd="${inspect_prefix} ${SLICE_DIR}0 ${range_command}"
+ if ! count_output=$(${count_cmd} 2>/dev/null); then
+ echo "${COLRED}Error${COLRESET} retrieving total number of blocks from chainstate"
+ exit 1
+ fi
+ total_blocks=$(printf '%s\n' "${count_output}" | awk '/Total available entries: / {print $4}')
+ if [ -z "${total_blocks}" ]; then
+ echo "${COLRED}Error${COLRESET} parsing block count from stacks-inspect output"
+ exit 1
+ fi
+ fi
local block_diff=$((total_blocks - starting_block)) ## how many blocks are being validated
local slices=$((CORES - RESERVED)) ## how many validation slices to use
local slice_blocks=$((block_diff / slices)) ## how many blocks to validate per slice
@@ -218,9 +234,10 @@ start_validation() {
}
fi
fi
+ local slice_path="${SLICE_DIR}${slice_counter}"
local log_file="${LOG_DIR}/slice${slice_counter}${log_append}.log"
local log=" | tee -a ${log_file}"
- local cmd="${REPO_DIR}/target/release/stacks-inspect --config ${REPO_DIR}/stackslib/conf/${NETWORK}-follower-conf.toml ${inspect_command} ${SLICE_DIR}${slice_counter} index-range $start_block_count $end_block_count 2>/dev/null"
+ local cmd="${inspect_prefix} ${slice_path} ${range_command} ${start_block_count} ${end_block_count} 2>/dev/null"
echo " Creating tmux window: ${COLGREEN}${TMUX_SESSION}:slice${slice_counter}${COLRESET} :: Blocks: ${COLYELLOW}${start_block_count}-${end_block_count}${COLRESET} || Logging to: ${log_file}"
echo "Command: ${cmd}" > "${log_file}" ## log the command being run for the slice
echo "Validating indexed blocks: ${start_block_count}-${end_block_count} (out of ${total_blocks})" >> "${log_file}"
diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs
index 509acac4f7..d81bd28d80 100644
--- a/stackslib/src/chainstate/stacks/db/blocks.rs
+++ b/stackslib/src/chainstate/stacks/db/blocks.rs
@@ -5868,7 +5868,7 @@ impl StacksChainState {
/// parent block has been processed.
/// If it's not known, return None.
pub fn get_parent_header_info(
- chainstate_tx: &mut ChainstateTx,
+ chainstate_tx: &ChainstateTx,
next_staging_block: &StagingBlock,
) -> Result