Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 9 additions & 2 deletions crates/rbuilder-operator/src/clickhouse.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,10 @@ use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;
use tracing::error;

use crate::{flashbots_config::BuiltBlocksClickhouseConfig, metrics::ClickhouseMetrics};
use crate::{
flashbots_config::BuiltBlocksClickhouseConfig,
metrics::{set_disk_backup_max_size, ClickhouseMetrics},
};

/// BlockRow to insert in clickhouse and also as entry type for the indexer since the BlockRow is made from a few &objects so it makes no sense to have a Block type and copy all the fields.
#[derive(Debug, Clone, Serialize, Deserialize, Row)]
Expand Down Expand Up @@ -145,6 +148,10 @@ impl BuiltBlocksWriter {
let task_manager = rbuilder_utils::tasks::TaskManager::current();
let task_executor = task_manager.executor();

let backup_max_size_bytes =
config.disk_max_size_mb.unwrap_or(DEFAULT_MAX_DISK_SIZE_MB) * MEGA;
set_disk_backup_max_size(backup_max_size_bytes);

let (block_tx, block_rx) = mpsc::channel::<BlockRow>(BUILT_BLOCKS_CHANNEL_SIZE);
spawn_clickhouse_inserter_and_backup::<BlockRow, BlockRow, ClickhouseMetrics>(
&client,
Expand All @@ -153,7 +160,7 @@ impl BuiltBlocksWriter {
BLOCKS_TABLE_NAME.to_string(),
"".to_string(), // No buildername used in blocks table.
Some(config.disk_database_path),
Some(config.disk_max_size_mb.unwrap_or(DEFAULT_MAX_DISK_SIZE_MB) * MEGA),
Some(backup_max_size_bytes),
config
.memory_max_size_mb
.unwrap_or(DEFAULT_MAX_MEMORY_SIZE_MB)
Expand Down
12 changes: 7 additions & 5 deletions crates/rbuilder-operator/src/metrics.rs
Original file line number Diff line number Diff line change
Expand Up @@ -60,17 +60,15 @@ register_metrics! {
IntGauge::new("clickhouse_queue_size", "Size of the queue of the task that is inserting into clickhouse").unwrap();
pub static CLICKHOUSE_DISK_BACKUP_SIZE_BYTES: IntGauge =
IntGauge::new("clickhouse_disk_backup_size_bytes", "Space used in bytes by the local DB for failed commit batches.").unwrap();
pub static CLICKHOUSE_DISK_BACKUP_MAX_SIZE_BYTES: IntGauge =
IntGauge::new("clickhouse_disk_backup_max_size_bytes", "Max space used in bytes by the local DB for failed commit batches. If clickhouse_disk_backup_size_bytes reaches this value we drop data").unwrap();

pub static CLICKHOUSE_DISK_BACKUP_SIZE_BATCHES: IntGauge =
IntGauge::new("clickhouse_disk_backup_size_batches", "Amount of batches in local DB for failed commit batches.").unwrap();
pub static CLICKHOUSE_MEMORY_BACKUP_SIZE_BYTES: IntGauge =
IntGauge::new("clickhouse_memory_backup_size_bytes", "Space used in bytes by the in memory DB for failed commit batches.").unwrap();
pub static CLICKHOUSE_MEMORY_BACKUP_SIZE_BATCHES: IntGauge =
IntGauge::new("clickhouse_memory_backup_size_batches", "Amount of batches in in memory DB for failed commit batches.").unwrap();





}

/*
Expand Down Expand Up @@ -102,6 +100,10 @@ pub(super) fn set_bidding_service_version(version: Version) {

/// Zero-sized marker type: implements the clickhouse backup `Metrics` trait
/// (see the `rbuilder_utils::clickhouse::backup::metrics::Metrics` impl below)
/// so the shared inserter/backup machinery reports through this crate's
/// registered prometheus gauges and counters.
pub(crate) struct ClickhouseMetrics {}

/// Records the configured disk-backup size limit (in bytes) in the
/// `clickhouse_disk_backup_max_size_bytes` gauge, so dashboards/alerts can
/// compare it against `clickhouse_disk_backup_size_bytes` (data is dropped
/// once the backup reaches this limit, per the gauge's help text).
pub(crate) fn set_disk_backup_max_size(max_size_bytes: u64) {
    // The prometheus gauge stores an i64; clamp instead of `as`-casting,
    // which would silently wrap for values above i64::MAX. Real configured
    // sizes are far below that, so the clamp is a pure safety net.
    CLICKHOUSE_DISK_BACKUP_MAX_SIZE_BYTES.set(i64::try_from(max_size_bytes).unwrap_or(i64::MAX));
}

impl rbuilder_utils::clickhouse::backup::metrics::Metrics for ClickhouseMetrics {
fn increment_write_failures(_err: String) {
CLICKHOUSE_WRITE_FAILURES.inc();
Expand Down
Loading